from typing import Any, Dict, List, Optional, Tuple, Union

import re
import copy
from abc import ABC, abstractmethod

import PIL
from PIL import Image

# Raise PIL's decompression-bomb limit so very large images can be opened.
PIL.Image.MAX_IMAGE_PIXELS = 500000000

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from transformers import AutoConfig, AutoModelForCausalLM, Qwen2Config, Qwen2Model, Qwen2ForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput

CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15
LOGDIR = "."

IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
IMAGE_PLACEHOLDER = "<image-placeholder>"

class LlavaConfig(Qwen2Config):
    model_type = "llava_qwen2"


class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)

            if 'unpad' in getattr(config, 'mm_patch_merge_type', ''):
                self.image_newline = nn.Parameter(
                    torch.empty(config.hidden_size, dtype=self.dtype)
                )

    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower

        if self.get_vision_tower() is None:
            vision_tower = build_vision_tower(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else:
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_hidden_size = vision_tower.hidden_size
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type

        if getattr(self, 'mm_projector', None) is None:
            self.mm_projector = build_vision_projector(self.config)

            if 'unpad' in mm_patch_merge_type:
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.image_newline = nn.Parameter(
                    torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std
                )
        else:
            # The projector already exists (it may have been frozen earlier); make
            # sure its parameters are trainable again.
            for p in self.mm_projector.parameters():
                p.requires_grad = True

        if pretrain_mm_mlp_adapter is not None:
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')

            def get_w(weights, keyword):
                return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}

            self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))

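    # Hedged sketch of what the nested `get_w` helper above does, assuming the
    # common checkpoint layout where projector weights are saved under a
    # "model.mm_projector." prefix (the exact key names depend on how the
    # adapter checkpoint was produced):
    #
    #   >>> weights = {"model.mm_projector.0.weight": w0, "model.mm_projector.2.bias": b2}
    #   >>> get_w(weights, "mm_projector")
    #   {"0.weight": w0, "2.bias": b2}
    #
    # i.e. it strips everything up to and including "mm_projector." so the result
    # can be fed directly to `self.mm_projector.load_state_dict`.
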
def select_best_resolution(original_size, possible_resolutions):
    """
    Selects the best resolution from a list of possible resolutions based on the original size.

    Args:
        original_size (tuple): The original size of the image in the format (width, height).
        possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].

    Returns:
        tuple: The best fit resolution in the format (width, height).
    """
    original_width, original_height = original_size
    best_fit = None
    max_effective_resolution = 0
    min_wasted_resolution = float('inf')

    for width, height in possible_resolutions:
        scale = min(width / original_width, height / original_height)
        downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
        effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
        wasted_resolution = (width * height) - effective_resolution

        if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
            max_effective_resolution = effective_resolution
            min_wasted_resolution = wasted_resolution
            best_fit = (width, height)

    return best_fit

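# Worked example (illustrative values, not from any particular config): for an
# 800x600 image and candidates [(672, 672), (336, 672), (672, 336)], scaling to
# fit 672x672 keeps 672x504 = 338,688 effective pixels, more than either of the
# other candidates, so:
#
#   >>> select_best_resolution((800, 600), [(672, 672), (336, 672), (672, 336)])
#   (672, 672)
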
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for images of any resolution.

    Args:
        image_size (tuple): The size of the input image in the format (width, height).
        grid_pinpoints (str): A string representation of a list of possible resolutions.
        patch_size (int): The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format (width, height).
    """
    import ast
    if type(grid_pinpoints) is list:
        possible_resolutions = grid_pinpoints
    else:
        possible_resolutions = ast.literal_eval(grid_pinpoints)
    width, height = select_best_resolution(image_size, possible_resolutions)
    return width // patch_size, height // patch_size

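# Worked example (illustrative values): with grid_pinpoints "[(672, 672), (336, 672)]"
# and a 336-pixel patch size, an 800x600 image selects the 672x672 resolution (see
# the example above), giving a 2x2 grid:
#
#   >>> get_anyres_image_grid_shape((800, 600), "[(672, 672), (336, 672)]", 336)
#   (2, 2)
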
class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_images(self, images):
        image_features = self.get_model().get_vision_tower()(images)
        image_features = self.get_model().mm_projector(image_features)
        return image_features

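    # Shape sketch for encode_images (assuming a ViT-style tower that emits one
    # feature per patch; the actual patch count depends on the vision tower config):
    #   images          -> (N, 3, H, W)
    #   tower features  -> (N, num_patches, mm_hidden_size)
    #   mm_projector    -> (N, num_patches, hidden_size)   # returned value
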
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels,
        images, image_sizes=None
    ):
        vision_tower = self.get_vision_tower()
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            return input_ids, position_ids, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            if type(images) is list:
                images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            mm_patch_merge_type = getattr(self.config, 'mm_patch_merge_type', 'flat')
            image_aspect_ratio = getattr(self.config, 'image_aspect_ratio', 'square')
            if mm_patch_merge_type == 'flat':
                image_features = [x.flatten(0, 1) for x in image_features]
            elif mm_patch_merge_type.startswith('spatial'):
                new_image_features = []
                for image_idx, image_feature in enumerate(image_features):
                    if image_feature.shape[0] > 1:
                        base_image_feature = image_feature[0]
                        image_feature = image_feature[1:]
                        height = width = self.get_vision_tower().num_patches_per_side
                        assert height * width == base_image_feature.shape[0]
                        if image_aspect_ratio == 'anyres':
                            if hasattr(self.get_vision_tower(), 's2_image_size'):
                                img_size = self.get_vision_tower().s2_image_size
                            elif isinstance(self.get_vision_tower().config, dict):
                                img_size = self.get_vision_tower().config["image_cfg"]["image_size"]
                            else:
                                img_size = self.get_vision_tower().config.image_size
                            num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, img_size)
                            image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
                        else:
                            raise NotImplementedError
                        if 'unpad' in mm_patch_merge_type:
                            image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
                            image_feature = image_feature.flatten(1, 2).flatten(2, 3)
                            image_feature = unpad_image(image_feature, image_sizes[image_idx])
                            image_feature = torch.cat((
                                image_feature,
                                self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
                            ), dim=-1)
                            image_feature = image_feature.flatten(1, 2).transpose(0, 1)
                        else:
                            image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
                            image_feature = image_feature.flatten(0, 3)
                        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
                    else:
                        image_feature = image_feature[0]
                        if 'unpad' in mm_patch_merge_type:
                            image_feature = torch.cat((
                                image_feature,
                                self.model.image_newline[None].to(image_feature.device)
                            ), dim=0)
                    new_image_features.append(image_feature)
                image_features = new_image_features
            else:
                raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
        else:
            image_features = self.encode_images(images)

        # Tuning the adapter together with <im_start>/<im_end> embeddings is not
        # supported here; image features are simply spliced between text embeddings.
        if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
            raise NotImplementedError

        # Fill in defaults for anything the caller did not provide. The dummy
        # values are only used internally; the corresponding outputs are reset
        # to None at the end of this method.
        _labels = labels
        _position_ids = position_ids
        _attention_mask = attention_mask
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
        else:
            attention_mask = attention_mask.bool()
        if position_ids is None:
            position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
        if labels is None:
            labels = torch.full_like(input_ids, IGNORE_INDEX)

        # Drop padded positions using the attention mask.
        _input_ids = input_ids
        input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
        labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]

        new_input_embeds = []
        new_labels = []
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
            if num_images == 0:
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
                new_input_embeds.append(cur_input_embeds)
                new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue

            image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
            cur_input_ids_noim = []
            cur_labels = labels[batch_idx]
            cur_labels_noim = []
            for i in range(len(image_token_indices) - 1):
                cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
                cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
            split_sizes = [x.shape[0] for x in cur_labels_noim]
            cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
            cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
            cur_new_input_embeds = []
            cur_new_labels = []

            # Interleave text chunks with image features; image positions receive
            # IGNORE_INDEX labels so they are excluded from the loss.
            for i in range(num_images + 1):
                cur_new_input_embeds.append(cur_input_embeds_no_im[i])
                cur_new_labels.append(cur_labels_noim[i])
                if i < num_images:
                    cur_image_features = image_features[cur_image_idx]
                    cur_image_idx += 1
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))

            cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]

            cur_new_input_embeds = torch.cat(cur_new_input_embeds)
            cur_new_labels = torch.cat(cur_new_labels)

            new_input_embeds.append(cur_new_input_embeds)
            new_labels.append(cur_new_labels)

        # Truncate sequences to the tokenizer max length, since splicing in image
        # embeddings can make them longer than the original token sequences.
        tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
        if tokenizer_model_max_length is not None:
            new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
            new_labels = [x[:tokenizer_model_max_length] for x in new_labels]

        # Re-pad the variable-length sequences into a single batch.
        max_len = max(x.shape[0] for x in new_input_embeds)
        batch_size = len(new_input_embeds)

        new_input_embeds_padded = []
        new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
        attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
        position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)

        for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
            cur_len = cur_new_embed.shape[0]
            if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
                new_input_embeds_padded.append(torch.cat((
                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
                    cur_new_embed
                ), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, -cur_len:] = cur_new_labels
                    attention_mask[i, -cur_len:] = True
                    position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
            else:
                new_input_embeds_padded.append(torch.cat((
                    cur_new_embed,
                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
                ), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, :cur_len] = cur_new_labels
                    attention_mask[i, :cur_len] = True
                    position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)

        new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)

        # Restore None for anything the caller originally left unset.
        if _labels is None:
            new_labels = None
        else:
            new_labels = new_labels_padded

        if _attention_mask is None:
            attention_mask = None
        else:
            attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

        if _position_ids is None:
            position_ids = None

        return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels

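    # Length sketch (illustrative numbers): a prompt tokenized by
    # tokenizer_image_token below into 19 text tokens plus one IMAGE_TOKEN_INDEX
    # placeholder, with the image encoded into 576 patch features, comes back
    # from this method as inputs_embeds of length 19 + 576 = 595; the 576 image
    # positions carry IGNORE_INDEX labels so they contribute nothing to the LM loss.
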
    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                # Initialize the new token embeddings to the mean of the existing ones.
                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

class LlavaQwen2Model(LlavaMetaModel, Qwen2Model):
    config_class = LlavaConfig

    def __init__(self, config: Qwen2Config):
        super(LlavaQwen2Model, self).__init__(config)


class LlavaQwen2ForCausalLM(Qwen2ForCausalLM, LlavaMetaForCausalLM):
    config_class = LlavaConfig

    def __init__(self, config):
        super(Qwen2ForCausalLM, self).__init__(config)
        self.model = LlavaQwen2Model(config)

        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing.
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        cache_position=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        if inputs_embeds is None:
            (
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                inputs_embeds,
                labels
            ) = self.prepare_inputs_labels_for_multimodal(
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                labels,
                images,
                image_sizes
            )

        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (
                inputs,
                position_ids,
                attention_mask,
                _,
                inputs_embeds,
                _
            ) = self.prepare_inputs_labels_for_multimodal(
                inputs,
                position_ids,
                attention_mask,
                None,
                None,
                images,
                image_sizes=image_sizes
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            **kwargs
        )

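    # Usage sketch (assumes `model`, a matching `tokenizer`, and `pixel_values`
    # already preprocessed by the vision tower's image processor; these names are
    # placeholders, not part of this module):
    #
    #   prompt = "<image>\nDescribe this picture."
    #   input_ids = torch.tensor(
    #       tokenizer_image_token(prompt, tokenizer), dtype=torch.long
    #   ).unsqueeze(0).to(model.device)
    #   output_ids = model.generate(input_ids, images=pixel_values, max_new_tokens=64)
    #   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
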
    def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
                                      inputs_embeds=None, **kwargs):
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(
            input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
        )
        if images is not None:
            inputs['images'] = images
        if image_sizes is not None:
            inputs['image_sizes'] = image_sizes
        return inputs

AutoConfig.register("llava_qwen2", LlavaConfig)
AutoModelForCausalLM.register(LlavaConfig, LlavaQwen2ForCausalLM)

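# Registration sketch: after the two calls above, the Auto* factories can resolve
# this architecture by its "llava_qwen2" model_type, e.g. (minimal sketch that
# builds a randomly initialized model with default Qwen2 hyperparameters rather
# than loading a real checkpoint):
#
#   config = AutoConfig.for_model("llava_qwen2")          # -> LlavaConfig
#   model = AutoModelForCausalLM.from_config(config)      # -> LlavaQwen2ForCausalLM
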
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
    prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]

    def insert_separator(X, sep):
        return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]

    input_ids = []
    offset = 0
    if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
        offset = 1
        input_ids.append(prompt_chunks[0][0])

    for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
        input_ids.extend(x[offset:])

    if return_tensors is not None:
        if return_tensors == 'pt':
            return torch.tensor(input_ids, dtype=torch.long)
        raise ValueError(f'Unsupported tensor type: {return_tensors}')
    return input_ids

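# Worked example (purely illustrative token ids, for a tokenizer that prepends a
# BOS token with id 1): if tokenizer("USER: ").input_ids == [1, 101, 102] and
# tokenizer("\nHi").input_ids == [1, 103], then
#
#   tokenizer_image_token("USER: <image>\nHi", tokenizer)
#
# returns [1, 101, 102, -200, 103]: the BOS is kept once, each later chunk drops
# its leading BOS, and IMAGE_TOKEN_INDEX (-200) marks where
# prepare_inputs_labels_for_multimodal will splice in the image features.
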
def expand2square(pil_img, background_color):
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result

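# Usage sketch: pad a non-square PIL image to a centered square canvas before
# resizing; the background color is typically the image processor's mean pixel
# value (the RGB triple below is only an illustrative choice):
#
#   img = Image.open("example.jpg")              # e.g. 640x480
#   img = expand2square(img, (122, 116, 104))    # -> 640x640, original centered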