from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from PIL import Image
from torch.nn import CrossEntropyLoss
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    Qwen2Config,
    Qwen2ForCausalLM,
    Qwen2Model,
)
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_outputs import CausalLMOutputWithPast, MoeCausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput

from ..vita_arch import VITAMetaForCausalLM, VITAMetaModel

def custom_forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
    r"""
    Args:
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, Qwen2ForCausalLM

    >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
    >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

    >>> prompt = "Hey, are you conscious? Can you talk to me?"
    >>> inputs = tokenizer(prompt, return_tensors="pt")

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
    ```"""
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        cache_position=cache_position,
    )

    hidden_states = outputs[0]
    logits = self.lm_head(hidden_states)
    # The upcast from the stock Qwen2 forward is left disabled:
    # logits = logits.float()

    loss = None
    if labels is not None:
        # Shift so that tokens < n predict n
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        loss_fct = CrossEntropyLoss()
        shift_logits = shift_logits.view(-1, self.config.vocab_size)
        shift_labels = shift_labels.view(-1)
        # Enable model parallelism
        shift_labels = shift_labels.to(shift_logits.device)
        loss = loss_fct(shift_logits, shift_labels)

    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output

    return CausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )


Qwen2ForCausalLM.forward = custom_forward
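# The assignment above replaces `forward` class-wide, so every instance created
# from here on, and the `super().forward(...)` call in VITAQwen2ForCausalLM
# below, run `custom_forward`. A minimal sanity check (safe to remove):
assert Qwen2ForCausalLM.forward is custom_forward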

class VITAQwen2Config(Qwen2Config):
    model_type = "vita-Qwen2"


class VITAQwen2Model(VITAMetaModel, Qwen2Model):
    config_class = VITAQwen2Config

    def __init__(self, config: Qwen2Config):
        super(VITAQwen2Model, self).__init__(config)


class VITAQwen2ForCausalLM(Qwen2ForCausalLM, VITAMetaForCausalLM):
    config_class = VITAQwen2Config

    def __init__(self, config):
        super(Qwen2ForCausalLM, self).__init__(config)
        self.model = VITAQwen2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        audios: Optional[dict] = None,
        sf_masks: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        if inputs_embeds is None:
            (
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                inputs_embeds,
                labels,
            ) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, audios, sf_masks
            )

        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
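
    # `prepare_inputs_labels_for_multimodal` is inherited from VITAMetaForCausalLM;
    # it splices image/audio features into the token-embedding sequence, so the
    # patched Qwen2 forward above only ever sees `inputs_embeds`.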

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        audios: Optional[torch.Tensor] = None,
        sf_masks: Optional[torch.Tensor] = None,
        shared_v_pid_stride: Optional[int] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None or audios is not None:
            (
                inputs,
                position_ids,
                attention_mask,
                _,
                inputs_embeds,
                _,
            ) = self.prepare_inputs_labels_for_multimodal(
                inputs,
                position_ids,
                attention_mask,
                None,
                None,
                images,
                audios,
                sf_masks,
                shared_v_pid_stride,
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            **kwargs,
        )
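
    # Hypothetical call sketch (the tensor names are illustrative, not defined in
    # this file); the prompt ids go in as `inputs` and decoding always proceeds
    # through `inputs_embeds`:
    #
    #     output_ids = model.generate(
    #         inputs=input_ids,      # prompt containing multimodal placeholder tokens
    #         images=image_tensor,   # preprocessed pixel values, e.g. from process_images
    #         audios=audio_dict,
    #         sf_masks=sf_masks,
    #         max_new_tokens=256,
    #     )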

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        attention_mask=None,
        **kwargs,
    ):
        images = kwargs.pop("images", None)
        audios = kwargs.pop("audios", None)
        sf_masks = kwargs.pop("sf_masks", None)
        _inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            **kwargs,
        )

        position_ids = _inputs["position_ids"]
        cache_position = _inputs["cache_position"]
        if cache_position.shape[-1] == 1 and position_ids.shape[-1] > 1:
            # Single-token decode step while `position_ids` still spans the whole
            # prompt: collapse it to one column holding the next position.
            new_position_ids = torch.zeros(
                (position_ids.shape[0], 1), dtype=position_ids.dtype, device=position_ids.device
            )
            new_position_ids[:, 0] = position_ids[0, -1] + cache_position[-1] + 1 - position_ids.shape[-1]
            position_ids = new_position_ids
        _inputs["position_ids"] = position_ids

        if images is not None:
            _inputs["images"] = images
        if audios is not None:
            _inputs["audios"] = audios
        if sf_masks is not None:
            _inputs["sf_masks"] = sf_masks
        return _inputs
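
    # Worked example for the correction above: after prefilling a 10-token prompt,
    # `position_ids` may still span [0..9] while `cache_position` is [10]; then
    # 9 + 10 + 1 - 10 = 10, exactly the position of the token being decoded.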

    def expand2square(self, pil_img, background_color):
        # Pad a PIL image to a square canvas of `background_color`, centering the
        # original image along its shorter axis.
        width, height = pil_img.size
        if width == height:
            return pil_img
        elif width > height:
            result = Image.new(pil_img.mode, (width, width), background_color)
            result.paste(pil_img, (0, (width - height) // 2))
            return result
        else:
            result = Image.new(pil_img.mode, (height, height), background_color)
            result.paste(pil_img, ((height - width) // 2, 0))
            return result
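
    # Example: a 640x480 RGB image becomes a 640x640 canvas filled with
    # `background_color`, with the original pasted at y-offset (640 - 480) // 2 = 80.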

    def process_images(self, images, model_cfg):
        vision_tower = self.get_vision_tower()
        if not vision_tower.is_loaded:
            vision_tower.load_model()
        image_processor = vision_tower.image_processor
        image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
        new_images = []
        if image_aspect_ratio == "pad":
            for image in images:
                # Pad to a square with the processor's mean color so the aspect
                # ratio survives the subsequent resize.
                image = self.expand2square(
                    image, tuple(int(x * 255) for x in image_processor.image_mean)
                )
                image = image_processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
                new_images.append(image)
        else:
            return image_processor(images, return_tensors="pt")["pixel_values"]
        # Stack into one batch tensor when all images share a shape; otherwise
        # return the list as-is.
        if all(x.shape == new_images[0].shape for x in new_images):
            new_images = torch.stack(new_images, dim=0)
        return new_images


AutoConfig.register("vita-Qwen2", VITAQwen2Config)
AutoModelForCausalLM.register(VITAQwen2Config, VITAQwen2ForCausalLM)
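
# With the registrations above, a checkpoint whose config declares
# `"model_type": "vita-Qwen2"` resolves to these classes through the Auto API.
# Minimal loading sketch (the checkpoint path is hypothetical):
#
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained("/path/to/vita-qwen2-checkpoint")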