# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import warnings
from functools import wraps
from types import MethodType
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import (GenerationConfig, LlamaForCausalLM, Qwen2ForCausalLM,
                          Qwen3ForCausalLM, Qwen3MoeForCausalLM)
from transformers.modeling_outputs import (CausalLMOutputWithPast,
                                           SequenceClassifierOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_internvl_chat import InternVLChatConfig
from .conversation import get_conv_template
from .modeling_intern_vit import InternVisionModel, has_flash_attn

logger = logging.get_logger(__name__)
def version_cmp(v1, v2, op='eq'):
import operator
from packaging import version
op_func = getattr(operator, op)
return op_func(version.parse(v1), version.parse(v2))
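# e.g. `version_cmp(transformers.__version__, '4.37.0', 'ge')` is True when the
# installed transformers release is at least 4.37.0.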
def transformers_seq_cls_forward(self, *args, origin_forward, **kwargs):
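    """Wrap the original causal-LM forward for sequence classification.

    The LM head is replaced with `nn.Identity()` in `InternVLChatModel.__init__`,
    so `output.logits` here carries the last hidden states; they are projected
    through `self.score` and pooled at the last non-padding position of each
    sequence, mirroring transformers' `*ForSequenceClassification` heads.
    """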
labels = kwargs.pop('labels', None)
return_dict = kwargs.pop('return_dict', None)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_ids = kwargs.get('input_ids')
inputs_embeds = kwargs.get('inputs_embeds')
output = origin_forward(*args, **kwargs)
if hasattr(output, 'logits'):
output.logits = output.logits.to(self.score.weight.dtype)
elif 'last_hidden_state' in output:
output.logits = output['last_hidden_state'].to(self.score.weight.dtype)
logits = self.score(output.logits)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if output.get('attention_mask') is not None:
            # When padding_free is used in seq_cls tasks, `revert_padding_free`
            # adds an attention_mask to the output
batch_size = output.get('attention_mask').shape[0]
sequence_lengths = output.get('attention_mask').sum(dim=1) - 1
elif input_ids is not None:
# if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
elif kwargs.get('attention_mask') is not None:
sequence_lengths = kwargs['attention_mask'].sum(dim=1) - 1
else:
sequence_lengths = -1
if isinstance(sequence_lengths, torch.Tensor):
sequence_lengths = sequence_lengths.to(logits.device)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits, ) + output[1:]
return ((loss, ) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=output.past_key_values,
hidden_states=output.hidden_states,
attentions=output.attentions,
)
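# Shape sketch (assuming num_labels == 1, i.e. a scalar reward): hidden states
# [B, N, H] -> `score` -> [B, N, 1] -> pool at each sequence's last non-padding
# position -> pooled_logits [B, 1].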
class InternVLChatModel(PreTrainedModel):
config_class = InternVLChatConfig
main_input_name = 'pixel_values'
base_model_prefix = 'language_model'
_supports_flash_attn_2 = True
supports_gradient_checkpointing = True
_no_split_modules = [
"InternVisionModel",
"Qwen3DecoderLayer",
]
    # support transformers >= 4.51
_tp_plan = ''
def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
super().__init__(config)
assert version_cmp(transformers.__version__, '4.37.0', 'ge')
image_size = config.force_image_size or config.vision_config.image_size
patch_size = config.vision_config.patch_size
self.patch_size = patch_size
self.select_layer = config.select_layer
self.template = config.template
self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
self.downsample_ratio = config.downsample_ratio
self.ps_version = config.ps_version
use_flash_attn = use_flash_attn if has_flash_attn else False
config.vision_config.use_flash_attn = True if use_flash_attn else False
config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
logger.info(f'num_image_token: {self.num_image_token}')
logger.info(f'ps_version: {self.ps_version}')
if vision_model is not None:
self.vision_model = vision_model
else:
self.vision_model = InternVisionModel(config.vision_config)
if language_model is not None:
self.language_model = language_model
else:
architecture: str = config.llm_config.architectures[0]
if architecture == 'LlamaForCausalLM':
self.language_model = LlamaForCausalLM(config.llm_config)
elif architecture == 'Qwen2ForCausalLM':
self.language_model = Qwen2ForCausalLM(config.llm_config)
elif architecture == 'Qwen3MoeForCausalLM':
self.language_model = Qwen3MoeForCausalLM(config.llm_config)
elif architecture == 'Qwen3ForCausalLM':
self.language_model = Qwen3ForCausalLM(config.llm_config)
else:
raise NotImplementedError(f'{architecture} is not implemented.')
vit_hidden_size = config.vision_config.hidden_size
llm_hidden_size = config.llm_config.hidden_size
self.mlp1 = nn.Sequential(
nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
nn.GELU(),
nn.Linear(llm_hidden_size, llm_hidden_size)
)
#! >>> NEW: user token & user embedding table
emb = self.language_model.get_input_embeddings()
target_dtype = getattr(emb.weight, "dtype", torch.float32)
target_device = getattr(emb.weight, "device", torch.device("cpu"))
user_ckpt_path = getattr(config, "user_table_path", "/vcc-data/peihaow/perrm/user_table.pt")
train_user_table = bool(getattr(config, "train_user_table", False))
self.user_table = self._build_or_load_user_table(
user_ckpt_path=user_ckpt_path,
default_num_users=getattr(config, "num_users", 100000),
hidden_size=llm_hidden_size,
dtype=target_dtype,
device=target_device,
trainable=train_user_table,
)
self.user_token_id = None
#! <<< NEW
self.img_context_token_id = None
self.conv_template = get_conv_template(self.template)
self.system_message = self.conv_template.system_message
#! >>> NEW: Patch it to be a sequence cls model
llm_model = self.language_model
llm_model.score = nn.Linear(llm_model.config.hidden_size, config.num_labels, bias=False, dtype=llm_model.dtype)
llm_model.set_output_embeddings(nn.Identity())
#! <<< NEW
origin_forward = llm_model.forward
@wraps(origin_forward.__func__)
def new_forward(self, *args, **kwargs):
return transformers_seq_cls_forward(self, *args, origin_forward=origin_forward, **kwargs)
llm_model.forward = MethodType(new_forward, llm_model)
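        # After this patch, `self.language_model(...)` returns a
        # SequenceClassifierOutputWithPast whose `logits` are the pooled reward
        # scores rather than vocabulary logits.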
def _build_or_load_user_table(self,
user_ckpt_path: Optional[str],
default_num_users: int,
hidden_size: int,
dtype: torch.dtype,
device: torch.device,
trainable: bool) -> nn.Embedding:
"""
如果提供了 checkpoint,就按其中的 num_embeddings/embedding_dim/weight 恢复;
否则新建一个随机初始化的表。始终把 dtype/device 对齐到 LLM embedding。
"""
if user_ckpt_path:
ckpt = torch.load(user_ckpt_path, map_location="cpu")
            # expects the checkpoint format described above
            # (num_embeddings / embedding_dim / weight)
num_embeddings = int(ckpt["num_embeddings"])
embedding_dim = int(ckpt["embedding_dim"])
weight = ckpt["weight"] # [num_embeddings, embedding_dim]
if embedding_dim != hidden_size:
                raise ValueError(
                    f"user_table embedding_dim={embedding_dim} != llm_hidden_size={hidden_size}. "
                    "Use the same hidden size, or add a linear projection after loading to align them."
                )
table = nn.Embedding(num_embeddings, embedding_dim)
with torch.no_grad():
table.weight.copy_(weight)
else:
table = nn.Embedding(default_num_users, hidden_size)
nn.init.normal_(table.weight, std=0.02)
        # control trainability (and hence optimizer parameter groups)
        table.weight.requires_grad = trainable
        # align dtype/device with the LLM embedding (especially for bf16/fp16)
        table.to(device=device, dtype=dtype)
return table
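    # A minimal sketch of producing a compatible user_table.pt (the sizes below
    # are placeholders; embedding_dim must equal the LLM hidden size):
    #
    #   table = torch.nn.Embedding(100000, 2048)
    #   torch.save({'num_embeddings': table.num_embeddings,
    #               'embedding_dim': table.embedding_dim,
    #               'weight': table.weight.detach().cpu()}, 'user_table.pt')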
def forward(
self,
pixel_values: torch.FloatTensor,
input_ids: torch.LongTensor = None,
user_ids: Optional[torch.LongTensor] = None,
inputs_embeds: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
image_flags: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
image_flags = image_flags.squeeze(-1)
input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
vit_embeds = self.extract_feature(pixel_values)
vit_embeds = vit_embeds[image_flags == 1]
vit_batch_size = pixel_values.shape[0]
B, N, C = input_embeds.shape
input_embeds = input_embeds.reshape(B * N, C)
# if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
# print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')
input_ids = input_ids.reshape(B * N)
selected = (input_ids == self.img_context_token_id)
try:
input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = int(min(selected.sum(), vit_embeds.size(0)))
            # indexing with a boolean mask returns a copy, so write back through
            # the explicit positions of the selected tokens instead
            idx = selected.nonzero(as_tuple=True)[0][:n_token]
            input_embeds[idx] = input_embeds[idx] * 0.0 + vit_embeds[:n_token]
input_embeds = input_embeds.reshape(B, N, C)
        #! >>> NEW: replace <USER> positions with user_table embeddings
        if (self.user_token_id is not None) and (user_ids is not None):
            # user vector: [B, H] -> [B, 1, H] -> [B, N, H]
            uvec = self.user_table(user_ids.to(input_embeds.device)).unsqueeze(1).expand(-1, N, -1)
            user_mask = (input_ids.view(B, N) == self.user_token_id).unsqueeze(-1)  # [B, N, 1]
            # every <USER> position takes the corresponding sample's user vector
input_embeds = torch.where(user_mask, uvec.to(input_embeds.dtype), input_embeds)
#! <<< NEW
outputs = self.language_model(
inputs_embeds=input_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits
loss = None
if labels is not None:
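            # NOTE: this branch appears retained from the generative InternVL
            # model; it reshapes to vocab_size and so assumes vocabulary logits,
            # not the pooled seq-cls logits produced by the patched forward above.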
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def pixel_shuffle(self, x, scale_factor=0.5):
n, w, h, c = x.size()
# N, W, H, C --> N, W, H * scale, C // scale
x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
# N, W, H * scale, C // scale --> N, H * scale, W, C // scale
x = x.permute(0, 2, 1, 3).contiguous()
# N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
x = x.view(n, int(h * scale_factor), int(w * scale_factor),
int(c / (scale_factor * scale_factor)))
if self.ps_version == 'v1':
warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
'which results in a transposed image.')
else:
x = x.permute(0, 2, 1, 3).contiguous()
return x
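    # Shape sketch: with scale_factor=0.5 an input of (N, 32, 32, C) becomes
    # (N, 16, 16, 4*C), i.e. 1024 ViT patch tokens merge into 256 image tokens
    # with 4x the channel width.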
def extract_feature(self, pixel_values):
if self.select_layer == -1:
vit_embeds = self.vision_model(
pixel_values=pixel_values,
output_hidden_states=False,
return_dict=True).last_hidden_state
else:
vit_embeds = self.vision_model(
pixel_values=pixel_values,
output_hidden_states=True,
return_dict=True).hidden_states[self.select_layer]
vit_embeds = vit_embeds[:, 1:, :]
h = w = int(vit_embeds.shape[1] ** 0.5)
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
vit_embeds = self.mlp1(vit_embeds)
return vit_embeds
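    # Pipeline sketch (assuming a 448x448 input, patch size 14, downsample_ratio
    # 0.5): pixel_values [B, 3, 448, 448] -> ViT [B, 1025, C_vit] -> drop CLS ->
    # [B, 1024, C_vit] -> pixel_shuffle -> [B, 256, 4*C_vit] -> mlp1 -> [B, 256, C_llm].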
def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_images_list=None, num_patches_list=None,
history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            raise NotImplementedError('Multi-turn chat is not yet supported in batch_chat.')
if image_counts is not None:
num_patches_list = image_counts
print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
self.img_context_token_id = img_context_token_id
if verbose and pixel_values is not None:
image_bs = pixel_values.shape[0]
print(f'dynamic ViT batch size: {image_bs}')
if num_images_list is None:
num_images_list = [1] * len(questions)
queries = []
sum_images = 0
for idx, num_images in enumerate(num_images_list):
question = questions[idx]
template = get_conv_template(self.template)
template.system_message = self.system_message
template.append_message(template.roles[0], question)
template.append_message(template.roles[1], None)
query = template.get_prompt()
num_patches_sublist = num_patches_list[sum_images:sum_images+num_images]
sum_images += num_images
for num_patches in num_patches_sublist:
image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
query = query.replace('<image>', image_tokens, 1)
queries.append(query)
tokenizer.padding_side = 'left'
model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
input_ids = model_inputs['input_ids'].to(self.device)
attention_mask = model_inputs['attention_mask'].to(self.device)
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
generation_config['eos_token_id'] = eos_token_id
generation_output = self.generate(
pixel_values=pixel_values,
input_ids=input_ids,
attention_mask=attention_mask,
**generation_config
)
        if generation_config.get('return_dict_in_generate'):
            sequences = generation_output.sequences
            # [-1][-1]: last generation step, last layer
            hidden_states = generation_output.hidden_states[-1][-1]
else:
sequences = generation_output
responses = tokenizer.batch_decode(sequences, skip_special_tokens=True)
responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        if generation_config.get('output_hidden_states'):
return responses, hidden_states
return responses
def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
verbose=False):
if history is None and pixel_values is not None and '<image>' not in question:
question = '<image>\n' + question
if num_patches_list is None:
num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
self.img_context_token_id = img_context_token_id
template = get_conv_template(self.template)
template.system_message = self.system_message
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
history = [] if history is None else history
for (old_question, old_answer) in history:
template.append_message(template.roles[0], old_question)
template.append_message(template.roles[1], old_answer)
template.append_message(template.roles[0], question)
template.append_message(template.roles[1], None)
query = template.get_prompt()
if verbose and pixel_values is not None:
image_bs = pixel_values.shape[0]
print(f'dynamic ViT batch size: {image_bs}')
for num_patches in num_patches_list:
image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
query = query.replace('<image>', image_tokens, 1)
model_inputs = tokenizer(query, return_tensors='pt')
input_ids = model_inputs['input_ids'].to(self.device)
attention_mask = model_inputs['attention_mask'].to(self.device)
generation_config['eos_token_id'] = eos_token_id
generation_output = self.generate(
pixel_values=pixel_values,
input_ids=input_ids,
attention_mask=attention_mask,
**generation_config
)
        if generation_config.get('return_dict_in_generate'):
            sequences = generation_output.sequences
            # [-1][-1]: last generation step, last layer
            hidden_states = generation_output.hidden_states[-1][-1].squeeze()
else:
sequences = generation_output
response = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0]
response = response.split(template.sep.strip())[0].strip()
        if generation_config.get('output_hidden_states'):
return response, hidden_states
history.append((question, response))
if return_history:
return response, history
else:
query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
if verbose:
print(query_to_print, response)
return response
@torch.no_grad()
def generate(
self,
pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
visual_features: Optional[torch.FloatTensor] = None,
generation_config: Optional[GenerationConfig] = None,
output_hidden_states: Optional[bool] = None,
**generate_kwargs,
) -> torch.LongTensor:
assert self.img_context_token_id is not None
if pixel_values is not None:
if visual_features is not None:
vit_embeds = visual_features
else:
vit_embeds = self.extract_feature(pixel_values)
input_embeds = self.language_model.get_input_embeddings()(input_ids)
B, N, C = input_embeds.shape
input_embeds = input_embeds.reshape(B * N, C)
input_ids = input_ids.reshape(B * N)
selected = (input_ids == self.img_context_token_id)
assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
input_embeds = input_embeds.reshape(B, N, C)
else:
input_embeds = self.language_model.get_input_embeddings()(input_ids)
outputs = self.language_model.generate(
inputs_embeds=input_embeds,
attention_mask=attention_mask,
generation_config=generation_config,
output_hidden_states=output_hidden_states,
use_cache=True,
**generate_kwargs,
)
return outputs
@property
def lm_head(self):
return self.language_model.get_output_embeddings()
def get_output_embeddings(self):
return self.language_model.get_output_embeddings()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
return self.language_model.set_input_embeddings(value)
def set_output_embeddings(self, value):
return self.language_model.set_output_embeddings(value)
from transformers.modeling_layers import GenericForSequenceClassification
class InternVLChatForSequenceClassification(GenericForSequenceClassification, InternVLChatModel):
pass
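# Usage sketch (assumptions: `repo_id` is a placeholder, the tokenizer registers
# <IMG_CONTEXT>, and `pixel_values` comes from the model's image preprocessing,
# which is omitted here):
#
#   import torch
#   from transformers import AutoModel, AutoTokenizer
#   repo_id = 'path/to/InternVL3_5-4B-RM'
#   tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
#   model = AutoModel.from_pretrained(repo_id, torch_dtype=torch.bfloat16,
#                                     trust_remote_code=True).eval()
#   generation_config = dict(max_new_tokens=64, do_sample=False,
#                            return_dict_in_generate=False,
#                            output_hidden_states=False)
#   response = model.chat(tokenizer, pixel_values, '<image>\nDescribe this image.',
#                         generation_config)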