Delete model_vlm.py
model_vlm.py
DELETED  +0 -130
@@ -1,130 +0,0 @@
from .VLMConfig import VLMConfig
from .model import *
from typing import Optional, Tuple, List
from torch import nn
import warnings
from transformers import CLIPProcessor, CLIPModel
import torch

warnings.filterwarnings('ignore')


class VisionProj(nn.Module):
    def __init__(self, ve_dim=768, lm_dim=512):
        super().__init__()
        self.ve_dim = ve_dim
        self.lm_dim = lm_dim
        self.vision_proj = nn.Sequential(
            nn.Linear(self.ve_dim, self.lm_dim)
        )

    def forward(self, image_encoders):
        vision_proj = self.vision_proj(image_encoders)
        return vision_proj


# Inherits from the language model
class MiniMindVLM(MiniMindLM):
    config_class = VLMConfig

    def __init__(self, params: VLMConfig = None):
        super().__init__(params)
        if not params: params = VLMConfig()
        self.params = params
        self.vision_encoder, self.processor = self.__class__.get_vision_model()
        self.vision_proj = VisionProj(lm_dim=params.dim)

    @staticmethod
    def get_vision_model(model_path="./model/vision_model/clip-vit-base-patch16"):
        model = CLIPModel.from_pretrained(model_path)
        processor = CLIPProcessor.from_pretrained(model_path)
        # Freeze all parameters of the vision encoder
        for param in model.parameters():
            param.requires_grad = False
        return model.eval(), processor

    @staticmethod
    def image2tensor(image, processor):
        if image.mode in ['RGBA', 'LA']: image = image.convert('RGB')
        inputs = processor(images=image, return_tensors="pt")['pixel_values']
        return inputs

    @staticmethod
    def get_image_embeddings(image_tensors, vision_model):
        with torch.no_grad():
            outputs = vision_model.vision_model(pixel_values=image_tensors)
        img_embedding = outputs.last_hidden_state[:, 1:, :].squeeze()
        return img_embedding

    def count_vision_proj(self, tokens, h, vision_tensors=None, seqlen=512):
        def find_indices(tokens, image_ids):
            image_ids_tensor = torch.tensor(image_ids).to(tokens.device)
            len_image_ids = len(image_ids)
            if len_image_ids > tokens.size(1):
                return None
            tokens_view = tokens.unfold(1, len_image_ids, 1)
            matches = (tokens_view == image_ids_tensor).all(dim=2)
            return {
                batch_idx: [(idx.item(), idx.item() + len_image_ids - 1) for idx in
                            matches[batch_idx].nonzero(as_tuple=True)[0]]
                for batch_idx in range(tokens.size(0)) if matches[batch_idx].any()
            } or None

        image_indices = find_indices(tokens, self.params.image_ids)
        if vision_tensors is not None and image_indices:
            vision_proj = self.vision_proj(vision_tensors)
            if len(vision_proj.shape) == 3:
                vision_proj = vision_proj.unsqueeze(0)
            new_h = []
            for i in range(h.size(0)):
                if i in image_indices:
                    h_i = h[i]
                    img_idx = 0
                    for start_idx, end_idx in image_indices[i]:
                        if img_idx < vision_proj.size(1):
                            h_i = torch.cat((h_i[:start_idx], vision_proj[i][img_idx], h_i[end_idx + 1:]), dim=0)[:seqlen]
                            img_idx += 1
                    new_h.append(h_i)
                else:
                    new_h.append(h[i])
            return torch.stack(new_h, dim=0)
        return h

    def forward(self,
                input_ids: Optional[torch.Tensor] = None,
                past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
                use_cache: bool = False,
                **args):
        start_pos = args.get('start_pos', 0)
        pixel_tensors = args.get('pixel_tensors', None)
        h = self.tok_embeddings(input_ids)

        if pixel_tensors is not None and start_pos == 0:
            if len(pixel_tensors.shape) == 6:
                pixel_tensors = pixel_tensors.squeeze(2)
            bs, num, c, im_h, im_w = pixel_tensors.shape
            stack_dim = 1 if bs > 1 else 0
            vision_tensors = torch.stack([
                MiniMindVLM.get_image_embeddings(pixel_tensors[:, i, :, :, :], self.vision_encoder)
                for i in range(num)
            ], dim=stack_dim)
            h = self.count_vision_proj(tokens=input_ids, h=h, vision_tensors=vision_tensors, seqlen=input_ids.shape[1])

        pos_cis = self.pos_cis[start_pos:start_pos + input_ids.shape[1]]
        past_kvs = []
        for l, layer in enumerate(self.layers):
            h, past_kv = layer(
                h, pos_cis,
                past_key_value=past_key_values[l] if past_key_values else None,
                use_cache=use_cache
            )
            past_kvs.append(past_kv)

        logits = self.output(self.norm(h))
        aux_loss = sum(l.feed_forward.aux_loss for l in self.layers if isinstance(l.feed_forward, MOEFeedForward))

        self.OUT.__setitem__('logits', logits)
        self.OUT.__setitem__('aux_loss', aux_loss)
        self.OUT.__setitem__('past_key_values', past_kvs)
        return self.OUT
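
The span matching inside count_vision_proj is the subtle part: unfold(1, k, 1) turns each token sequence into every length-k sliding window, and a window equal to image_ids marks one image-placeholder span. A self-contained sketch with made-up token values:

# Standalone sketch of the sliding-window match used by find_indices above.
import torch

image_ids = [34, 35, 36]                 # placeholder token run (illustrative)
tokens = torch.tensor([[7, 34, 35, 36, 9, 34, 35, 36, 2]])

k = len(image_ids)
windows = tokens.unfold(1, k, 1)         # (bs, seq - k + 1, k)
matches = (windows == torch.tensor(image_ids)).all(dim=2)
spans = [(i.item(), i.item() + k - 1) for i in matches[0].nonzero(as_tuple=True)[0]]
print(spans)                             # [(1, 3), (5, 7)]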
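
And a shape check for the projection path: with the defaults above, CLIP ViT-B/16 on a 224x224 input emits 197 hidden states; get_image_embeddings drops the CLS token, leaving 196 patch vectors of width 768 that VisionProj maps to the LM width, so each placeholder span is replaced by 196 projected tokens.

# Shape sketch only; 196 = (224 / 16)^2 patch tokens after dropping CLS.
import torch
from torch import nn

vision_proj = nn.Linear(768, 512)        # mirrors VisionProj's single Linear
fake_patches = torch.randn(196, 768)     # stands in for get_image_embeddings output
print(vision_proj(fake_patches).shape)   # torch.Size([196, 512])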