wanghanrui committed on
Commit · c3bf9f0
1 Parent(s): 99e4976
Add initial model and configuration files
Browse files
- added_tokens.json +28 -0
- audio_preprocess.py +99 -0
- config.json +117 -0
- configuration.py +135 -0
- generation_config.json +7 -0
- merges.txt +0 -0
- message.py +29 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +897 -0
- modeling_mufun.py +601 -0
- special_tokens_map.json +32 -0
- text_preprocess.py +243 -0
- tokenizer_config.json +241 -0
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "</think>": 151668,
+  "</tool_call>": 151658,
+  "</tool_response>": 151666,
+  "<think>": 151667,
+  "<tool_call>": 151657,
+  "<tool_response>": 151665,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
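These are the Qwen-family special tokens grafted onto the base vocabulary. A quick sanity check, as a minimal sketch — the repo id `wanghanrui/MuFun` is a placeholder inferred from the committer name and `modeling_mufun.py`, not confirmed by this commit:

```python
from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual Hub path of this model.
tok = AutoTokenizer.from_pretrained("wanghanrui/MuFun")

# Entries in added_tokens.json should round-trip through the tokenizer.
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<think>") == 151667
print(tok.convert_ids_to_tokens(151643))  # "<|endoftext|>", also the pad token per config.json
```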
audio_preprocess.py
ADDED
@@ -0,0 +1,99 @@
+import os
+import torch
+import warnings
+warnings.filterwarnings('ignore')
+import requests
+from io import BytesIO
+from transformers.pipelines.audio_utils import ffmpeg_read
+import mutagen
+from torchaudio import functional as taF
+import numpy as np
+
+feature_extractor_sampling_rate = 16000
+clip_length = 30 * feature_extractor_sampling_rate
+clip_drop = feature_extractor_sampling_rate // 2
+AUDIO_EXTENSIONS = ('.wav', '.mp3', '.flac', '.opus', '.ogg')
+
+
+def load_audio_single(audio_file, seg=None):
+    assert isinstance(audio_file, str), "audio_file should be a string"
+    if audio_file.endswith(AUDIO_EXTENSIONS):
+        inputs = audio_file
+        in_sampling_rate = mutagen.File(inputs).info.sample_rate
+        if inputs.startswith("http://") or inputs.startswith("https://"):
+            # We need to actually check for a real protocol, otherwise it's impossible to use a local file
+            inputs = requests.get(inputs).content
+        else:
+            with open(inputs, "rb") as f:
+                inputs = f.read()
+        if isinstance(inputs, bytes):
+            inputs = ffmpeg_read(inputs, in_sampling_rate)
+        if seg is not None:
+            inputs = inputs[int(seg[0] * in_sampling_rate):int(seg[1] * in_sampling_rate)]
+        if in_sampling_rate != feature_extractor_sampling_rate:
+            inputs = taF.resample(
+                torch.from_numpy(inputs.copy()), in_sampling_rate, feature_extractor_sampling_rate
+            ).numpy()
+        if len(inputs) <= clip_length:
+            return [inputs]
+        else:
+            audios = []
+            for i in range(0, len(inputs), clip_length):
+                chunk = inputs[i : i + clip_length]
+                chunk_index = len(chunk)
+                if chunk_index > clip_drop:
+                    audios.append(chunk)
+            return audios
+    if audio_file.endswith('.npy'):
+        return [np.load(audio_file)]
+
+
+def load_audios(audio_preprocess, audio_files, segs=None, audio_folder=None):
+    if audio_files is None:
+        return None, None
+    if isinstance(audio_files, str):
+        audio_files = [audio_files]
+    if segs:
+        if segs and isinstance(segs[0], float):
+            segs = [segs]
+    else:
+        segs = [None for _ in range(len(audio_files))]
+    if audio_folder:
+        audio_files = [os.path.join(audio_folder, afile) for afile in audio_files]
+
+    def get_single_audio(audio_file, seg):
+        try:
+            if seg:
+                audio = load_audio_single(audio_file, seg)
+            else:
+                audio = load_audio_single(audio_file)
+
+            audio = [audio_preprocess(aud) for aud in audio]
+
+        except Exception as e:
+            print(f"Error loading {audio_file} seg {seg}: {e}")
+            audio = None
+
+        return audio
+
+    audio_size = []
+    audio_list = []
+    for ii in range(len(audio_files)):
+        audio_file = audio_files[ii]
+        seg = segs[ii]
+        single_audio_list = get_single_audio(audio_file, seg)
+        audio_size.append(len(single_audio_list))
+        audio_list.extend(single_audio_list)
+
+    return audio_list, audio_size
+
+class AudioPreprocess:
+    def __init__(self, image_processor, data_args={}):
+        self.image_aspect_ratio = getattr(data_args, 'image_aspect_ratio', None)
+        self.image_processor = image_processor
+        # self.image_grid_pinpoints = getattr(data_args, 'image_grid_pinpoints', None)
+
+    def __call__(self, image):
+        assert self.image_aspect_ratio == "audio", "image_aspect_ratio should be 'audio' for audio preprocessing"
+        return self.image_processor(image, sampling_rate=feature_extractor_sampling_rate, return_tensors="pt").input_features
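A minimal usage sketch for the helpers above, assuming `WhisperFeatureExtractor` as the underlying `image_processor` (config.json points the audio tower at `openai/whisper-large-v3`); `example.wav` and the `SimpleNamespace` stand-in for the training `data_args` object are placeholders:

```python
from types import SimpleNamespace
from transformers import WhisperFeatureExtractor

# AudioPreprocess asserts image_aspect_ratio == "audio" before extracting features.
data_args = SimpleNamespace(image_aspect_ratio="audio")
extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-large-v3")
preprocess = AudioPreprocess(extractor, data_args)

# Clips longer than 30 s are split into 30 s chunks; a trailing chunk shorter
# than clip_drop (0.5 s) is discarded. audio_size records how many chunks each
# input file produced, so features can be regrouped per file downstream.
features, sizes = load_audios(preprocess, ["example.wav"])
print(sizes, features[0].shape)  # e.g. [1] torch.Size([1, 128, 3000])
```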
config.json
ADDED
@@ -0,0 +1,117 @@
+{
+  "architectures": [
+    "TinyLlavaForConditionalGeneration"
+  ],
+  "cache_dir": null,
+  "connector_type": "blp_4i_2x",
+  "hidden_size": 4096,
+  "ignore_index": -100,
+  "image_aspect_ratio": "audio",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen3-8B-Base",
+  "model_type": "tinyllava",
+  "auto_map": {
+    "AutoConfig": "configuration.TinyLlavaConfig",
+    "AutoModelForCausalLM": "modeling_mufun.TinyLlavaForConditionalGeneration"
+  },
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": "<|endoftext|>",
+  "resampler_hidden_size": 768,
+  "text_config": {
+    "_attn_implementation_autoset": true,
+    "_name_or_path": "Qwen/Qwen3-8B-Base",
+    "architectures": [
+      "Qwen3ForCausalLM"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 4096,
+    "initializer_range": 0.02,
+    "intermediate_size": 12288,
+    "max_position_embeddings": 32768,
+    "max_window_layers": 36,
+    "model_type": "qwen3",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 36,
+    "num_key_value_heads": 8,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": null,
+    "rope_theta": 1000000,
+    "sliding_window": null,
+    "torch_dtype": "bfloat16",
+    "use_cache": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "tokenizer_model_max_length": 32768,
+  "tokenizer_name_or_path": "Qwen/Qwen3-8B-Base",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "tune_type_connector": "full",
+  "tune_type_llm": "full",
+  "tune_type_vision_tower": "full",
+  "tune_vision_tower_from_layer": 0,
+  "use_cache": false,
+  "vision_config": {
+    "_name_or_path": "openai/whisper-large-v3",
+    "activation_dropout": 0.0,
+    "activation_function": "gelu",
+    "apply_spec_augment": false,
+    "architectures": [
+      "WhisperForConditionalGeneration"
+    ],
+    "attention_dropout": 0.0,
+    "begin_suppress_tokens": [
+      220,
+      50257
+    ],
+    "bos_token_id": 50257,
+    "classifier_proj_size": 256,
+    "d_model": 1280,
+    "decoder_attention_heads": 20,
+    "decoder_ffn_dim": 5120,
+    "decoder_layerdrop": 0.0,
+    "decoder_layers": 32,
+    "decoder_start_token_id": 50258,
+    "dropout": 0.0,
+    "encoder_attention_heads": 20,
+    "encoder_ffn_dim": 5120,
+    "encoder_layerdrop": 0.0,
+    "encoder_layers": 32,
+    "eos_token_id": 50257,
+    "init_std": 0.02,
+    "mask_feature_length": 10,
+    "mask_feature_min_masks": 0,
+    "mask_feature_prob": 0.0,
+    "mask_time_length": 10,
+    "mask_time_min_masks": 2,
+    "mask_time_prob": 0.05,
+    "max_length": 448,
+    "max_source_positions": 1500,
+    "max_target_positions": 448,
+    "median_filter_width": 7,
+    "model_name_or_path": "openai/whisper-large-v3",
+    "model_name_or_path2": "",
+    "model_type": "whisper",
+    "num_hidden_layers": 32,
+    "num_mel_bins": 128,
+    "scale_embedding": false,
+    "torch_dtype": "float16",
+    "use_cache": true,
+    "use_weighted_layer_sum": false,
+    "vocab_size": 51866
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1280,
+  "vision_model_name_or_path": "openai/whisper-large-v3",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
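Because config.json carries an `auto_map`, the custom classes resolve at load time with `trust_remote_code`. A minimal loading sketch (again using the placeholder repo id):

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo = "wanghanrui/MuFun"  # placeholder; use the actual Hub repo id

# auto_map routes AutoConfig to configuration.TinyLlavaConfig and
# AutoModelForCausalLM to modeling_mufun.TinyLlavaForConditionalGeneration.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, trust_remote_code=True)
print(type(model).__name__)  # TinyLlavaForConditionalGeneration
```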
configuration.py
ADDED
@@ -0,0 +1,135 @@
+from transformers import PretrainedConfig, LlavaConfig
+from transformers import CONFIG_MAPPING
+from transformers import AutoConfig
+# from .utils.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
+IGNORE_INDEX = -100
+IMAGE_TOKEN_INDEX = -200
+DEFAULT_IMAGE_TOKEN = "<audio>"
+class TinyLlavaConfig(PretrainedConfig):
+
+    model_type = "tinyllava"
+    def __init__(
+        self,
+        llm_model_name_or_path = '',
+        tokenizer_name_or_path = None,
+        vision_model_name_or_path = '',
+        vision_model_name_or_path2 = '',
+        connector_type = None,
+        text_config=None,
+        hidden_size=2048,
+        vocab_size=32000,
+        ignore_index=-100,
+        image_token_index=32000,
+        pad_token = None,
+        pad_token_id = None,
+        tokenizer_padding_side = 'right',
+        tokenizer_model_max_length = 2048,
+        vision_config = None,
+        vision_hidden_size = None,
+        vision_feature_layer = -2,
+        vision_feature_select_strategy = 'patch',
+        image_aspect_ratio = 'square',
+        resampler_hidden_size = None,
+        num_queries = None,
+        num_resampler_layers = None,
+        use_cache = False,
+        cache_dir = None,
+        tokenizer_use_fast = False,
+        tune_type_llm = 'frozen',
+        tune_type_connector = 'frozen',
+        tune_type_vision_tower = 'frozen',
+        tune_vision_tower_from_layer = -1,
+        **kwargs
+    ):
+        self.llm_model_name_or_path = llm_model_name_or_path
+        self.tokenizer_name_or_path = tokenizer_name_or_path or self.llm_model_name_or_path
+        self.vision_model_name_or_path = vision_model_name_or_path
+        self.vision_model_name_or_path2 = vision_model_name_or_path2
+        self.connector_type = connector_type
+        self.tune_type_llm = tune_type_llm
+        self.tune_type_connector = tune_type_connector
+        self.tune_type_vision_tower = tune_type_vision_tower
+        self.tune_vision_tower_from_layer = tune_vision_tower_from_layer
+
+        self.ignore_index = IGNORE_INDEX
+        self.image_token_index = IMAGE_TOKEN_INDEX
+        self.pad_token = pad_token
+        self.pad_token_id = pad_token_id
+        self.tokenizer_padding_side = tokenizer_padding_side
+        self.tokenizer_model_max_length = tokenizer_model_max_length
+        self.vision_feature_layer = vision_feature_layer
+        self.vision_feature_select_strategy = vision_feature_select_strategy
+        self.image_aspect_ratio = image_aspect_ratio
+        self.resampler_hidden_size = resampler_hidden_size
+        self.num_queries = num_queries
+        self.num_resampler_layers = num_resampler_layers
+        self.use_cache = use_cache
+        self.cache_dir = cache_dir
+        self.tokenizer_use_fast = tokenizer_use_fast
+        self._load_text_config(text_config)
+        self._load_vision_config(vision_config)
+
+        super().__init__(**kwargs)
+
+    def load_from_config(self, config):
+        self.llm_model_name_or_path = getattr(config, 'model_name_or_path', '')
+        self.tokenizer_name_or_path = getattr(config, 'tokenizer_name_or_path', None) or self.llm_model_name_or_path
+        self.vision_model_name_or_path = getattr(config, 'vision_tower', '')
+        self.vision_model_name_or_path2 = getattr(config, 'vision_tower2', '')
+        self.connector_type = getattr(config, 'connector_type', None)
+        self.vision_feature_layer = getattr(config, 'mm_vision_select_layer', -2)
+        self.vision_feature_select_strategy = getattr(config, 'mm_vision_select_feature', "patch")
+        self.image_aspect_ratio = getattr(config, 'image_aspect_ratio', "pad")
+        self.resampler_hidden_size = getattr(config, 'resampler_hidden_size', None)
+        self.num_queries = getattr(config, 'num_queries', None)
+        self.num_resampler_layers = getattr(config, 'num_resampler_layers', None)
+
+        self.cache_dir = getattr(config, 'cache_dir', None)
+        self.tokenizer_use_fast = getattr(config, 'tokenizer_use_fast', False)
+        self.tokenizer_model_max_length = getattr(config, 'model_max_length', 2048)
+        self.tokenizer_padding_side = getattr(config, 'tokenizer_padding_side', 'right')
+
+        self._load_text_config()
+        self._load_vision_config()
+
+    def _load_text_config(self, text_config=None):
+        if self.llm_model_name_or_path is None or self.llm_model_name_or_path == '':
+            self.text_config = CONFIG_MAPPING['llama']()
+
+        else:
+            self.text_config = AutoConfig.from_pretrained(self.llm_model_name_or_path, trust_remote_code=True)
+            if text_config is not None:
+                self.text_config = self.text_config.from_dict(text_config)
+
+        self.hidden_size = getattr(self.text_config, 'hidden_size', getattr(self.text_config, 'model_dim', None))
+        self.vocab_size = getattr(self.text_config, 'vocab_size', None)
+
+    def _load_vision_config(self, vision_config=None):
+        if self.vision_model_name_or_path is None or self.vision_model_name_or_path == '':
+            self.vision_config = CONFIG_MAPPING['clip_vision_model'](
+                intermediate_size=4096,
+                hidden_size=1024,
+                patch_size=14,
+                image_size=336,
+                num_hidden_layers=24,
+                num_attention_heads=16,
+                vocab_size=32000,
+                projection_dim=768,
+            )
+
+        else:
+            self.vision_config = AutoConfig.from_pretrained(self.vision_model_name_or_path.split(':')[-1])
+            self.vision_config = getattr(self.vision_config, 'vision_config', self.vision_config)
+            if vision_config is not None:
+                self.vision_config = self.vision_config.from_dict(vision_config)
+
+        self.vision_config.model_name_or_path = self.vision_model_name_or_path.split(':')[-1]
+        self.vision_config.model_name_or_path2 = self.vision_model_name_or_path2.split(':')[-1]
+        self.vision_hidden_size = getattr(self.vision_config, 'hidden_size', None)
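Worth noting: the `hidden_size` and `vocab_size` constructor arguments are effectively ignored and re-derived from the resolved sub-configs. A sketch, assuming Hub access to the two referenced base models:

```python
cfg = TinyLlavaConfig(
    llm_model_name_or_path="Qwen/Qwen3-8B-Base",
    vision_model_name_or_path="openai/whisper-large-v3",
    image_aspect_ratio="audio",
    connector_type="blp_4i_2x",
)

# Pulled from text_config (Qwen3-8B-Base), not from the 2048/32000 defaults:
print(cfg.hidden_size, cfg.vocab_size)  # 4096 151936
# Pulled from vision_config; WhisperConfig aliases hidden_size to d_model:
print(cfg.vision_hidden_size)           # 1280
```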
generation_config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "transformers_version": "4.51.3",
+  "use_cache": false
+}
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
message.py
ADDED
@@ -0,0 +1,29 @@
+class Message:
+    def __init__(self, msg=None):
+        self._messages = msg if msg else []
+        self._images = []
+        self.skip_next = False
+
+    def add_message(self, question, answer=None):
+        question_msg_dict = {'from': 'human'}
+        question_msg_dict['value'] = question
+        answer_msg_dict = {'from': 'gpt'}
+        answer_msg_dict['value'] = answer
+        self._messages.append(question_msg_dict)
+        self._messages.append(answer_msg_dict)
+
+    def add_image(self, image, index=0):
+        self._images.append((image, index))
+
+    @property
+    def images(self):
+        return self._images
+
+    @property
+    def messages(self):
+        return self._messages
+
+    def copy(self):
+        return Message(self._messages)
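A short usage sketch of this conversation container; `<audio>` matches `DEFAULT_IMAGE_TOKEN` from configuration.py, and the `(image, index)` tuple records where a clip attaches. Note that `copy()` hands the same underlying message list to the new instance rather than duplicating it.

```python
msg = Message()
msg.add_message("<audio>\nWhat instrument is playing in this clip?")
msg.add_image("example.wav", index=0)  # placeholder path, tied to message 0

print(msg.messages)
# [{'from': 'human', 'value': '<audio>\nWhat instrument is playing in this clip?'},
#  {'from': 'gpt', 'value': None}]
```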
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2ab701869bc10e99d8092d92b5ed00b430f50eb880f75f02f2e8819e7d5fde6
+size 4902259304
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:847d26dfc9307152ef2abf3af86d182b4708bf36632b9637c003edd88c122ec1
+size 4915962496
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae48064c3e2d20ec52a3717bf1cbfd196dcd7f2a12e082d7c14460f83f9a9224
+size 4983070600
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:312aba608080c39eec448d2ef20337349424a59236f16c2cd39cd34a58eb16b9
+size 3043005336
model.safetensors.index.json
ADDED
@@ -0,0 +1,897 @@
+{
+  "metadata": {
+    "total_size": 17844180992
+  },
+  "weight_map": {
+    "connector._connector.linear1.bias": "model-00004-of-00004.safetensors",
+    "connector._connector.linear1.weight": "model-00004-of-00004.safetensors",
+    "connector._connector.linear2.bias": "model-00004-of-00004.safetensors",
+    "connector._connector.linear2.weight": "model-00004-of-00004.safetensors",
+    "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
+    "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "language_model.model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "language_model.model.layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
| 325 |
+
"language_model.model.layers.34.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
|
| 326 |
+
"language_model.model.layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 327 |
+
"language_model.model.layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
| 328 |
+
"language_model.model.layers.34.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
|
| 329 |
+
"language_model.model.layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 330 |
+
"language_model.model.layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 331 |
+
"language_model.model.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 332 |
+
"language_model.model.layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
| 333 |
+
"language_model.model.layers.35.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
| 334 |
+
"language_model.model.layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
| 335 |
+
"language_model.model.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
|
| 336 |
+
"language_model.model.layers.35.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
|
| 337 |
+
"language_model.model.layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
| 338 |
+
"language_model.model.layers.35.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
| 339 |
+
"language_model.model.layers.35.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
|
| 340 |
+
"language_model.model.layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
| 341 |
+
"language_model.model.layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
| 342 |
+
"language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 343 |
+
"language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 344 |
+
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 345 |
+
"language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 346 |
+
"language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 347 |
+
"language_model.model.layers.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
|
| 348 |
+
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 349 |
+
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 350 |
+
"language_model.model.layers.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
|
| 351 |
+
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 352 |
+
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 353 |
+
"language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 354 |
+
"language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 355 |
+
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 356 |
+
"language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 357 |
+
"language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 358 |
+
"language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
|
| 359 |
+
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 360 |
+
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 361 |
+
"language_model.model.layers.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
|
| 362 |
+
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 363 |
+
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 364 |
+
"language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 365 |
+
"language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 366 |
+
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 367 |
+
"language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 368 |
+
"language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 369 |
+
"language_model.model.layers.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
|
| 370 |
+
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 371 |
+
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 372 |
+
"language_model.model.layers.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
|
| 373 |
+
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 374 |
+
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 375 |
+
"language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 376 |
+
"language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 377 |
+
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 378 |
+
"language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 379 |
+
"language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 380 |
+
"language_model.model.layers.7.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
|
| 381 |
+
"language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 382 |
+
"language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 383 |
+
"language_model.model.layers.7.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
|
| 384 |
+
"language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 385 |
+
"language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 386 |
+
"language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 387 |
+
"language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
| 388 |
+
"language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 389 |
+
"language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
| 390 |
+
"language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
| 391 |
+
"language_model.model.layers.8.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
|
| 392 |
+
"language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 393 |
+
"language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 394 |
+
"language_model.model.layers.8.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
|
| 395 |
+
"language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 396 |
+
"language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 397 |
+
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 398 |
+
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
| 399 |
+
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
| 400 |
+
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
| 401 |
+
"language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
| 402 |
+
"language_model.model.layers.9.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
|
| 403 |
+
"language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
| 404 |
+
"language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
| 405 |
+
"language_model.model.layers.9.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
|
| 406 |
+
"language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
| 407 |
+
"language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
| 408 |
+
"language_model.model.norm.weight": "model-00004-of-00004.safetensors",
|
| 409 |
+
"vision_tower._vision_tower.conv1.bias": "model-00004-of-00004.safetensors",
|
| 410 |
+
"vision_tower._vision_tower.conv1.weight": "model-00004-of-00004.safetensors",
|
| 411 |
+
"vision_tower._vision_tower.conv2.bias": "model-00004-of-00004.safetensors",
|
| 412 |
+
"vision_tower._vision_tower.conv2.weight": "model-00004-of-00004.safetensors",
|
| 413 |
+
"vision_tower._vision_tower.embed_positions.weight": "model-00004-of-00004.safetensors",
|
| 414 |
+
"vision_tower._vision_tower.layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 415 |
+
"vision_tower._vision_tower.layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 416 |
+
"vision_tower._vision_tower.layers.0.fc1.bias": "model-00004-of-00004.safetensors",
|
| 417 |
+
"vision_tower._vision_tower.layers.0.fc1.weight": "model-00004-of-00004.safetensors",
|
| 418 |
+
"vision_tower._vision_tower.layers.0.fc2.bias": "model-00004-of-00004.safetensors",
|
| 419 |
+
"vision_tower._vision_tower.layers.0.fc2.weight": "model-00004-of-00004.safetensors",
|
| 420 |
+
"vision_tower._vision_tower.layers.0.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 421 |
+
"vision_tower._vision_tower.layers.0.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 422 |
+
"vision_tower._vision_tower.layers.0.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 423 |
+
"vision_tower._vision_tower.layers.0.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 424 |
+
"vision_tower._vision_tower.layers.0.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 425 |
+
"vision_tower._vision_tower.layers.0.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 426 |
+
"vision_tower._vision_tower.layers.0.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 427 |
+
"vision_tower._vision_tower.layers.0.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 428 |
+
"vision_tower._vision_tower.layers.0.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 429 |
+
"vision_tower._vision_tower.layers.0.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 430 |
+
"vision_tower._vision_tower.layers.0.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 431 |
+
"vision_tower._vision_tower.layers.1.fc1.bias": "model-00004-of-00004.safetensors",
|
| 432 |
+
"vision_tower._vision_tower.layers.1.fc1.weight": "model-00004-of-00004.safetensors",
|
| 433 |
+
"vision_tower._vision_tower.layers.1.fc2.bias": "model-00004-of-00004.safetensors",
|
| 434 |
+
"vision_tower._vision_tower.layers.1.fc2.weight": "model-00004-of-00004.safetensors",
|
| 435 |
+
"vision_tower._vision_tower.layers.1.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 436 |
+
"vision_tower._vision_tower.layers.1.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 437 |
+
"vision_tower._vision_tower.layers.1.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 438 |
+
"vision_tower._vision_tower.layers.1.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 439 |
+
"vision_tower._vision_tower.layers.1.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 440 |
+
"vision_tower._vision_tower.layers.1.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 441 |
+
"vision_tower._vision_tower.layers.1.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 442 |
+
"vision_tower._vision_tower.layers.1.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 443 |
+
"vision_tower._vision_tower.layers.1.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 444 |
+
"vision_tower._vision_tower.layers.1.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 445 |
+
"vision_tower._vision_tower.layers.1.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 446 |
+
"vision_tower._vision_tower.layers.10.fc1.bias": "model-00004-of-00004.safetensors",
|
| 447 |
+
"vision_tower._vision_tower.layers.10.fc1.weight": "model-00004-of-00004.safetensors",
|
| 448 |
+
"vision_tower._vision_tower.layers.10.fc2.bias": "model-00004-of-00004.safetensors",
|
| 449 |
+
"vision_tower._vision_tower.layers.10.fc2.weight": "model-00004-of-00004.safetensors",
|
| 450 |
+
"vision_tower._vision_tower.layers.10.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 451 |
+
"vision_tower._vision_tower.layers.10.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 452 |
+
"vision_tower._vision_tower.layers.10.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 453 |
+
"vision_tower._vision_tower.layers.10.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 454 |
+
"vision_tower._vision_tower.layers.10.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 455 |
+
"vision_tower._vision_tower.layers.10.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 456 |
+
"vision_tower._vision_tower.layers.10.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 457 |
+
"vision_tower._vision_tower.layers.10.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 458 |
+
"vision_tower._vision_tower.layers.10.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 459 |
+
"vision_tower._vision_tower.layers.10.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 460 |
+
"vision_tower._vision_tower.layers.10.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 461 |
+
"vision_tower._vision_tower.layers.11.fc1.bias": "model-00004-of-00004.safetensors",
|
| 462 |
+
"vision_tower._vision_tower.layers.11.fc1.weight": "model-00004-of-00004.safetensors",
|
| 463 |
+
"vision_tower._vision_tower.layers.11.fc2.bias": "model-00004-of-00004.safetensors",
|
| 464 |
+
"vision_tower._vision_tower.layers.11.fc2.weight": "model-00004-of-00004.safetensors",
|
| 465 |
+
"vision_tower._vision_tower.layers.11.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 466 |
+
"vision_tower._vision_tower.layers.11.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 467 |
+
"vision_tower._vision_tower.layers.11.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 468 |
+
"vision_tower._vision_tower.layers.11.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 469 |
+
"vision_tower._vision_tower.layers.11.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 470 |
+
"vision_tower._vision_tower.layers.11.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 471 |
+
"vision_tower._vision_tower.layers.11.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 472 |
+
"vision_tower._vision_tower.layers.11.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 473 |
+
"vision_tower._vision_tower.layers.11.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 474 |
+
"vision_tower._vision_tower.layers.11.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 475 |
+
"vision_tower._vision_tower.layers.11.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 476 |
+
"vision_tower._vision_tower.layers.12.fc1.bias": "model-00004-of-00004.safetensors",
|
| 477 |
+
"vision_tower._vision_tower.layers.12.fc1.weight": "model-00004-of-00004.safetensors",
|
| 478 |
+
"vision_tower._vision_tower.layers.12.fc2.bias": "model-00004-of-00004.safetensors",
|
| 479 |
+
"vision_tower._vision_tower.layers.12.fc2.weight": "model-00004-of-00004.safetensors",
|
| 480 |
+
"vision_tower._vision_tower.layers.12.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 481 |
+
"vision_tower._vision_tower.layers.12.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 482 |
+
"vision_tower._vision_tower.layers.12.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 483 |
+
"vision_tower._vision_tower.layers.12.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 484 |
+
"vision_tower._vision_tower.layers.12.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 485 |
+
"vision_tower._vision_tower.layers.12.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 486 |
+
"vision_tower._vision_tower.layers.12.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 487 |
+
"vision_tower._vision_tower.layers.12.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 488 |
+
"vision_tower._vision_tower.layers.12.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 489 |
+
"vision_tower._vision_tower.layers.12.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 490 |
+
"vision_tower._vision_tower.layers.12.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 491 |
+
"vision_tower._vision_tower.layers.13.fc1.bias": "model-00004-of-00004.safetensors",
|
| 492 |
+
"vision_tower._vision_tower.layers.13.fc1.weight": "model-00004-of-00004.safetensors",
|
| 493 |
+
"vision_tower._vision_tower.layers.13.fc2.bias": "model-00004-of-00004.safetensors",
|
| 494 |
+
"vision_tower._vision_tower.layers.13.fc2.weight": "model-00004-of-00004.safetensors",
|
| 495 |
+
"vision_tower._vision_tower.layers.13.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 496 |
+
"vision_tower._vision_tower.layers.13.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 497 |
+
"vision_tower._vision_tower.layers.13.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 498 |
+
"vision_tower._vision_tower.layers.13.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 499 |
+
"vision_tower._vision_tower.layers.13.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 500 |
+
"vision_tower._vision_tower.layers.13.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 501 |
+
"vision_tower._vision_tower.layers.13.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 502 |
+
"vision_tower._vision_tower.layers.13.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 503 |
+
"vision_tower._vision_tower.layers.13.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 504 |
+
"vision_tower._vision_tower.layers.13.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 505 |
+
"vision_tower._vision_tower.layers.13.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 506 |
+
"vision_tower._vision_tower.layers.14.fc1.bias": "model-00004-of-00004.safetensors",
|
| 507 |
+
"vision_tower._vision_tower.layers.14.fc1.weight": "model-00004-of-00004.safetensors",
|
| 508 |
+
"vision_tower._vision_tower.layers.14.fc2.bias": "model-00004-of-00004.safetensors",
|
| 509 |
+
"vision_tower._vision_tower.layers.14.fc2.weight": "model-00004-of-00004.safetensors",
|
| 510 |
+
"vision_tower._vision_tower.layers.14.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 511 |
+
"vision_tower._vision_tower.layers.14.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 512 |
+
"vision_tower._vision_tower.layers.14.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 513 |
+
"vision_tower._vision_tower.layers.14.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 514 |
+
"vision_tower._vision_tower.layers.14.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 515 |
+
"vision_tower._vision_tower.layers.14.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 516 |
+
"vision_tower._vision_tower.layers.14.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 517 |
+
"vision_tower._vision_tower.layers.14.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 518 |
+
"vision_tower._vision_tower.layers.14.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 519 |
+
"vision_tower._vision_tower.layers.14.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 520 |
+
"vision_tower._vision_tower.layers.14.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 521 |
+
"vision_tower._vision_tower.layers.15.fc1.bias": "model-00004-of-00004.safetensors",
|
| 522 |
+
"vision_tower._vision_tower.layers.15.fc1.weight": "model-00004-of-00004.safetensors",
|
| 523 |
+
"vision_tower._vision_tower.layers.15.fc2.bias": "model-00004-of-00004.safetensors",
|
| 524 |
+
"vision_tower._vision_tower.layers.15.fc2.weight": "model-00004-of-00004.safetensors",
|
| 525 |
+
"vision_tower._vision_tower.layers.15.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 526 |
+
"vision_tower._vision_tower.layers.15.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 527 |
+
"vision_tower._vision_tower.layers.15.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 528 |
+
"vision_tower._vision_tower.layers.15.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 529 |
+
"vision_tower._vision_tower.layers.15.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 530 |
+
"vision_tower._vision_tower.layers.15.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 531 |
+
"vision_tower._vision_tower.layers.15.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 532 |
+
"vision_tower._vision_tower.layers.15.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 533 |
+
"vision_tower._vision_tower.layers.15.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 534 |
+
"vision_tower._vision_tower.layers.15.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 535 |
+
"vision_tower._vision_tower.layers.15.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 536 |
+
"vision_tower._vision_tower.layers.16.fc1.bias": "model-00004-of-00004.safetensors",
|
| 537 |
+
"vision_tower._vision_tower.layers.16.fc1.weight": "model-00004-of-00004.safetensors",
|
| 538 |
+
"vision_tower._vision_tower.layers.16.fc2.bias": "model-00004-of-00004.safetensors",
|
| 539 |
+
"vision_tower._vision_tower.layers.16.fc2.weight": "model-00004-of-00004.safetensors",
|
| 540 |
+
"vision_tower._vision_tower.layers.16.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 541 |
+
"vision_tower._vision_tower.layers.16.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 542 |
+
"vision_tower._vision_tower.layers.16.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 543 |
+
"vision_tower._vision_tower.layers.16.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 544 |
+
"vision_tower._vision_tower.layers.16.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 545 |
+
"vision_tower._vision_tower.layers.16.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 546 |
+
"vision_tower._vision_tower.layers.16.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 547 |
+
"vision_tower._vision_tower.layers.16.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 548 |
+
"vision_tower._vision_tower.layers.16.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 549 |
+
"vision_tower._vision_tower.layers.16.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 550 |
+
"vision_tower._vision_tower.layers.16.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 551 |
+
"vision_tower._vision_tower.layers.17.fc1.bias": "model-00004-of-00004.safetensors",
|
| 552 |
+
"vision_tower._vision_tower.layers.17.fc1.weight": "model-00004-of-00004.safetensors",
|
| 553 |
+
"vision_tower._vision_tower.layers.17.fc2.bias": "model-00004-of-00004.safetensors",
|
| 554 |
+
"vision_tower._vision_tower.layers.17.fc2.weight": "model-00004-of-00004.safetensors",
|
| 555 |
+
"vision_tower._vision_tower.layers.17.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 556 |
+
"vision_tower._vision_tower.layers.17.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 557 |
+
"vision_tower._vision_tower.layers.17.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 558 |
+
"vision_tower._vision_tower.layers.17.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 559 |
+
"vision_tower._vision_tower.layers.17.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 560 |
+
"vision_tower._vision_tower.layers.17.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 561 |
+
"vision_tower._vision_tower.layers.17.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 562 |
+
"vision_tower._vision_tower.layers.17.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 563 |
+
"vision_tower._vision_tower.layers.17.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 564 |
+
"vision_tower._vision_tower.layers.17.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 565 |
+
"vision_tower._vision_tower.layers.17.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 566 |
+
"vision_tower._vision_tower.layers.18.fc1.bias": "model-00004-of-00004.safetensors",
|
| 567 |
+
"vision_tower._vision_tower.layers.18.fc1.weight": "model-00004-of-00004.safetensors",
|
| 568 |
+
"vision_tower._vision_tower.layers.18.fc2.bias": "model-00004-of-00004.safetensors",
|
| 569 |
+
"vision_tower._vision_tower.layers.18.fc2.weight": "model-00004-of-00004.safetensors",
|
| 570 |
+
"vision_tower._vision_tower.layers.18.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 571 |
+
"vision_tower._vision_tower.layers.18.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 572 |
+
"vision_tower._vision_tower.layers.18.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 573 |
+
"vision_tower._vision_tower.layers.18.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 574 |
+
"vision_tower._vision_tower.layers.18.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 575 |
+
"vision_tower._vision_tower.layers.18.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 576 |
+
"vision_tower._vision_tower.layers.18.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 577 |
+
"vision_tower._vision_tower.layers.18.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 578 |
+
"vision_tower._vision_tower.layers.18.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 579 |
+
"vision_tower._vision_tower.layers.18.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 580 |
+
"vision_tower._vision_tower.layers.18.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 581 |
+
"vision_tower._vision_tower.layers.19.fc1.bias": "model-00004-of-00004.safetensors",
|
| 582 |
+
"vision_tower._vision_tower.layers.19.fc1.weight": "model-00004-of-00004.safetensors",
|
| 583 |
+
"vision_tower._vision_tower.layers.19.fc2.bias": "model-00004-of-00004.safetensors",
|
| 584 |
+
"vision_tower._vision_tower.layers.19.fc2.weight": "model-00004-of-00004.safetensors",
|
| 585 |
+
"vision_tower._vision_tower.layers.19.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 586 |
+
"vision_tower._vision_tower.layers.19.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 587 |
+
"vision_tower._vision_tower.layers.19.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 588 |
+
"vision_tower._vision_tower.layers.19.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 589 |
+
"vision_tower._vision_tower.layers.19.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 590 |
+
"vision_tower._vision_tower.layers.19.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 591 |
+
"vision_tower._vision_tower.layers.19.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 592 |
+
"vision_tower._vision_tower.layers.19.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 593 |
+
"vision_tower._vision_tower.layers.19.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 594 |
+
"vision_tower._vision_tower.layers.19.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 595 |
+
"vision_tower._vision_tower.layers.19.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 596 |
+
"vision_tower._vision_tower.layers.2.fc1.bias": "model-00004-of-00004.safetensors",
|
| 597 |
+
"vision_tower._vision_tower.layers.2.fc1.weight": "model-00004-of-00004.safetensors",
|
| 598 |
+
"vision_tower._vision_tower.layers.2.fc2.bias": "model-00004-of-00004.safetensors",
|
| 599 |
+
"vision_tower._vision_tower.layers.2.fc2.weight": "model-00004-of-00004.safetensors",
|
| 600 |
+
"vision_tower._vision_tower.layers.2.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 601 |
+
"vision_tower._vision_tower.layers.2.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 602 |
+
"vision_tower._vision_tower.layers.2.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 603 |
+
"vision_tower._vision_tower.layers.2.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 604 |
+
"vision_tower._vision_tower.layers.2.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 605 |
+
"vision_tower._vision_tower.layers.2.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 606 |
+
"vision_tower._vision_tower.layers.2.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 607 |
+
"vision_tower._vision_tower.layers.2.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 608 |
+
"vision_tower._vision_tower.layers.2.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 609 |
+
"vision_tower._vision_tower.layers.2.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 610 |
+
"vision_tower._vision_tower.layers.2.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 611 |
+
"vision_tower._vision_tower.layers.20.fc1.bias": "model-00004-of-00004.safetensors",
|
| 612 |
+
"vision_tower._vision_tower.layers.20.fc1.weight": "model-00004-of-00004.safetensors",
|
| 613 |
+
"vision_tower._vision_tower.layers.20.fc2.bias": "model-00004-of-00004.safetensors",
|
| 614 |
+
"vision_tower._vision_tower.layers.20.fc2.weight": "model-00004-of-00004.safetensors",
|
| 615 |
+
"vision_tower._vision_tower.layers.20.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 616 |
+
"vision_tower._vision_tower.layers.20.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 617 |
+
"vision_tower._vision_tower.layers.20.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 618 |
+
"vision_tower._vision_tower.layers.20.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 619 |
+
"vision_tower._vision_tower.layers.20.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 620 |
+
"vision_tower._vision_tower.layers.20.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 621 |
+
"vision_tower._vision_tower.layers.20.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 622 |
+
"vision_tower._vision_tower.layers.20.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 623 |
+
"vision_tower._vision_tower.layers.20.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 624 |
+
"vision_tower._vision_tower.layers.20.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 625 |
+
"vision_tower._vision_tower.layers.20.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 626 |
+
"vision_tower._vision_tower.layers.21.fc1.bias": "model-00004-of-00004.safetensors",
|
| 627 |
+
"vision_tower._vision_tower.layers.21.fc1.weight": "model-00004-of-00004.safetensors",
|
| 628 |
+
"vision_tower._vision_tower.layers.21.fc2.bias": "model-00004-of-00004.safetensors",
|
| 629 |
+
"vision_tower._vision_tower.layers.21.fc2.weight": "model-00004-of-00004.safetensors",
|
| 630 |
+
"vision_tower._vision_tower.layers.21.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 631 |
+
"vision_tower._vision_tower.layers.21.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 632 |
+
"vision_tower._vision_tower.layers.21.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 633 |
+
"vision_tower._vision_tower.layers.21.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 634 |
+
"vision_tower._vision_tower.layers.21.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 635 |
+
"vision_tower._vision_tower.layers.21.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 636 |
+
"vision_tower._vision_tower.layers.21.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 637 |
+
"vision_tower._vision_tower.layers.21.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 638 |
+
"vision_tower._vision_tower.layers.21.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 639 |
+
"vision_tower._vision_tower.layers.21.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 640 |
+
"vision_tower._vision_tower.layers.21.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 641 |
+
"vision_tower._vision_tower.layers.22.fc1.bias": "model-00004-of-00004.safetensors",
|
| 642 |
+
"vision_tower._vision_tower.layers.22.fc1.weight": "model-00004-of-00004.safetensors",
|
| 643 |
+
"vision_tower._vision_tower.layers.22.fc2.bias": "model-00004-of-00004.safetensors",
|
| 644 |
+
"vision_tower._vision_tower.layers.22.fc2.weight": "model-00004-of-00004.safetensors",
|
| 645 |
+
"vision_tower._vision_tower.layers.22.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 646 |
+
"vision_tower._vision_tower.layers.22.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 647 |
+
"vision_tower._vision_tower.layers.22.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 648 |
+
"vision_tower._vision_tower.layers.22.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 649 |
+
"vision_tower._vision_tower.layers.22.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 650 |
+
"vision_tower._vision_tower.layers.22.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 651 |
+
"vision_tower._vision_tower.layers.22.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 652 |
+
"vision_tower._vision_tower.layers.22.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 653 |
+
"vision_tower._vision_tower.layers.22.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 654 |
+
"vision_tower._vision_tower.layers.22.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 655 |
+
"vision_tower._vision_tower.layers.22.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 656 |
+
"vision_tower._vision_tower.layers.23.fc1.bias": "model-00004-of-00004.safetensors",
|
| 657 |
+
"vision_tower._vision_tower.layers.23.fc1.weight": "model-00004-of-00004.safetensors",
|
| 658 |
+
"vision_tower._vision_tower.layers.23.fc2.bias": "model-00004-of-00004.safetensors",
|
| 659 |
+
"vision_tower._vision_tower.layers.23.fc2.weight": "model-00004-of-00004.safetensors",
|
| 660 |
+
"vision_tower._vision_tower.layers.23.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 661 |
+
"vision_tower._vision_tower.layers.23.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 662 |
+
"vision_tower._vision_tower.layers.23.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 663 |
+
"vision_tower._vision_tower.layers.23.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 664 |
+
"vision_tower._vision_tower.layers.23.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 665 |
+
"vision_tower._vision_tower.layers.23.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 666 |
+
"vision_tower._vision_tower.layers.23.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 667 |
+
"vision_tower._vision_tower.layers.23.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 668 |
+
"vision_tower._vision_tower.layers.23.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 669 |
+
"vision_tower._vision_tower.layers.23.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 670 |
+
"vision_tower._vision_tower.layers.23.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 671 |
+
"vision_tower._vision_tower.layers.24.fc1.bias": "model-00004-of-00004.safetensors",
|
| 672 |
+
"vision_tower._vision_tower.layers.24.fc1.weight": "model-00004-of-00004.safetensors",
|
| 673 |
+
"vision_tower._vision_tower.layers.24.fc2.bias": "model-00004-of-00004.safetensors",
|
| 674 |
+
"vision_tower._vision_tower.layers.24.fc2.weight": "model-00004-of-00004.safetensors",
|
| 675 |
+
"vision_tower._vision_tower.layers.24.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 676 |
+
"vision_tower._vision_tower.layers.24.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 677 |
+
"vision_tower._vision_tower.layers.24.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 678 |
+
"vision_tower._vision_tower.layers.24.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 679 |
+
"vision_tower._vision_tower.layers.24.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 680 |
+
"vision_tower._vision_tower.layers.24.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 681 |
+
"vision_tower._vision_tower.layers.24.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 682 |
+
"vision_tower._vision_tower.layers.24.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 683 |
+
"vision_tower._vision_tower.layers.24.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 684 |
+
"vision_tower._vision_tower.layers.24.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 685 |
+
"vision_tower._vision_tower.layers.24.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 686 |
+
"vision_tower._vision_tower.layers.25.fc1.bias": "model-00004-of-00004.safetensors",
|
| 687 |
+
"vision_tower._vision_tower.layers.25.fc1.weight": "model-00004-of-00004.safetensors",
|
| 688 |
+
"vision_tower._vision_tower.layers.25.fc2.bias": "model-00004-of-00004.safetensors",
|
| 689 |
+
"vision_tower._vision_tower.layers.25.fc2.weight": "model-00004-of-00004.safetensors",
|
| 690 |
+
"vision_tower._vision_tower.layers.25.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 691 |
+
"vision_tower._vision_tower.layers.25.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 692 |
+
"vision_tower._vision_tower.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 693 |
+
"vision_tower._vision_tower.layers.25.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 694 |
+
"vision_tower._vision_tower.layers.25.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 695 |
+
"vision_tower._vision_tower.layers.25.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 696 |
+
"vision_tower._vision_tower.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 697 |
+
"vision_tower._vision_tower.layers.25.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 698 |
+
"vision_tower._vision_tower.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 699 |
+
"vision_tower._vision_tower.layers.25.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 700 |
+
"vision_tower._vision_tower.layers.25.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 701 |
+
"vision_tower._vision_tower.layers.26.fc1.bias": "model-00004-of-00004.safetensors",
|
| 702 |
+
"vision_tower._vision_tower.layers.26.fc1.weight": "model-00004-of-00004.safetensors",
|
| 703 |
+
"vision_tower._vision_tower.layers.26.fc2.bias": "model-00004-of-00004.safetensors",
|
| 704 |
+
"vision_tower._vision_tower.layers.26.fc2.weight": "model-00004-of-00004.safetensors",
|
| 705 |
+
"vision_tower._vision_tower.layers.26.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 706 |
+
"vision_tower._vision_tower.layers.26.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 707 |
+
"vision_tower._vision_tower.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 708 |
+
"vision_tower._vision_tower.layers.26.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 709 |
+
"vision_tower._vision_tower.layers.26.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 710 |
+
"vision_tower._vision_tower.layers.26.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 711 |
+
"vision_tower._vision_tower.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 712 |
+
"vision_tower._vision_tower.layers.26.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 713 |
+
"vision_tower._vision_tower.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 714 |
+
"vision_tower._vision_tower.layers.26.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 715 |
+
"vision_tower._vision_tower.layers.26.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 716 |
+
"vision_tower._vision_tower.layers.27.fc1.bias": "model-00004-of-00004.safetensors",
|
| 717 |
+
"vision_tower._vision_tower.layers.27.fc1.weight": "model-00004-of-00004.safetensors",
|
| 718 |
+
"vision_tower._vision_tower.layers.27.fc2.bias": "model-00004-of-00004.safetensors",
|
| 719 |
+
"vision_tower._vision_tower.layers.27.fc2.weight": "model-00004-of-00004.safetensors",
|
| 720 |
+
"vision_tower._vision_tower.layers.27.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 721 |
+
"vision_tower._vision_tower.layers.27.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 722 |
+
"vision_tower._vision_tower.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 723 |
+
"vision_tower._vision_tower.layers.27.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 724 |
+
"vision_tower._vision_tower.layers.27.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 725 |
+
"vision_tower._vision_tower.layers.27.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 726 |
+
"vision_tower._vision_tower.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 727 |
+
"vision_tower._vision_tower.layers.27.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 728 |
+
"vision_tower._vision_tower.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 729 |
+
"vision_tower._vision_tower.layers.27.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 730 |
+
"vision_tower._vision_tower.layers.27.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 731 |
+
"vision_tower._vision_tower.layers.28.fc1.bias": "model-00004-of-00004.safetensors",
|
| 732 |
+
"vision_tower._vision_tower.layers.28.fc1.weight": "model-00004-of-00004.safetensors",
|
| 733 |
+
"vision_tower._vision_tower.layers.28.fc2.bias": "model-00004-of-00004.safetensors",
|
| 734 |
+
"vision_tower._vision_tower.layers.28.fc2.weight": "model-00004-of-00004.safetensors",
|
| 735 |
+
"vision_tower._vision_tower.layers.28.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 736 |
+
"vision_tower._vision_tower.layers.28.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 737 |
+
"vision_tower._vision_tower.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 738 |
+
"vision_tower._vision_tower.layers.28.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 739 |
+
"vision_tower._vision_tower.layers.28.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 740 |
+
"vision_tower._vision_tower.layers.28.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 741 |
+
"vision_tower._vision_tower.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 742 |
+
"vision_tower._vision_tower.layers.28.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 743 |
+
"vision_tower._vision_tower.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 744 |
+
"vision_tower._vision_tower.layers.28.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 745 |
+
"vision_tower._vision_tower.layers.28.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 746 |
+
"vision_tower._vision_tower.layers.29.fc1.bias": "model-00004-of-00004.safetensors",
|
| 747 |
+
"vision_tower._vision_tower.layers.29.fc1.weight": "model-00004-of-00004.safetensors",
|
| 748 |
+
"vision_tower._vision_tower.layers.29.fc2.bias": "model-00004-of-00004.safetensors",
|
| 749 |
+
"vision_tower._vision_tower.layers.29.fc2.weight": "model-00004-of-00004.safetensors",
|
| 750 |
+
"vision_tower._vision_tower.layers.29.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 751 |
+
"vision_tower._vision_tower.layers.29.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 752 |
+
"vision_tower._vision_tower.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 753 |
+
"vision_tower._vision_tower.layers.29.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 754 |
+
"vision_tower._vision_tower.layers.29.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 755 |
+
"vision_tower._vision_tower.layers.29.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 756 |
+
"vision_tower._vision_tower.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 757 |
+
"vision_tower._vision_tower.layers.29.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 758 |
+
"vision_tower._vision_tower.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 759 |
+
"vision_tower._vision_tower.layers.29.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 760 |
+
"vision_tower._vision_tower.layers.29.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 761 |
+
"vision_tower._vision_tower.layers.3.fc1.bias": "model-00004-of-00004.safetensors",
|
| 762 |
+
"vision_tower._vision_tower.layers.3.fc1.weight": "model-00004-of-00004.safetensors",
|
| 763 |
+
"vision_tower._vision_tower.layers.3.fc2.bias": "model-00004-of-00004.safetensors",
|
| 764 |
+
"vision_tower._vision_tower.layers.3.fc2.weight": "model-00004-of-00004.safetensors",
|
| 765 |
+
"vision_tower._vision_tower.layers.3.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 766 |
+
"vision_tower._vision_tower.layers.3.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 767 |
+
"vision_tower._vision_tower.layers.3.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 768 |
+
"vision_tower._vision_tower.layers.3.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 769 |
+
"vision_tower._vision_tower.layers.3.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 770 |
+
"vision_tower._vision_tower.layers.3.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 771 |
+
"vision_tower._vision_tower.layers.3.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 772 |
+
"vision_tower._vision_tower.layers.3.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 773 |
+
"vision_tower._vision_tower.layers.3.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
| 774 |
+
"vision_tower._vision_tower.layers.3.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 775 |
+
"vision_tower._vision_tower.layers.3.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 776 |
+
"vision_tower._vision_tower.layers.30.fc1.bias": "model-00004-of-00004.safetensors",
|
| 777 |
+
"vision_tower._vision_tower.layers.30.fc1.weight": "model-00004-of-00004.safetensors",
|
| 778 |
+
"vision_tower._vision_tower.layers.30.fc2.bias": "model-00004-of-00004.safetensors",
|
| 779 |
+
"vision_tower._vision_tower.layers.30.fc2.weight": "model-00004-of-00004.safetensors",
|
| 780 |
+
"vision_tower._vision_tower.layers.30.final_layer_norm.bias": "model-00004-of-00004.safetensors",
|
| 781 |
+
"vision_tower._vision_tower.layers.30.final_layer_norm.weight": "model-00004-of-00004.safetensors",
|
| 782 |
+
"vision_tower._vision_tower.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
| 783 |
+
"vision_tower._vision_tower.layers.30.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
|
| 784 |
+
"vision_tower._vision_tower.layers.30.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
|
| 785 |
+
"vision_tower._vision_tower.layers.30.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
|
| 786 |
+
"vision_tower._vision_tower.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
| 787 |
+
"vision_tower._vision_tower.layers.30.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
|
| 788 |
+
"vision_tower._vision_tower.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
    "vision_tower._vision_tower.layers.30.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.30.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.31.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.4.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.5.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.6.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.7.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.8.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.fc1.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.fc1.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.fc2.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.fc2.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.final_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.final_layer_norm.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.out_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.out_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn_layer_norm.bias": "model-00004-of-00004.safetensors",
    "vision_tower._vision_tower.layers.9.self_attn_layer_norm.weight": "model-00004-of-00004.safetensors"
  }
}
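The entries above close out model.safetensors.index.json: `weight_map` maps every parameter name to the shard file that stores it, and `from_pretrained` resolves the shards automatically. A minimal manual sketch of the same lookup (assuming the snapshot has been downloaded to the placeholder directory `repo_dir`):

import json
from safetensors import safe_open

repo_dir = "."  # assumption: path to the downloaded repo snapshot
with open(f"{repo_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

# Pick one parameter and find which shard holds it.
name = "vision_tower._vision_tower.layers.31.fc1.weight"
shard = index["weight_map"][name]  # e.g. "model-00004-of-00004.safetensors"

# Open only that shard and read the single tensor lazily.
with safe_open(f"{repo_dir}/{shard}", framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))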
modeling_mufun.py
ADDED
@@ -0,0 +1,601 @@
from typing import List, Optional, Tuple, Union
import re
import os
import torch
from torch import nn
from transformers import PreTrainedModel
from transformers.generation.utils import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput
from transformers import AutoConfig, AutoModelForCausalLM, Qwen3ForCausalLM, WhisperForConditionalGeneration, StoppingCriteria, AutoProcessor

from .audio_preprocess import AudioPreprocess, load_audios
from .text_preprocess import TextPreprocess
from .message import Message

from .configuration import TinyLlavaConfig, IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN

CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15
LOGDIR = "."


class KeywordsStoppingCriteria(StoppingCriteria):
    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.keyword_ids = []
        self.max_keyword_len = 0
        for keyword in keywords:
            cur_keyword_ids = tokenizer(keyword).input_ids
            if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
                cur_keyword_ids = cur_keyword_ids[1:]
            if len(cur_keyword_ids) > self.max_keyword_len:
                self.max_keyword_len = len(cur_keyword_ids)
            self.keyword_ids.append(torch.tensor(cur_keyword_ids))
        self.tokenizer = tokenizer
        self.start_len = input_ids.shape[1]

    def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
        self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
        for keyword_id in self.keyword_ids:
            if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
                return True
        outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
        for keyword in self.keywords:
            if keyword in outputs:
                return True
        return False

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        outputs = []
        for i in range(output_ids.shape[0]):
            outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores))
        return all(outputs)


ACT_TYPE = {
    'relu': nn.ReLU,
    'gelu': nn.GELU
}

class CNet(nn.Module):
    def __init__(self, config):
        super().__init__()
        def extract_numbers(s):
            match = re.findall(r'(\d+)[ix]', s)
            if len(match) == 2:
                return tuple(map(int, match))
            return None
        ix, hx = extract_numbers(config.connector_type)
        act_type = 'gelu'
        self.act = ACT_TYPE[act_type]()

        vdim = config.vision_hidden_size * ix
        ldim = config.hidden_size

        self.linear1 = nn.Linear(vdim, hx * vdim)
        self.linear2 = nn.Linear(hx * vdim, ldim)

    def forward(self, x):
        x = self.act(self.linear1(x))
        return self.linear2(x)

class Connector(nn.Module):
    def __init__(self, config=None):
        super().__init__()
        self._connector = None

    def load_model(self, **kwargs):
        pretrained_connector_path = kwargs.get('pretrained_connector_path', None)
        if pretrained_connector_path is not None:
            pretrained_connector_path = os.path.join(pretrained_connector_path, 'pytorch_model.bin')
            connector_weights = torch.load(pretrained_connector_path, map_location='cpu')
            def get_w(weights, keyword):
                return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
            self._connector.load_state_dict(get_w(connector_weights, '_connector'))
            print(f'Loading connector from {pretrained_connector_path}...')

        for p in self._connector.parameters():
            p.requires_grad = False

    def forward(self, x):
        return self._connector(x)

class MLPConnector(Connector):
    def __init__(self, config):
        super().__init__()
        self._connector = CNet(config)


def get_value_from_kwargs(kwargs, name):
    if name in kwargs:
        return kwargs.pop(name)
    else:
        return None


class AudioTower(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self._vision_tower = None
        self._image_processor = None
        self.config = cfg

    def load_model(self, vision_tower_name, **kwargs):
        self._load_model(vision_tower_name, **kwargs)
        self._vision_tower.requires_grad_(False)

    def _load_model(self, vision_tower_name, **kwargs):
        pretrained_vision_tower_path = get_value_from_kwargs(kwargs, 'pretrained_vision_tower_path')
        if isinstance(self._vision_tower, PreTrainedModel):  # hf model
            if pretrained_vision_tower_path is not None:
                vision_tower_name = pretrained_vision_tower_path
            self._vision_tower = self._vision_tower.from_pretrained(vision_tower_name, **kwargs)
        else:  # nn.Module
            if pretrained_vision_tower_path is not None:
                vision_tower_weights = torch.load(os.path.join(pretrained_vision_tower_path, 'pytorch_model.bin'), map_location='cpu')
                def get_w(weights, keyword):
                    return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
                self._vision_tower.load_state_dict(vision_tower_weights)

        print("Loading vision tower from ", vision_tower_name)

    def forward(self, x, **kwargs):
        image_features = self._vision_tower(x, output_hidden_states=True)
        image_features = image_features.hidden_states[kwargs.get('vision_feature_layer', -2)]

        if kwargs.get('vision_feature_select_strategy', 'patch') == 'patch':
            image_features = image_features[:, 1:]
        elif kwargs.get('vision_feature_select_strategy', 'patch') == 'cls_patch':
            image_features = image_features
        else:
            raise ValueError(f"Unexpected select feature: {kwargs.get('vision_feature_select_strategy')}")

        return image_features

    @property
    def vision_tower(self):
        return self._vision_tower

    @vision_tower.setter
    def vision_tower(self, vision_tower):
        self._vision_tower = vision_tower

class WpmAudioTower(AudioTower):
    def __init__(self, cfg):
        super().__init__(cfg)

        self._vision_tower = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3").get_encoder()
        self._image_processor = AutoProcessor.from_pretrained("openai/whisper-large-v3")
        self.pool_stride = 5
        self.avg_pooler = nn.AvgPool1d(self.pool_stride, stride=self.pool_stride)
        self.features_layers = [0, 7, 15, 32]

    def _load_model(self, vision_tower_name, **kwargs):
        pretrained_vision_tower_path = kwargs.pop('pretrained_vision_tower_path', None)
        if pretrained_vision_tower_path is None:
            print("Loading vision tower1 from ", vision_tower_name)
        else:  # nn.Module
            vision_tower_weights = torch.load(os.path.join(pretrained_vision_tower_path, 'pytorch_model.bin'), map_location='cpu')
            def get_w(weights, keyword):
                return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
            self._vision_tower.load_state_dict(vision_tower_weights)
            print("Loading vision tower from ", pretrained_vision_tower_path)

    def forward(self, x, **kwargs):
        if len(x.shape) == 4:
            x = torch.squeeze(x, 1)
        image_features = self._vision_tower(x, output_hidden_states=True).hidden_states
        hidden_states = torch.cat([image_features[il] for il in self.features_layers], dim=-1)

        hidden_states = hidden_states.permute(0, 2, 1)
        hidden_states = self.avg_pooler(hidden_states)
        hidden_states = hidden_states.permute(0, 2, 1)

        return hidden_states


class TinyLlavaPreTrainedModel(PreTrainedModel):
    config_class = TinyLlavaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlavaVisionAttention"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        std = (
            self.config.initializer_range
            if hasattr(self.config, "initializer_range")
            else self.config.text_config.initializer_range
        )

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @property
    def _supports_sdpa(self):
        return self.language_model._supports_sdpa


class TinyLlavaForConditionalGeneration(TinyLlavaPreTrainedModel, GenerationMixin):
    def __init__(self, config: TinyLlavaConfig):
        super().__init__(config)

        # apply_liger_kernel_to_qwen3()
        self.language_model = Qwen3ForCausalLM(config.text_config)
        self.vision_tower = WpmAudioTower(config.vision_config)
        self.connector = MLPConnector(config)

        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    def get_decoder(self):
        return self.language_model.get_decoder()

    def tie_weights(self):
        return self.language_model.tie_weights()

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        # update vocab size
        self.config.text_config.vocab_size = model_embeds.num_embeddings
        self.config.vocab_size = model_embeds.num_embeddings
        self.vocab_size = model_embeds.num_embeddings
        return model_embeds

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep=None
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if inputs_embeds is None:
            (
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                inputs_embeds,
                labels
            ) = self.prepare_inputs_labels_for_multimodal(
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                labels,
                images,
                image_sizes
            )

        return self.language_model.forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")
        if isinstance(images, list) and (images != []):
            images = torch.cat(images, dim=0)
        if images is not None:
            (
                inputs,
                position_ids,
                attention_mask,
                _,
                inputs_embeds,
                _
            ) = self.prepare_inputs_labels_for_multimodal(
                inputs,
                position_ids,
                attention_mask,
                None,
                None,
                images,
                image_sizes=image_sizes
            )
        else:
            inputs_embeds = self.language_model.get_input_embeddings()(inputs)

        return self.language_model.generate(
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            **kwargs
        )

    def encode_images(self, images):
        kwargs = {}
        kwargs['vision_feature_layer'] = self.config.vision_feature_layer
        kwargs['vision_feature_select_strategy'] = self.config.vision_feature_select_strategy
        images = images.to(device=self.device, dtype=self.dtype)
        if images.shape[-1] != 3000:
            splits = torch.split(images, 3000, dim=-1)
            image_features = torch.cat([self.connector(self.vision_tower(x, **kwargs)) for x in splits], dim=-1)
        else:
            image_features = self.vision_tower(images, **kwargs)
            image_features = self.connector(image_features)

        return image_features

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
                                      inputs_embeds=None, **kwargs):
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = self.language_model.prepare_inputs_for_generation(
            input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
        )
        if images is not None:
            inputs['images'] = images
        if image_sizes is not None:
            inputs['image_sizes'] = image_sizes
        return inputs

    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels,
        images, image_sizes=None
    ):
        vision_tower = self.vision_tower
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            return input_ids, position_ids, attention_mask, past_key_values, None, labels

        image_features = self.encode_images(images)
        # TODO: image start / end is not implemented here to support pretraining.
        if getattr(self.config, 'tune_mm_mlp_adapter', False):
            raise NotImplementedError

        # Let's just add dummy tensors if they do not exist,
        # it is a headache to deal with None all the time.
        # But it is not ideal, and if you have a better idea,
        # please open an issue / submit a PR, thanks.
        _labels = labels
        _position_ids = position_ids
        _attention_mask = attention_mask
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
        else:
            attention_mask = attention_mask.bool()
        if position_ids is None:
            position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
        if labels is None:
            labels = torch.full_like(input_ids, IGNORE_INDEX)

        # remove the padding using attention_mask -- FIXME
        _input_ids = input_ids
        input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
        labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]

        new_input_embeds = []
        new_labels = []
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
            cur_image_size = image_sizes[batch_idx] if image_sizes is not None else None
            if num_images == 0:
                # cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.language_model.get_input_embeddings()(cur_input_ids)
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
                new_input_embeds.append(cur_input_embeds_1)
                new_labels.append(labels[batch_idx])
                # cur_image_idx += 1
                continue

            image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
            cur_input_ids_noim = []
            cur_labels = labels[batch_idx]
            cur_labels_noim = []
            for i in range(len(image_token_indices) - 1):
                cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
                cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
            split_sizes = [x.shape[0] for x in cur_labels_noim]
            cur_input_embeds = self.language_model.get_input_embeddings()(torch.cat(cur_input_ids_noim))
            cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
            cur_new_input_embeds = []
            cur_new_labels = []

            for i in range(num_images + 1):
                cur_new_input_embeds.append(cur_input_embeds_no_im[i])
                cur_new_labels.append(cur_labels_noim[i])
                if i < num_images:
                    img_size = cur_image_size[i]
                    cur_image_features = image_features[cur_image_idx:cur_image_idx + img_size]
                    cur_image_features = [img.squeeze(0) for img in cur_image_features]
                    cur_image_features = torch.cat(cur_image_features, dim=0)
                    cur_image_idx += img_size
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))

            cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]

            cur_new_input_embeds = torch.cat(cur_new_input_embeds)
            cur_new_labels = torch.cat(cur_new_labels)

            new_input_embeds.append(cur_new_input_embeds)
            new_labels.append(cur_new_labels)

        # Truncate sequences to max length as image embeddings can make the sequence longer
        tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
        if tokenizer_model_max_length is not None:
            new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
            new_labels = [x[:tokenizer_model_max_length] for x in new_labels]

        # Combine them
        max_len = max(x.shape[0] for x in new_input_embeds)
        batch_size = len(new_input_embeds)

        new_input_embeds_padded = []
        new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
        attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
        position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)

        for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
            cur_len = cur_new_embed.shape[0]
            if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
                new_input_embeds_padded.append(torch.cat((
                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
                    cur_new_embed
                ), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, -cur_len:] = cur_new_labels
                    attention_mask[i, -cur_len:] = True
                    position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
            else:
                new_input_embeds_padded.append(torch.cat((
                    cur_new_embed,
                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
                ), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, :cur_len] = cur_new_labels
                    attention_mask[i, :cur_len] = True
                    position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)

        new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)

        if _labels is None:
            new_labels = None
        else:
            new_labels = new_labels_padded

        if _attention_mask is None:
            attention_mask = None
        else:
            attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

        if _position_ids is None:
            position_ids = None
        return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels

    def load_llm(self, **kwargs):
        language_model_name = get_value_from_kwargs(kwargs, 'model_name_or_path')
        pretrained_llm_path = get_value_from_kwargs(kwargs, 'pretrained_llm_path')
        if pretrained_llm_path is not None:
            language_model_name = pretrained_llm_path
        if language_model_name is not None:
            self.language_model = self.language_model.from_pretrained(
                language_model_name, **kwargs
            )
        print('loading language model from ', language_model_name)
        self.language_model.requires_grad_(False)

        self.config.text_config.torch_dtype = kwargs.get('torch_dtype', None)
        self.config.pad_token = getattr(self.tokenizer, 'pad_token', None)
        self.config.pad_token_id = getattr(self.tokenizer, 'pad_token_id', None)
        #self.config.tokenizer_padding_side = getattr(self.tokenizer, 'padding_side', None)
        #self.config.tokenizer_model_max_length = getattr(self.tokenizer, 'model_max_length', None)

    def load_vision_tower(self, **kwargs):
        vision_tower_name = get_value_from_kwargs(kwargs, 'model_name_or_path')
        self.vision_tower.load_model(vision_tower_name, **kwargs)

    def load_connector(self, **kwargs):
        self.connector.load_model(**kwargs)

    def chat(
        self,
        tokenizer,
        prompt,
        audio_files,
        segs=None,
        max_new_tokens=512,
        temperature=0.5,
        top_k=50,
        top_p=1.0,
    ):
        text_processor = TextPreprocess(tokenizer, 'qwen2_instruct')
        audio_processor = AudioPreprocess(self.vision_tower._image_processor, self.config)
        msg = Message()
        audio_tensor, audio_size = load_audios(audio_processor, audio_files, segs)
        if audio_tensor and ('<audio>' not in prompt):
            prompt = '<audio>\n' + prompt
        msg.add_message(prompt)
        result = text_processor(msg.messages, mode='eval')
        input_ids = result['input_ids'].unsqueeze(0).to(self.device)
        stop_str = text_processor.template.separator.apply()[1]
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        with torch.inference_mode():
            output_ids = self.generate(
                input_ids,
                images=audio_tensor,
                do_sample=True if temperature > 0 else False,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                max_new_tokens=max_new_tokens,
                use_cache=True,
                pad_token_id=tokenizer.eos_token_id,
                image_sizes=[audio_size] if audio_tensor is not None else None,
                stopping_criteria=[stopping_criteria]
            )
        gen_text = tokenizer.decode(output_ids[0])
        if gen_text.endswith(stop_str):
            gen_text = gen_text[:-len(stop_str)]
        return gen_text


AutoConfig.register("tinyllava", TinyLlavaConfig)
AutoModelForCausalLM.register(TinyLlavaConfig, TinyLlavaForConditionalGeneration)
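Since the module registers TinyLlavaConfig and TinyLlavaForConditionalGeneration with the Auto classes above, the model should load with `trust_remote_code=True`. A minimal usage sketch, not part of the commit; the repo path and audio file below are placeholders:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this/repo"  # assumption: local snapshot or Hub id of this model
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True
).to("cuda").eval()

# chat() prepends "<audio>\n" when audio is given, builds the qwen2_instruct
# prompt, and stops generation at the template's "<|im_end|>" separator.
answer = model.chat(
    tokenizer,
    prompt="Transcribe the speech in this recording.",
    audio_files=["sample.wav"],  # assumption: any audio file audio_preprocess.py supports
    max_new_tokens=256,
    temperature=0.5,
)
print(answer)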
special_tokens_map.json
ADDED
@@ -0,0 +1,32 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": "<|endoftext|>"
}
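A quick runtime check (a sketch, reusing the placeholder `repo` path from above) that this mapping matches what the tokenizer reports; note that `<|im_end|>` doubles as the chat stop string in modeling_mufun.py:

from transformers import AutoTokenizer

repo = "path/to/this/repo"  # assumption: same placeholder as in the earlier sketch
tok = AutoTokenizer.from_pretrained(repo)
print(tok.eos_token)   # "<|im_end|>" -- also used as the generation stop keyword
print(tok.pad_token)   # "<|endoftext|>"
print(tok.convert_tokens_to_ids("<|im_end|>"))  # 151645, matching added_tokens.json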
text_preprocess.py
ADDED
@@ -0,0 +1,243 @@
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
import copy
from typing import Any


# IGNORE_INDEX = -100
# IMAGE_TOKEN_INDEX = -200
# DEFAULT_IMAGE_TOKEN = "<audio>"
from .configuration import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN

from transformers import PreTrainedTokenizer
import torch
from abc import ABC, abstractmethod
# from dataclasses import dataclass
# from typing import Dict, Union, List


SLOT = Union[str, List[str], Dict[str, str]]

@dataclass
class Formatter(ABC):
    slot: SLOT = ""

    @abstractmethod
    def apply(self, **kwargs) -> SLOT: ...


@dataclass
class EmptyFormatter(Formatter):
    def apply(self, **kwargs) -> SLOT:
        return self.slot


@dataclass
class StringFormatter(Formatter):
    def apply(self, **kwargs) -> SLOT:
        msg = ""
        for name, value in kwargs.items():
            if value is None:
                msg = self.slot.split(':')[0] + ":"
                return msg
            if not isinstance(value, str):
                raise RuntimeError("Expected a string, got {}".format(value))
            msg = self.slot.replace("{{" + name + "}}", value, 1)
        return msg


@dataclass
class Template:
    format_image_token: "Formatter"
    format_user: "Formatter"
    format_assistant: "Formatter"
    system: "Formatter"
    separator: "Formatter"

    def encode(self, messages, tokenizer, mode='train'):
        """
        1. get lists from messages (conversations: [{from: human, value: message}, {from: gpt, value: message}])
           ===> question_list, answer_list
        2. build the prompt from the two lists
        3. tokenize the prompt
        4. make the target labels
        """
        question_list, answer_list = self.get_list_from_message(messages)
        if mode == 'rl':
            gt = answer_list[-1]
            answer_list[-1] = ''  # last answer is empty in RL mode
        prompt = self.prompt(question_list, answer_list)
        if mode == 'rl' and prompt.endswith(self.separator.apply()[1]):
            prompt = prompt[:-len(self.separator.apply()[1])]
        input_ids = self.tokenizer_image_token(prompt, tokenizer, return_tensors='pt')
        if mode == 'train':
            labels = self.make_labels(input_ids, prompt, tokenizer)
            return dict(
                input_ids=input_ids,
                labels=labels
            )
        elif mode == 'rl':
            return dict(
                input_ids=input_ids,
                prompt=prompt,
                gt=gt
            )
        else:
            return dict(input_ids=input_ids, prompt=prompt)

    def get_list_from_message(self, messages):
        return self._get_list_from_message(messages)

    def _get_list_from_message(self, messages):
        """
        messages ====> [{from: human, value: message}, {from: gpt, value: message}]
        """
        question_list = []
        answer_list = []
        first_is_not_question = 0
        for i, message in enumerate(messages):
            if i == 0 and message['from'] != 'human':
                first_is_not_question = 1
                continue
            if i % 2 == first_is_not_question:
                question_list.append(message['value'])
            else:
                answer_list.append(message['value'])

        assert len(question_list) == len(answer_list), \
            f"qa does not match: length_q:{len(question_list)} vs length_a:{len(answer_list)}"
        return question_list, answer_list

    def prompt(
        self,
        question_list, answer_list
    ):
        if type(question_list) is str:
            question_list = [question_list]
        if type(answer_list) is str:
            answer_list = [answer_list]
        msg = self._prompt(question_list, answer_list)
        return msg

    def _prompt(
        self,
        question_list, answer_list,
    ):
        msg = ""
        for i, (question, answer) in enumerate(zip(question_list, answer_list)):
            if i == 0:
                msg += self.system.apply()
            # if DEFAULT_IMAGE_TOKEN in question:
            #     question = question.replace(DEFAULT_IMAGE_TOKEN, '').strip()
            #     question = self.format_image_token.apply(content=question).strip()
            msg += self.format_user.apply(content=question)
            msg += self.format_assistant.apply(content=answer)
        return msg

    def make_labels(self, input_ids, prompt, tokenizer):
        labels = copy.deepcopy(input_ids)
        sep, eos_token = self.separator.apply()
        total_len = int(labels.ne(tokenizer.pad_token_id).sum())
        if tokenizer.pad_token_id == tokenizer.eos_token_id:
            total_len += prompt.count(eos_token)
        rounds = prompt.split(eos_token)
        eos_token_length = len(tokenizer.encode(eos_token))
        labels, cur_len = self._make_masks(labels, tokenizer, sep, eos_token_length, rounds)
        if cur_len < tokenizer.model_max_length:
            # import time
            if (cur_len != total_len) and ((cur_len + 1) != total_len):
                print(
                    f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
                    f" (ignored)"
                )
                print("number of rounds: ", len(rounds) - 1)
                print("rounds: ", rounds[:-1])
                print("prompt: ", prompt)
                print(labels)
                print(input_ids)
                # time.sleep(5)
                # labels[:] = IGNORE_INDEX
        return labels

    def _make_masks(self, labels, tokenizer, sep, eos_token_length, rounds):
        cur_len = 0
        for rou in rounds:
            if rou == "":
                break
            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep
            round_len = len(self.tokenizer_image_token(rou, tokenizer)) + eos_token_length
            instruction_len = len(self.tokenizer_image_token(parts[0], tokenizer)) - 1
            labels[cur_len : cur_len + instruction_len] = IGNORE_INDEX
            cur_len += round_len
        labels[cur_len:] = IGNORE_INDEX
        return labels, cur_len

    @classmethod
    def tokenizer_image_token(cls, prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
        def _insert_separator(X, sep):
            return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]
        prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<audio>')]

        input_ids = []
        offset = 0
        if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
            offset = 1
            input_ids.append(prompt_chunks[0][0])

        for x in _insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
            input_ids.extend(x[offset:])

        if return_tensors is not None:
            if return_tensors == 'pt':
                return torch.tensor(input_ids, dtype=torch.long)
            raise ValueError(f'Unsupported tensor type: {return_tensors}')
        return input_ids


TEMPLATE_FACTORY: Dict[str, Template] = {}

def TemplateFactory(version):
    template = TEMPLATE_FACTORY.get(version, None)
    assert template, f"{version} is not implemented"
    return template


def register_template(name):
    def register_template_cls(cls):
        if name in TEMPLATE_FACTORY:
            return TEMPLATE_FACTORY[name]

        TEMPLATE_FACTORY[name] = cls
        return cls

    return register_template_cls


system = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."

@register_template('qwen2_instruct')
@dataclass
class Qwen2InstructTemplate(Template):
    format_image_token: "Formatter" = field(default_factory=lambda: StringFormatter(slot="<audio>\n{{content}}"))
    format_user: "Formatter" = field(default_factory=lambda: StringFormatter(slot="USER" + ": " + "{{content}}" + " "))
    format_assistant: "Formatter" = field(default_factory=lambda: StringFormatter(slot="ASSISTANT" + ": " + "{{content}}" + "<|im_end|>"))
    system: "Formatter" = field(default_factory=lambda: EmptyFormatter(slot=system + " "))
    separator: "Formatter" = field(default_factory=lambda: EmptyFormatter(slot=[' ASSISTANT: ', '<|im_end|>']))


class TextPreprocess:
    def __init__(self, tokenizer, version):
        self.tokenizer = tokenizer
        self.template = TemplateFactory(version)()

    def __call__(self, messages, mode='eval'):
        return self.template.encode(messages, self.tokenizer, mode)
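To see what the `qwen2_instruct` template actually emits, a small sketch (run where Qwen2InstructTemplate is in scope; the module uses relative imports, so load it via the packaged repo rather than as a standalone script):

# Build the prompt for a single user turn with an empty assistant slot, as
# eval mode does. StringFormatter returns "ASSISTANT:" when content is None.
tmpl = Qwen2InstructTemplate()
prompt = tmpl.prompt(["<audio>\nWhat is said here?"], [None])
print(prompt)
# -> "<system text> USER: <audio>\nWhat is said here? ASSISTANT:"
# Template.tokenizer_image_token() then splits this prompt on "<audio>" and
# splices IMAGE_TOKEN_INDEX (-200) between the tokenized chunks; those slots
# are replaced with pooled Whisper features in prepare_inputs_labels_for_multimodal().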
tokenizer_config.json
ADDED
@@ -0,0 +1,241 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151666": {
      "content": "</tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151667": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151668": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is defined and message.reasoning_content is not none %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in message.content %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and 
enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}",
|
| 231 |
+
"clean_up_tokenization_spaces": false,
|
| 232 |
+
"eos_token": "<|im_end|>",
|
| 233 |
+
"errors": "replace",
|
| 234 |
+
"extra_special_tokens": {},
|
| 235 |
+
"model_max_length": 32768,
|
| 236 |
+
"pad_token": "<|endoftext|>",
|
| 237 |
+
"padding_side": "right",
|
| 238 |
+
"split_special_tokens": false,
|
| 239 |
+
"tokenizer_class": "Qwen2Tokenizer",
|
| 240 |
+
"unk_token": "<|endoftext|>"
|
| 241 |
+
}
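
Note on the "special" flag above: entries with "special": true (151643-151656) are hard control tokens that skip_special_tokens=True strips during decoding, while "special": false entries such as <think> and <tool_call> still tokenize to single ids but survive decoding. A minimal sketch of checking this through the transformers API; the repo path is a placeholder, not something defined by this commit:

from transformers import AutoTokenizer

# Placeholder path -- point this at the actual model directory or hub id.
tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")

# Control tokens from added_tokens_decoder resolve to the ids listed above.
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645     # eos_token
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 151643  # pad/unk token

# "special": false tokens are still atomic, so a <think>...</think> span
# tokenizes to 151667 ... 151668, and decode() keeps these markers even
# when skip_special_tokens=True.
ids = tokenizer("<think>ok</think>")["input_ids"]
print(ids[0], ids[-1])  # 151667 151668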
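The chat_template packs three behaviors into one Jinja string: ChatML framing with <|im_start|>/<|im_end|>, a <tools> block plus the <tool_call>/<tool_response> protocol when tools are passed, and a <think> reasoning block that is stripped from earlier assistant turns and can be disabled per call. A sketch of driving the enable_thinking branch; extra keyword arguments to apply_chat_template are forwarded to the template, and the repo path is again a placeholder:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder
messages = [{"role": "user", "content": "Summarize the attached clip."}]

# Default: the prompt ends at '<|im_start|>assistant\n', leaving the model
# free to open its own <think> block.
print(tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True))

# enable_thinking=False reaches the template's final {%- if %} branch and
# pre-fills an empty '<think>\n\n</think>\n\n', so generation starts with
# the visible answer immediately.
print(tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True,
    enable_thinking=False))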
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff