torettomarui committed on
Commit 235a39c · verified · 1 Parent(s): 4c4d621

Delete model

model/.DS_Store DELETED
Binary file (8.2 kB)
 
model/added_tokens.json DELETED
@@ -1,24 +0,0 @@
- {
-   "</tool_call>": 151658,
-   "<tool_call>": 151657,
-   "<|box_end|>": 151649,
-   "<|box_start|>": 151648,
-   "<|endoftext|>": 151643,
-   "<|file_sep|>": 151664,
-   "<|fim_middle|>": 151660,
-   "<|fim_pad|>": 151662,
-   "<|fim_prefix|>": 151659,
-   "<|fim_suffix|>": 151661,
-   "<|im_end|>": 151645,
-   "<|im_start|>": 151644,
-   "<|image_pad|>": 151655,
-   "<|object_ref_end|>": 151647,
-   "<|object_ref_start|>": 151646,
-   "<|quad_end|>": 151651,
-   "<|quad_start|>": 151650,
-   "<|repo_name|>": 151663,
-   "<|video_pad|>": 151656,
-   "<|vision_end|>": 151653,
-   "<|vision_pad|>": 151654,
-   "<|vision_start|>": 151652
- }
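
Note: the deleted added_tokens.json pinned the Qwen2-family special tokens (ChatML markers, vision/box/quad delimiters, FIM and tool-call tags) to fixed vocabulary IDs. A minimal sketch of how such a mapping is consumed, assuming a local checkout that still contains the file (path hypothetical):

from transformers import AutoTokenizer

# Hypothetical path to a checkpoint directory containing added_tokens.json.
tok = AutoTokenizer.from_pretrained('./model', trust_remote_code=True)

# Added tokens resolve to the fixed IDs from added_tokens.json; e.g. the
# ChatML terminator, which config.json also uses as eos_token_id (151645).
assert tok.convert_tokens_to_ids('<|im_end|>') == 151645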
 
model/config.json DELETED
@@ -1,197 +0,0 @@
- {
-   "_commit_hash": null,
-   "_name_or_path": "LlavaQw",
-   "architectures": [
-     "LlavaQwModel"
-   ],
-   "auto_map": {
-     "AutoConfig": "configuration_llavaqw.LlavaQwConfig",
-     "AutoModel": "modeling_llavaqw.LlavaQwModel",
-     "AutoModelForCausalLM": "modeling_llavaqw.LlavaQwModel"
-   },
-   "downsample_ratio": 0.5,
-   "dynamic_image_size": true,
-   "force_image_size": 448,
-   "llm_config": {
-     "_name_or_path": "Qwen2.5-7B-Instruct",
-     "add_cross_attention": false,
-     "architectures": [
-       "Qwen2ForCausalLM"
-     ],
-     "attention_dropout": 0.0,
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": 151643,
-     "chunk_size_feed_forward": 0,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": 151645,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "silu",
-     "hidden_size": 3584,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "initializer_range": 0.02,
-     "intermediate_size": 18944,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "max_position_embeddings": 32768,
-     "max_window_layers": 28,
-     "min_length": 0,
-     "model_type": "qwen2",
-     "no_repeat_ngram_size": 0,
-     "num_attention_heads": 28,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_hidden_layers": 28,
-     "num_key_value_heads": 4,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": null,
-     "prefix": null,
-     "problem_type": null,
-     "pruned_heads": {},
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "rms_norm_eps": 1e-06,
-     "rope_scaling": null,
-     "rope_theta": 1000000.0,
-     "sep_token_id": null,
-     "sliding_window": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": false,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": "bfloat16",
-     "torchscript": false,
-     "transformers_version": "4.45.0",
-     "typical_p": 1.0,
-     "use_bfloat16": false,
-     "use_cache": true,
-     "use_sliding_window": false,
-     "vocab_size": 152064
-   },
-   "max_dynamic_patch": 6,
-   "min_dynamic_patch": 1,
-   "model_type": "LlavaQw",
-   "ps_version": "v2",
-   "select_layer": -1,
-   "template": "chatml",
-   "torch_dtype": "bfloat16",
-   "transformers_version": null,
-   "use_backbone_lora": 0,
-   "use_llm_lora": 0,
-   "use_thumbnail": true,
-   "vision_config": {
-     "_name_or_path": "",
-     "add_cross_attention": false,
-     "architectures": [
-       "InternVisionModel"
-     ],
-     "attention_dropout": 0.0,
-     "auto_map": {
-       "AutoConfig": "configuration_intern_vit.InternVisionConfig",
-       "AutoModel": "modeling_intern_vit.InternVisionModel"
-     },
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": null,
-     "chunk_size_feed_forward": 0,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "drop_path_rate": 0.1,
-     "dropout": 0.0,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": null,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "gelu",
-     "hidden_size": 1024,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "image_size": 448,
-     "initializer_factor": 1.0,
-     "initializer_range": 0.02,
-     "intermediate_size": 4096,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "layer_norm_eps": 1e-06,
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "min_length": 0,
-     "model_type": "intern_vit_6b",
-     "no_repeat_ngram_size": 0,
-     "norm_type": "layer_norm",
-     "num_attention_heads": 16,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_channels": 3,
-     "num_hidden_layers": 24,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": null,
-     "patch_size": 14,
-     "prefix": null,
-     "problem_type": null,
-     "pruned_heads": {},
-     "qk_normalization": false,
-     "qkv_bias": true,
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": true,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": "bfloat16",
-     "torchscript": false,
-     "transformers_version": "4.45.0",
-     "typical_p": 1.0,
-     "use_bfloat16": false,
-     "use_flash_attn": true
-   }
- }
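
Note: the deleted config.json describes a composite checkpoint: an InternViT tower under vision_config, a Qwen2.5-7B-Instruct decoder under llm_config, and an auto_map pointing at the custom configuration/modeling files deleted below. A minimal loading sketch under those assumptions (path hypothetical; trust_remote_code lets auto_map resolve the custom classes):

from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained('./model', trust_remote_code=True)
print(config.llm_config.hidden_size)    # 3584 (Qwen2.5-7B)
print(config.vision_config.image_size)  # 448

# Weights load across the four safetensors shards listed in
# model.safetensors.index.json.
model = AutoModel.from_pretrained('./model', trust_remote_code=True,
                                  torch_dtype='bfloat16')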
 
model/configuration_intern_vit.py DELETED
@@ -1,119 +0,0 @@
- # --------------------------------------------------------
- # InternVL
- # Copyright (c) 2023 OpenGVLab
- # Licensed under The MIT License [see LICENSE for details]
- # --------------------------------------------------------
- import os
- from typing import Union
-
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
-
- class InternVisionConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
-     instantiate a vision encoder according to the specified arguments, defining the model architecture.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-     Args:
-         num_channels (`int`, *optional*, defaults to 3):
-             Number of color channels in the input images (e.g., 3 for RGB).
-         patch_size (`int`, *optional*, defaults to 14):
-             The size (resolution) of each patch.
-         image_size (`int`, *optional*, defaults to 224):
-             The size (resolution) of each image.
-         qkv_bias (`bool`, *optional*, defaults to `False`):
-             Whether to add a bias to the queries and values in the self-attention layers.
-         hidden_size (`int`, *optional*, defaults to 3200):
-             Dimensionality of the encoder layers and the pooler layer.
-         num_attention_heads (`int`, *optional*, defaults to 25):
-             Number of attention heads for each attention layer in the Transformer encoder.
-         intermediate_size (`int`, *optional*, defaults to 12800):
-             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
-         qk_normalization (`bool`, *optional*, defaults to `True`):
-             Whether to normalize the queries and keys in the self-attention layers.
-         num_hidden_layers (`int`, *optional*, defaults to 48):
-             Number of hidden layers in the Transformer encoder.
-         use_flash_attn (`bool`, *optional*, defaults to `True`):
-             Whether to use the flash attention mechanism.
-         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
-             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-             `"relu"`, `"selu"` and `"gelu_new"` are supported.
-         layer_norm_eps (`float`, *optional*, defaults to 1e-6):
-             The epsilon used by the layer normalization layers.
-         dropout (`float`, *optional*, defaults to 0.0):
-             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-         drop_path_rate (`float`, *optional*, defaults to 0.0):
-             Dropout rate for stochastic depth.
-         attention_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for the attention probabilities.
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         initializer_factor (`float`, *optional*, defaults to 0.1):
-             A factor for layer scale.
-     """
-
-     model_type = 'intern_vit_6b'
-
-     def __init__(
-             self,
-             num_channels=3,
-             patch_size=14,
-             image_size=224,
-             qkv_bias=False,
-             hidden_size=3200,
-             num_attention_heads=25,
-             intermediate_size=12800,
-             qk_normalization=True,
-             num_hidden_layers=48,
-             use_flash_attn=True,
-             hidden_act='gelu',
-             norm_type='rms_norm',
-             layer_norm_eps=1e-6,
-             dropout=0.0,
-             drop_path_rate=0.0,
-             attention_dropout=0.0,
-             initializer_range=0.02,
-             initializer_factor=0.1,
-             **kwargs,
-     ):
-         super().__init__(**kwargs)
-
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.dropout = dropout
-         self.drop_path_rate = drop_path_rate
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.num_channels = num_channels
-         self.patch_size = patch_size
-         self.image_size = image_size
-         self.initializer_range = initializer_range
-         self.initializer_factor = initializer_factor
-         self.attention_dropout = attention_dropout
-         self.layer_norm_eps = layer_norm_eps
-         self.hidden_act = hidden_act
-         self.norm_type = norm_type
-         self.qkv_bias = qkv_bias
-         self.qk_normalization = qk_normalization
-         self.use_flash_attn = use_flash_attn
-
-     @classmethod
-     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
-         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
-
-         if 'vision_config' in config_dict:
-             config_dict = config_dict['vision_config']
-
-         if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
-             logger.warning(
-                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
-                 f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
-             )
-
-         return cls.from_dict(config_dict, **kwargs)
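
Note: a minimal sketch of how this config class was used; the values mirror the vision_config block of the deleted config.json, and the from_pretrained override above unwraps a nested vision_config so it can be pointed at the composite config directly (assumes the file is importable as a module):

from configuration_intern_vit import InternVisionConfig

vision_config = InternVisionConfig(
    image_size=448,       # matches force_image_size in config.json
    patch_size=14,
    hidden_size=1024,
    num_attention_heads=16,
    num_hidden_layers=24,
    qkv_bias=True,
    qk_normalization=False,
    norm_type='layer_norm',
)
# InternVisionConfig.from_pretrained('./model') would instead pull the
# nested 'vision_config' block out of the composite config.json.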
 
model/configuration_llavaqw.py DELETED
@@ -1,99 +0,0 @@
- # --------------------------------------------------------
- # Adapted from https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B under MIT License
- # LICENSE is in incl_licenses directory.
- # --------------------------------------------------------
-
- import copy
-
- from transformers import AutoConfig, Qwen2Config
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- from .configuration_intern_vit import InternVisionConfig
-
- logger = logging.get_logger(__name__)
-
-
- class LlavaQwConfig(PretrainedConfig):
-     model_type = 'LlavaQw'
-     is_composition = True
-
-     def __init__(
-             self,
-             vision_config=None,
-             llm_config=None,
-             use_backbone_lora=0,
-             use_llm_lora=0,
-             select_layer=-1,
-             force_image_size=None,
-             downsample_ratio=0.5,
-             template=None,
-             dynamic_image_size=False,
-             use_thumbnail=False,
-             ps_version='v1',
-             min_dynamic_patch=1,
-             max_dynamic_patch=6,
-             **kwargs
-     ):
-         super().__init__(**kwargs)
-
-         if vision_config is None:
-             vision_config = {'architectures': ['InternVisionModel']}
-             logger.info('vision_config is None. Initializing InternVisionConfig with default values.')
-
-         # Handle llm_config initialization
-         if llm_config is None:
-             llm_config = {'architectures': ['Qwen2ForCausalLM']}
-             logger.info('llm_config is None. Initializing LLM Config with default values.')
-
-         self.vision_config = InternVisionConfig(**vision_config)
-
-         # Check for supported architecture
-         if llm_config.get('architectures', [None])[0] == 'Qwen2ForCausalLM':
-             self.llm_config = Qwen2Config(**llm_config)
-         else:
-             raise ValueError(f"Unsupported architecture: {llm_config.get('architectures', [None])[0]}")
-
-         # Assign configuration values
-         self.use_backbone_lora = use_backbone_lora
-         self.use_llm_lora = use_llm_lora
-         self.select_layer = select_layer
-         self.force_image_size = force_image_size
-         self.downsample_ratio = downsample_ratio
-         self.template = template
-         self.dynamic_image_size = dynamic_image_size
-         self.use_thumbnail = use_thumbnail
-         self.ps_version = ps_version  # Pixel shuffle version
-         self.min_dynamic_patch = min_dynamic_patch
-         self.max_dynamic_patch = max_dynamic_patch
-
-         # Log important parameters
-         logger.info(f'vision_select_layer: {self.select_layer}')
-         logger.info(f'ps_version: {self.ps_version}')
-         logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
-         logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
-
-     def to_dict(self):
-         """
-         Serializes this instance to a Python dictionary. Overrides the default `PretrainedConfig.to_dict`.
-
-         Returns:
-             Dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.
-         """
-         output = copy.deepcopy(self.__dict__)
-         output['vision_config'] = self.vision_config.to_dict()
-         output['llm_config'] = self.llm_config.to_dict()
-         output['model_type'] = self.model_type
-         output['use_backbone_lora'] = self.use_backbone_lora
-         output['use_llm_lora'] = self.use_llm_lora
-         output['select_layer'] = self.select_layer
-         output['force_image_size'] = self.force_image_size
-         output['downsample_ratio'] = self.downsample_ratio
-         output['template'] = self.template
-         output['dynamic_image_size'] = self.dynamic_image_size
-         output['use_thumbnail'] = self.use_thumbnail
-         output['ps_version'] = self.ps_version
-         output['min_dynamic_patch'] = self.min_dynamic_patch
-         output['max_dynamic_patch'] = self.max_dynamic_patch
-
-         return output
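
Note: a minimal construction sketch for this composite config (import path hypothetical; the relative import above means the model directory must be importable as a package):

from model.configuration_llavaqw import LlavaQwConfig

config = LlavaQwConfig(
    vision_config={'architectures': ['InternVisionModel'], 'image_size': 448},
    llm_config={'architectures': ['Qwen2ForCausalLM'], 'hidden_size': 3584},
    template='chatml',
    dynamic_image_size=True,
    use_thumbnail=True,
    ps_version='v2',
)
# to_dict() re-nests both sub-configs, so the config round-trips through
# save_pretrained()/from_pretrained().
assert config.to_dict()['llm_config']['hidden_size'] == 3584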
 
model/conversation.py DELETED
@@ -1,358 +0,0 @@
- """
- Adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py under the Apache License 2.0.
- LICENSE is in incl_licenses directory.
-
- Conversation prompt templates.
-
- We kindly request that you import fastchat instead of copying this file if you wish to use it.
- If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
- """
-
- import dataclasses
- from enum import IntEnum, auto
- from typing import Any, Dict, List, Tuple, Union
-
-
- class SeparatorStyle(IntEnum):
-     """Separator styles."""
-
-     ADD_COLON_SINGLE = auto()
-     ADD_COLON_TWO = auto()
-     ADD_COLON_SPACE_SINGLE = auto()
-     NO_COLON_SINGLE = auto()
-     NO_COLON_TWO = auto()
-     ADD_NEW_LINE_SINGLE = auto()
-     LLAMA2 = auto()
-     CHATGLM = auto()
-     CHATML = auto()
-     CHATINTERN = auto()
-     DOLLY = auto()
-     RWKV = auto()
-     PHOENIX = auto()
-     ROBIN = auto()
-     FALCON_CHAT = auto()
-     CHATGLM3 = auto()
-     INTERNVL_ZH = auto()
-     MPT = auto()
-
-
- @dataclasses.dataclass
- class Conversation:
-     """A class that manages prompt templates and keeps all conversation history."""
-
-     # The name of this template
-     name: str
-     # The template of the system prompt
-     system_template: str = '{system_message}'
-     # The system message
-     system_message: str = ''
-     # The names of two roles
-     roles: Tuple[str] = ('USER', 'ASSISTANT')
-     # All messages. Each item is (role, message).
-     messages: List[List[str]] = ()
-     # The number of few shot examples
-     offset: int = 0
-     # The separator style and configurations
-     sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
-     sep: str = '\n'
-     sep2: str = None
-     # Stop criteria (the default one is EOS token)
-     stop_str: Union[str, List[str]] = None
-     # Stops generation if meeting any token in this list
-     stop_token_ids: List[int] = None
-
-     def get_prompt(self) -> str:
-         """Get the prompt for generation."""
-         system_prompt = self.system_template.format(system_message=self.system_message)
-         if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
-             ret = system_prompt + self.sep
-             for role, message in self.messages:
-                 if message:
-                     ret += role + ': ' + message + self.sep
-                 else:
-                     ret += role + ':'
-             return ret
-         elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
-             seps = [self.sep, self.sep2]
-             ret = system_prompt + seps[0]
-             for i, (role, message) in enumerate(self.messages):
-                 if message:
-                     ret += role + ': ' + message + seps[i % 2]
-                 else:
-                     ret += role + ':'
-             return ret
-         elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
-             ret = system_prompt + self.sep
-             for role, message in self.messages:
-                 if message:
-                     ret += role + ': ' + message + self.sep
-                 else:
-                     ret += role + ': '  # must end with a space
-             return ret
-         elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
-             ret = '' if system_prompt == '' else system_prompt + self.sep
-             for role, message in self.messages:
-                 if message:
-                     ret += role + '\n' + message + self.sep
-                 else:
-                     ret += role + '\n'
-             return ret
-         elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
-             ret = system_prompt
-             for role, message in self.messages:
-                 if message:
-                     ret += role + message + self.sep
-                 else:
-                     ret += role
-             return ret
-         elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
-             seps = [self.sep, self.sep2]
-             ret = system_prompt
-             for i, (role, message) in enumerate(self.messages):
-                 if message:
-                     ret += role + message + seps[i % 2]
-                 else:
-                     ret += role
-             return ret
-         elif self.sep_style == SeparatorStyle.RWKV:
-             ret = system_prompt
-             for i, (role, message) in enumerate(self.messages):
-                 if message:
-                     ret += (
-                         role
-                         + ': '
-                         + message.replace('\r\n', '\n').replace('\n\n', '\n')
-                     )
-                     ret += '\n\n'
-                 else:
-                     ret += role + ':'
-             return ret
-         elif self.sep_style == SeparatorStyle.LLAMA2:
-             seps = [self.sep, self.sep2]
-             if self.system_message:
-                 ret = system_prompt
-             else:
-                 ret = '[INST] '
-             for i, (role, message) in enumerate(self.messages):
-                 tag = self.roles[i % 2]
-                 if message:
-                     if i == 0:
-                         ret += message + ' '
-                     else:
-                         ret += tag + ' ' + message + seps[i % 2]
-                 else:
-                     ret += tag
-             return ret
-         elif self.sep_style == SeparatorStyle.CHATGLM:
-             # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
-             # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
-             round_add_n = 1 if self.name == 'chatglm2' else 0
-             if system_prompt:
-                 ret = system_prompt + self.sep
-             else:
-                 ret = ''
-
-             for i, (role, message) in enumerate(self.messages):
-                 if i % 2 == 0:
-                     ret += f'[Round {i//2 + round_add_n}]{self.sep}'
-
-                 if message:
-                     ret += f'{role}:{message}{self.sep}'
-                 else:
-                     ret += f'{role}:'
-             return ret
-         elif self.sep_style == SeparatorStyle.CHATML:
-             ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
-             for role, message in self.messages:
-                 if message:
-                     ret += role + '\n' + message + self.sep + '\n'
-                 else:
-                     ret += role + '\n'
-             return ret
-         elif self.sep_style == SeparatorStyle.CHATGLM3:
-             ret = ''
-             if self.system_message:
-                 ret += system_prompt
-             for role, message in self.messages:
-                 if message:
-                     ret += role + '\n' + ' ' + message
-                 else:
-                     ret += role
-             return ret
-         elif self.sep_style == SeparatorStyle.CHATINTERN:
-             # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
-             seps = [self.sep, self.sep2]
-             ret = system_prompt
-             for i, (role, message) in enumerate(self.messages):
-                 # if i % 2 == 0:
-                 #     ret += "<s>"
-                 if message:
-                     ret += role + ':' + message + seps[i % 2] + '\n'
-                 else:
-                     ret += role + ':'
-             return ret
-         elif self.sep_style == SeparatorStyle.DOLLY:
-             seps = [self.sep, self.sep2]
-             ret = system_prompt
-             for i, (role, message) in enumerate(self.messages):
-                 if message:
-                     ret += role + ':\n' + message + seps[i % 2]
-                     if i % 2 == 1:
-                         ret += '\n\n'
-                 else:
-                     ret += role + ':\n'
-             return ret
-         elif self.sep_style == SeparatorStyle.PHOENIX:
-             ret = system_prompt
-             for role, message in self.messages:
-                 if message:
-                     ret += role + ': ' + '<s>' + message + '</s>'
-                 else:
-                     ret += role + ': ' + '<s>'
-             return ret
-         elif self.sep_style == SeparatorStyle.ROBIN:
-             ret = system_prompt + self.sep
-             for role, message in self.messages:
-                 if message:
-                     ret += role + ':\n' + message + self.sep
-                 else:
-                     ret += role + ':\n'
-             return ret
-         elif self.sep_style == SeparatorStyle.FALCON_CHAT:
-             ret = ''
-             if self.system_message:
-                 ret += system_prompt + self.sep
-             for role, message in self.messages:
-                 if message:
-                     ret += role + ': ' + message + self.sep
-                 else:
-                     ret += role + ':'
-
-             return ret
-         elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
-             seps = [self.sep, self.sep2]
-             ret = self.system_message + seps[0]
-             for i, (role, message) in enumerate(self.messages):
-                 if message:
-                     ret += role + ': ' + message + seps[i % 2]
-                 else:
-                     ret += role + ':'
-             return ret
-         elif self.sep_style == SeparatorStyle.MPT:
-             ret = ''
-             for role, message in self.messages:
-                 if message:
-                     if type(message) is tuple:
-                         message, _, _ = message
-                     ret += role + message + self.sep
-                 else:
-                     ret += role
-             return ret
-         else:
-             raise ValueError(f'Invalid style: {self.sep_style}')
-
-     def set_system_message(self, system_message: str):
-         """Set the system message."""
-         self.system_message = system_message
-
-     def append_message(self, role: str, message: str):
-         """Append a new message."""
-         self.messages.append([role, message])
-
-     def update_last_message(self, message: str):
-         """Update the last output.
-
-         The last message is typically set to be None when constructing the prompt,
-         so we need to update it in-place after getting the response from a model.
-         """
-         self.messages[-1][1] = message
-
-     def to_gradio_chatbot(self):
-         """Convert the conversation to gradio chatbot format."""
-         ret = []
-         for i, (role, msg) in enumerate(self.messages[self.offset :]):
-             if i % 2 == 0:
-                 ret.append([msg, None])
-             else:
-                 ret[-1][-1] = msg
-         return ret
-
-     def to_openai_api_messages(self):
-         """Convert the conversation to OpenAI chat completion format."""
-         ret = [{'role': 'system', 'content': self.system_message}]
-
-         for i, (_, msg) in enumerate(self.messages[self.offset :]):
-             if i % 2 == 0:
-                 ret.append({'role': 'user', 'content': msg})
-             else:
-                 if msg is not None:
-                     ret.append({'role': 'assistant', 'content': msg})
-         return ret
-
-     def copy(self):
-         return Conversation(
-             name=self.name,
-             system_template=self.system_template,
-             system_message=self.system_message,
-             roles=self.roles,
-             messages=[[x, y] for x, y in self.messages],
-             offset=self.offset,
-             sep_style=self.sep_style,
-             sep=self.sep,
-             sep2=self.sep2,
-             stop_str=self.stop_str,
-             stop_token_ids=self.stop_token_ids,
-         )
-
-     def dict(self):
-         return {
-             'template_name': self.name,
-             'system_message': self.system_message,
-             'roles': self.roles,
-             'messages': self.messages,
-             'offset': self.offset,
-         }
-
-
- # A global registry for all conversation templates
- conv_templates: Dict[str, Conversation] = {}
-
-
- def register_conv_template(template: Conversation, override: bool = False):
-     """Register a new conversation template."""
-     if not override:
-         assert (
-             template.name not in conv_templates
-         ), f'{template.name} has been registered.'
-
-     conv_templates[template.name] = template
-
-
- def get_conv_template(name: str) -> Conversation:
-     """Get a conversation template."""
-     return conv_templates[name].copy()
-
-
- # Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
- # is that during training, the preprocessing function for the Hermes-2 template doesn't add
- # <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
- # Therefore, they are completely equivalent during inference.
-
- register_conv_template(
-     Conversation(
-         name='chatml',
-         system_template='<|im_start|>system\n{system_message}',
-         # note: The new system prompt was not used here to avoid changes in benchmark performance.
-         system_message='Answer the questions.',
-         roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
-         sep_style=SeparatorStyle.MPT,
-         sep='<|im_end|>',
-         stop_token_ids=[
-             2,
-             92543,
-             92542
-         ]
-     )
- )
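
Note: the file registers only the 'chatml' template that config.json selects. A minimal usage sketch (import path hypothetical); as written, the MPT branch of get_prompt starts from an empty string, so the system template is not emitted:

from model.conversation import get_conv_template

conv = get_conv_template('chatml')
conv.append_message(conv.roles[0], 'Describe this image.')
conv.append_message(conv.roles[1], None)  # None marks the slot to generate
print(conv.get_prompt())
# <|im_start|>user
# Describe this image.<|im_end|><|im_start|>assistant
# (the stop_token_ids above appear to be InternLM2 tokenizer IDs; with this
# repo's Qwen2 tokenizer, <|im_end|> is 151645 per the deleted added_tokens.json)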
 
model/flash_attention.py DELETED
@@ -1,75 +0,0 @@
- import torch
- import torch.nn as nn
- from einops import rearrange
-
- try:  # v1
-     from flash_attn.flash_attn_interface import \
-         flash_attn_unpadded_qkvpacked_func
- except:  # v2
-     from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
-
- from flash_attn.bert_padding import pad_input, unpad_input
-
-
- class FlashAttention(nn.Module):
-     """Implement the scaled dot product attention with softmax.
-     Arguments
-     ---------
-         softmax_scale: The temperature to use for the softmax attention.
-                       (default: 1/sqrt(d_keys) where d_keys is computed at
-                       runtime)
-         attention_dropout: The dropout rate to apply to the attention
-                            (default: 0.0)
-     """
-
-     def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
-         super().__init__()
-         self.softmax_scale = softmax_scale
-         self.dropout_p = attention_dropout
-
-     def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
-                 max_s=None, need_weights=False):
-         """Implements the multihead softmax attention.
-         Arguments
-         ---------
-             qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
-                 if unpadded: (nnz, 3, h, d)
-             key_padding_mask: a bool tensor of shape (B, S)
-         """
-         assert not need_weights
-         assert qkv.dtype in [torch.float16, torch.bfloat16]
-         assert qkv.is_cuda
-
-         if cu_seqlens is None:
-             batch_size = qkv.shape[0]
-             seqlen = qkv.shape[1]
-             if key_padding_mask is None:
-                 qkv = rearrange(qkv, 'b s ... -> (b s) ...')
-                 max_s = seqlen
-                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
-                                           device=qkv.device)
-                 output = flash_attn_unpadded_qkvpacked_func(
-                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
-                     softmax_scale=self.softmax_scale, causal=causal
-                 )
-                 output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
-             else:
-                 nheads = qkv.shape[-2]
-                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
-                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
-                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
-                 output_unpad = flash_attn_unpadded_qkvpacked_func(
-                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
-                     softmax_scale=self.softmax_scale, causal=causal
-                 )
-                 output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
-                                              indices, batch_size, seqlen),
-                                    'b s (h d) -> b s h d', h=nheads)
-         else:
-             assert max_s is not None
-             output = flash_attn_unpadded_qkvpacked_func(
-                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
-                 softmax_scale=self.softmax_scale, causal=causal
-             )
-
-         return output, None
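
Note: a minimal usage sketch for this wrapper, assuming the flash-attn package is installed and a CUDA device is available (the asserts in forward() require half precision on GPU); shapes follow the packed-QKV layout from the docstring:

import torch
from flash_attention import FlashAttention  # hypothetical import path

attn = FlashAttention(attention_dropout=0.0).cuda()

# Packed QKV: (batch, seq_len, 3, num_heads, head_dim); 16 heads x 64 dims
# matches the 1024-wide InternViT tower in config.json.
qkv = torch.randn(2, 197, 3, 16, 64, dtype=torch.bfloat16, device='cuda')
out, _ = attn(qkv, causal=False)  # out: (2, 197, 16, 64)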
 
model/generation_config.json DELETED
@@ -1,4 +0,0 @@
- {
-   "_from_model_config": true,
-   "transformers_version": "4.45.0"
- }
 
model/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
model/model-00004-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ca76e6a409549c80001b5af64a538530bbb785ce60105468a29f84f6ce2b43c0
- size 1380991480
 
model/model.safetensors.index.json DELETED
@@ -1,690 +0,0 @@
- {
-   "metadata": {
-     "total_size": 16130253824
-   },
-   "weight_map": {
-     "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
-     "language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-     "language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-     "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
-     "language_model.model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
290
- "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
291
- "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
292
- "language_model.model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
293
- "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
294
- "language_model.model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
295
- "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
296
- "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
297
- "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
298
- "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
299
- "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
300
- "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
301
- "language_model.model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
302
- "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
303
- "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
304
- "language_model.model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
305
- "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
306
- "language_model.model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
307
- "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
308
- "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
309
- "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
310
- "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
311
- "language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
312
- "language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
313
- "language_model.model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
314
- "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
315
- "language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
316
- "language_model.model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
317
- "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
318
- "language_model.model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
319
- "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
320
- "language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
321
- "language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
322
- "language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
323
- "language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
324
- "language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
325
- "language_model.model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
326
- "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
327
- "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
328
- "language_model.model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
329
- "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
330
- "language_model.model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
331
- "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
332
- "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
333
- "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
334
- "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
335
- "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
336
- "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
337
- "language_model.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
338
- "language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
339
- "language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
340
- "language_model.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
341
- "language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
342
- "language_model.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
343
- "language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
344
- "language_model.model.norm.weight": "model-00003-of-00004.safetensors",
345
- "mlp1.0.bias": "model-00004-of-00004.safetensors",
346
- "mlp1.0.weight": "model-00004-of-00004.safetensors",
347
- "mlp1.1.weight": "model-00004-of-00004.safetensors",
348
- "mlp1.3.weight": "model-00004-of-00004.safetensors",
349
- "vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
350
- "vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
351
- "vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
352
- "vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
353
- "vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00004.safetensors",
354
- "vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00004.safetensors",
355
- "vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
356
- "vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
357
- "vision_model.encoder.layers.0.ls1": "model-00001-of-00004.safetensors",
358
- "vision_model.encoder.layers.0.ls2": "model-00001-of-00004.safetensors",
359
- "vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
360
- "vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
361
- "vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
362
- "vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
363
- "vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00004.safetensors",
364
- "vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00004.safetensors",
365
- "vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00004.safetensors",
366
- "vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00004.safetensors",
367
- "vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00004.safetensors",
368
- "vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00004.safetensors",
369
- "vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
370
- "vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
371
- "vision_model.encoder.layers.1.ls1": "model-00001-of-00004.safetensors",
372
- "vision_model.encoder.layers.1.ls2": "model-00001-of-00004.safetensors",
373
- "vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
374
- "vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
375
- "vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
376
- "vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
377
- "vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00004.safetensors",
378
- "vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00004.safetensors",
379
- "vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00004.safetensors",
380
- "vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00004.safetensors",
381
- "vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00004.safetensors",
382
- "vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00004.safetensors",
383
- "vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
384
- "vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
385
- "vision_model.encoder.layers.10.ls1": "model-00001-of-00004.safetensors",
386
- "vision_model.encoder.layers.10.ls2": "model-00001-of-00004.safetensors",
387
- "vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
388
- "vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
389
- "vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
390
- "vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
391
- "vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00004.safetensors",
392
- "vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00004.safetensors",
393
- "vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00004.safetensors",
394
- "vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00004.safetensors",
395
- "vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00004.safetensors",
396
- "vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00004.safetensors",
397
- "vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
398
- "vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
399
- "vision_model.encoder.layers.11.ls1": "model-00001-of-00004.safetensors",
400
- "vision_model.encoder.layers.11.ls2": "model-00001-of-00004.safetensors",
401
- "vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
402
- "vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
403
- "vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
404
- "vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
405
- "vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00004.safetensors",
406
- "vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00004.safetensors",
407
- "vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00004.safetensors",
408
- "vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00004.safetensors",
409
- "vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00004.safetensors",
410
- "vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00004.safetensors",
411
- "vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
412
- "vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
413
- "vision_model.encoder.layers.12.ls1": "model-00001-of-00004.safetensors",
414
- "vision_model.encoder.layers.12.ls2": "model-00001-of-00004.safetensors",
415
- "vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
416
- "vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
417
- "vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
418
- "vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
419
- "vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00004.safetensors",
420
- "vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00004.safetensors",
421
- "vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00004.safetensors",
422
- "vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00004.safetensors",
423
- "vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00004.safetensors",
424
- "vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00004.safetensors",
425
- "vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
426
- "vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
427
- "vision_model.encoder.layers.13.ls1": "model-00001-of-00004.safetensors",
428
- "vision_model.encoder.layers.13.ls2": "model-00001-of-00004.safetensors",
429
- "vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
430
- "vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
431
- "vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
432
- "vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
433
- "vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00004.safetensors",
434
- "vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00004.safetensors",
435
- "vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00004.safetensors",
436
- "vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00004.safetensors",
437
- "vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00004.safetensors",
438
- "vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00004.safetensors",
439
- "vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
440
- "vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
441
- "vision_model.encoder.layers.14.ls1": "model-00001-of-00004.safetensors",
442
- "vision_model.encoder.layers.14.ls2": "model-00001-of-00004.safetensors",
443
- "vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
444
- "vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
445
- "vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
446
- "vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
447
- "vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00004.safetensors",
448
- "vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00004.safetensors",
449
- "vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00004.safetensors",
450
- "vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00004.safetensors",
451
- "vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00004.safetensors",
452
- "vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00004.safetensors",
453
- "vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
454
- "vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
455
- "vision_model.encoder.layers.15.ls1": "model-00001-of-00004.safetensors",
456
- "vision_model.encoder.layers.15.ls2": "model-00001-of-00004.safetensors",
457
- "vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
458
- "vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
459
- "vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
460
- "vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
461
- "vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00004.safetensors",
462
- "vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00004.safetensors",
463
- "vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00004.safetensors",
464
- "vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00004.safetensors",
465
- "vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00004.safetensors",
466
- "vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00004.safetensors",
467
- "vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
468
- "vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
469
- "vision_model.encoder.layers.16.ls1": "model-00001-of-00004.safetensors",
470
- "vision_model.encoder.layers.16.ls2": "model-00001-of-00004.safetensors",
471
- "vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
472
- "vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
473
- "vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
474
- "vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
475
- "vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00004.safetensors",
476
- "vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00004.safetensors",
477
- "vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00004.safetensors",
478
- "vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00004.safetensors",
479
- "vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00004.safetensors",
480
- "vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00004.safetensors",
481
- "vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
482
- "vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
483
- "vision_model.encoder.layers.17.ls1": "model-00001-of-00004.safetensors",
484
- "vision_model.encoder.layers.17.ls2": "model-00001-of-00004.safetensors",
485
- "vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
486
- "vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
487
- "vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
488
- "vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
489
- "vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00004.safetensors",
490
- "vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00004.safetensors",
491
- "vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00004.safetensors",
492
- "vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00004.safetensors",
493
- "vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00004.safetensors",
494
- "vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00004.safetensors",
495
- "vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
496
- "vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
497
- "vision_model.encoder.layers.18.ls1": "model-00001-of-00004.safetensors",
498
- "vision_model.encoder.layers.18.ls2": "model-00001-of-00004.safetensors",
499
- "vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
500
- "vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
501
- "vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
502
- "vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
503
- "vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00004.safetensors",
504
- "vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00004.safetensors",
505
- "vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00004.safetensors",
506
- "vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00004.safetensors",
507
- "vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00004.safetensors",
508
- "vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00004.safetensors",
509
- "vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
510
- "vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
511
- "vision_model.encoder.layers.19.ls1": "model-00001-of-00004.safetensors",
512
- "vision_model.encoder.layers.19.ls2": "model-00001-of-00004.safetensors",
513
- "vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
514
- "vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
515
- "vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
516
- "vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
517
- "vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00004.safetensors",
518
- "vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00004.safetensors",
519
- "vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00004.safetensors",
520
- "vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00004.safetensors",
521
- "vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00004.safetensors",
522
- "vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00004.safetensors",
523
- "vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
524
- "vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
525
- "vision_model.encoder.layers.2.ls1": "model-00001-of-00004.safetensors",
526
- "vision_model.encoder.layers.2.ls2": "model-00001-of-00004.safetensors",
527
- "vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
528
- "vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
529
- "vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
530
- "vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
531
- "vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00004.safetensors",
532
- "vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00004.safetensors",
533
- "vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00004.safetensors",
534
- "vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00004.safetensors",
535
- "vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00004.safetensors",
536
- "vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00004.safetensors",
537
- "vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
538
- "vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
539
- "vision_model.encoder.layers.20.ls1": "model-00001-of-00004.safetensors",
540
- "vision_model.encoder.layers.20.ls2": "model-00001-of-00004.safetensors",
541
- "vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
542
- "vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
543
- "vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
544
- "vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
545
- "vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00004.safetensors",
546
- "vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00004.safetensors",
547
- "vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00004.safetensors",
548
- "vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00004.safetensors",
549
- "vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00004.safetensors",
550
- "vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00004.safetensors",
551
- "vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
552
- "vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
553
- "vision_model.encoder.layers.21.ls1": "model-00001-of-00004.safetensors",
554
- "vision_model.encoder.layers.21.ls2": "model-00001-of-00004.safetensors",
555
- "vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
556
- "vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
557
- "vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
558
- "vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
559
- "vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00004.safetensors",
560
- "vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00004.safetensors",
561
- "vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00004.safetensors",
562
- "vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00004.safetensors",
563
- "vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00004.safetensors",
564
- "vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00004.safetensors",
565
- "vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
566
- "vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
567
- "vision_model.encoder.layers.22.ls1": "model-00001-of-00004.safetensors",
568
- "vision_model.encoder.layers.22.ls2": "model-00001-of-00004.safetensors",
569
- "vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
570
- "vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
571
- "vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
572
- "vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
573
- "vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00004.safetensors",
574
- "vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00004.safetensors",
575
- "vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00004.safetensors",
576
- "vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00004.safetensors",
577
- "vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00004.safetensors",
578
- "vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00004.safetensors",
579
- "vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
580
- "vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
581
- "vision_model.encoder.layers.23.ls1": "model-00001-of-00004.safetensors",
582
- "vision_model.encoder.layers.23.ls2": "model-00001-of-00004.safetensors",
583
- "vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
584
- "vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
585
- "vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
586
- "vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
587
- "vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00004.safetensors",
588
- "vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00004.safetensors",
589
- "vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00004.safetensors",
590
- "vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00004.safetensors",
591
- "vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00004.safetensors",
592
- "vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00004.safetensors",
593
- "vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
594
- "vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
595
- "vision_model.encoder.layers.3.ls1": "model-00001-of-00004.safetensors",
596
- "vision_model.encoder.layers.3.ls2": "model-00001-of-00004.safetensors",
597
- "vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
598
- "vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
599
- "vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
600
- "vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
601
- "vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00004.safetensors",
602
- "vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00004.safetensors",
603
- "vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00004.safetensors",
604
- "vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00004.safetensors",
605
- "vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00004.safetensors",
606
- "vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00004.safetensors",
607
- "vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
608
- "vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
609
- "vision_model.encoder.layers.4.ls1": "model-00001-of-00004.safetensors",
610
- "vision_model.encoder.layers.4.ls2": "model-00001-of-00004.safetensors",
611
- "vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
612
- "vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
613
- "vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
614
- "vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
615
- "vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00004.safetensors",
616
- "vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00004.safetensors",
617
- "vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00004.safetensors",
618
- "vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00004.safetensors",
619
- "vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00004.safetensors",
620
- "vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00004.safetensors",
621
- "vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
622
- "vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
623
- "vision_model.encoder.layers.5.ls1": "model-00001-of-00004.safetensors",
624
- "vision_model.encoder.layers.5.ls2": "model-00001-of-00004.safetensors",
625
- "vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
626
- "vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
627
- "vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
628
- "vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
629
- "vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00004.safetensors",
630
- "vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00004.safetensors",
631
- "vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00004.safetensors",
632
- "vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00004.safetensors",
633
- "vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00004.safetensors",
634
- "vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00004.safetensors",
635
- "vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
636
- "vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
637
- "vision_model.encoder.layers.6.ls1": "model-00001-of-00004.safetensors",
638
- "vision_model.encoder.layers.6.ls2": "model-00001-of-00004.safetensors",
639
- "vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
640
- "vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
641
- "vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
642
- "vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
643
- "vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00004.safetensors",
644
- "vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00004.safetensors",
645
- "vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00004.safetensors",
646
- "vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00004.safetensors",
647
- "vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00004.safetensors",
648
- "vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00004.safetensors",
649
- "vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
650
- "vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
651
- "vision_model.encoder.layers.7.ls1": "model-00001-of-00004.safetensors",
652
- "vision_model.encoder.layers.7.ls2": "model-00001-of-00004.safetensors",
653
- "vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
654
- "vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
655
- "vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
656
- "vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
657
- "vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00004.safetensors",
658
- "vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00004.safetensors",
659
- "vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00004.safetensors",
660
- "vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00004.safetensors",
661
- "vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00004.safetensors",
662
- "vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00004.safetensors",
663
- "vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
664
- "vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
665
- "vision_model.encoder.layers.8.ls1": "model-00001-of-00004.safetensors",
666
- "vision_model.encoder.layers.8.ls2": "model-00001-of-00004.safetensors",
667
- "vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
668
- "vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
669
- "vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
670
- "vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
671
- "vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00004.safetensors",
672
- "vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00004.safetensors",
673
- "vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00004.safetensors",
674
- "vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00004.safetensors",
675
- "vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00004.safetensors",
676
- "vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00004.safetensors",
677
- "vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
678
- "vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
679
- "vision_model.encoder.layers.9.ls1": "model-00001-of-00004.safetensors",
680
- "vision_model.encoder.layers.9.ls2": "model-00001-of-00004.safetensors",
681
- "vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
682
- "vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
683
- "vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
684
- "vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
685
- "vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
686
- "vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
687
- "vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
688
- "vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
689
- }
690
- }
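The block above is the deleted model.safetensors.index.json: its weight_map assigns every tensor name to one of four shards, so a loader only has to open the shard that holds the tensor it needs (note how layer 7 straddles the split: its q/k/v projections sit in shard 1 while its o_proj and MLP weights sit in shard 2). As a minimal sketch of how such an index is consumed, assuming the standard safetensors Python API and that the shard files sit beside the index:

import json
from safetensors import safe_open

# Load the index and look up which shard stores one tensor.
with open('model.safetensors.index.json') as f:
    index = json.load(f)

name = 'language_model.model.layers.24.self_attn.k_proj.weight'
shard = index['weight_map'][name]  # 'model-00003-of-00004.safetensors' per the map above

# safe_open reads just this tensor from the shard, without loading the whole file.
with safe_open(shard, framework='pt', device='cpu') as f:
    tensor = f.get_tensor(name)
print(tensor.shape)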
model/modeling_intern_vit.py DELETED
@@ -1,363 +0,0 @@
- # --------------------------------------------------------
- # InternVL
- # Copyright (c) 2023 OpenGVLab
- # Licensed under The MIT License [see LICENSE for details]
- # --------------------------------------------------------
- from typing import Optional, Tuple, Union
-
- import torch
- import torch.nn.functional as F
- import torch.utils.checkpoint
- from einops import rearrange
- from timm.models.layers import DropPath
- from torch import nn
- from transformers.activations import ACT2FN
- from transformers.modeling_outputs import (BaseModelOutput,
-                                            BaseModelOutputWithPooling)
- from transformers.modeling_utils import PreTrainedModel
- from transformers.utils import logging
-
- from .configuration_intern_vit import InternVisionConfig
-
- try:
-     from .flash_attention import FlashAttention
-     has_flash_attn = True
- except:
-     print('FlashAttention is not installed.')
-     has_flash_attn = False
-
-
- logger = logging.get_logger(__name__)
-
-
- class InternRMSNorm(nn.Module):
-     def __init__(self, hidden_size, eps=1e-6):
-         super().__init__()
-         self.weight = nn.Parameter(torch.ones(hidden_size))
-         self.variance_epsilon = eps
-
-     def forward(self, hidden_states):
-         input_dtype = hidden_states.dtype
-         hidden_states = hidden_states.to(torch.float32)
-         variance = hidden_states.pow(2).mean(-1, keepdim=True)
-         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
-         return self.weight * hidden_states.to(input_dtype)
-
-
- try:
-     from apex.normalization import FusedRMSNorm
-
-     InternRMSNorm = FusedRMSNorm  # noqa
-
-     logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
- except ImportError:
-     # using the normal InternRMSNorm
-     pass
- except Exception:
-     logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
-     pass
-
-
- NORM2FN = {
-     'rms_norm': InternRMSNorm,
-     'layer_norm': nn.LayerNorm,
- }
-
-
- class InternVisionEmbeddings(nn.Module):
-     def __init__(self, config: InternVisionConfig):
-         super().__init__()
-         self.config = config
-         self.embed_dim = config.hidden_size
-         self.image_size = config.image_size
-         self.patch_size = config.patch_size
-
-         self.class_embedding = nn.Parameter(
-             torch.randn(1, 1, self.embed_dim),
-         )
-
-         self.patch_embedding = nn.Conv2d(
-             in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
-         )
-
-         self.num_patches = (self.image_size // self.patch_size) ** 2
-         self.num_positions = self.num_patches + 1
-
-         self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
-
-     def _get_pos_embed(self, pos_embed, H, W):
-         target_dtype = pos_embed.dtype
-         pos_embed = pos_embed.float().reshape(
-             1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
-         pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\
-             reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
-         return pos_embed
-
-     def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
-         target_dtype = self.patch_embedding.weight.dtype
-         patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
-         batch_size, _, height, width = patch_embeds.shape
-         patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
-         class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
-         embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
-         position_embedding = torch.cat([
-             self.position_embedding[:, :1, :],
-             self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
-         ], dim=1)
-         embeddings = embeddings + position_embedding.to(target_dtype)
-         return embeddings
-
-
- class InternAttention(nn.Module):
-     """Multi-headed attention from 'Attention Is All You Need' paper"""
-
-     def __init__(self, config: InternVisionConfig):
-         super().__init__()
-         self.config = config
-         self.embed_dim = config.hidden_size
-         self.num_heads = config.num_attention_heads
-         self.use_flash_attn = config.use_flash_attn and has_flash_attn
-         if config.use_flash_attn and not has_flash_attn:
-             print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
-         self.head_dim = self.embed_dim // self.num_heads
-         if self.head_dim * self.num_heads != self.embed_dim:
-             raise ValueError(
-                 f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
-                 f' {self.num_heads}).'
-             )
-
-         self.scale = self.head_dim ** -0.5
-         self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
-         self.attn_drop = nn.Dropout(config.attention_dropout)
-         self.proj_drop = nn.Dropout(config.dropout)
-
-         self.qk_normalization = config.qk_normalization
-
-         if self.qk_normalization:
-             self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
-             self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
-
-         if self.use_flash_attn:
-             self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
-         self.proj = nn.Linear(self.embed_dim, self.embed_dim)
-
-     def _naive_attn(self, x):
-         B, N, C = x.shape
-         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-         q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
-
-         if self.qk_normalization:
-             B_, H_, N_, D_ = q.shape
-             q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
-             k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
-
-         attn = ((q * self.scale) @ k.transpose(-2, -1))
-         attn = attn.softmax(dim=-1)
-         attn = self.attn_drop(attn)
-
-         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
-
-     def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
-         qkv = self.qkv(x)
-         qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
-
-         if self.qk_normalization:
-             q, k, v = qkv.unbind(2)
-             q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
-             k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
-             qkv = torch.stack([q, k, v], dim=2)
-
-         context, _ = self.inner_attn(
-             qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
-         )
-         outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
-         outs = self.proj_drop(outs)
-         return outs
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
-         return x
-
-
- class InternMLP(nn.Module):
-     def __init__(self, config: InternVisionConfig):
-         super().__init__()
-         self.config = config
-         self.act = ACT2FN[config.hidden_act]
-         self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
-         self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         hidden_states = self.fc1(hidden_states)
-         hidden_states = self.act(hidden_states)
-         hidden_states = self.fc2(hidden_states)
-         return hidden_states
-
-
- class InternVisionEncoderLayer(nn.Module):
-     def __init__(self, config: InternVisionConfig, drop_path_rate: float):
-         super().__init__()
-         self.embed_dim = config.hidden_size
-         self.intermediate_size = config.intermediate_size
-         self.norm_type = config.norm_type
-
-         self.attn = InternAttention(config)
-         self.mlp = InternMLP(config)
-         self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
-         self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
-
-         self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
-         self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
-         self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
-         self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-     ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
-         """
-         Args:
-             hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
-         """
-         hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
-
-         hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
-
-         return hidden_states
-
-
- class InternVisionEncoder(nn.Module):
-     """
-     Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
-     [`InternEncoderLayer`].
-
-     Args:
-         config (`InternConfig`):
-             The corresponding vision configuration for the `InternEncoder`.
-     """
-
-     def __init__(self, config: InternVisionConfig):
-         super().__init__()
-         self.config = config
-         # stochastic depth decay rule
-         dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
-         self.layers = nn.ModuleList([
-             InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
-         self.gradient_checkpointing = True
-
-     def forward(
-         self,
-         inputs_embeds,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, BaseModelOutput]:
-         r"""
-         Args:
-             inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
-                 Embedded representation of the inputs. Should be float, not int tokens.
-             output_hidden_states (`bool`, *optional*):
-                 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
-                 for more detail.
-             return_dict (`bool`, *optional*):
-                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-         """
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         encoder_states = () if output_hidden_states else None
-         hidden_states = inputs_embeds
-
-         for idx, encoder_layer in enumerate(self.layers):
-             if output_hidden_states:
-                 encoder_states = encoder_states + (hidden_states,)
-             if self.gradient_checkpointing and self.training:
-                 layer_outputs = torch.utils.checkpoint.checkpoint(
-                     encoder_layer,
-                     hidden_states)
-             else:
-                 layer_outputs = encoder_layer(
-                     hidden_states,
-                 )
-             hidden_states = layer_outputs
-
-         if output_hidden_states:
-             encoder_states = encoder_states + (hidden_states,)
-
-         if not return_dict:
-             return tuple(v for v in [hidden_states, encoder_states] if v is not None)
-         return BaseModelOutput(
-             last_hidden_state=hidden_states, hidden_states=encoder_states
-         )
-
-
- class InternVisionModel(PreTrainedModel):
-     main_input_name = 'pixel_values'
-     config_class = InternVisionConfig
-     _no_split_modules = ['InternVisionEncoderLayer']
-
-     def __init__(self, config: InternVisionConfig):
-         super().__init__(config)
-         self.config = config
-
-         self.embeddings = InternVisionEmbeddings(config)
-         self.encoder = InternVisionEncoder(config)
-
-     def resize_pos_embeddings(self, old_size, new_size, patch_size):
-         pos_emb = self.embeddings.position_embedding
-         _, num_positions, embed_dim = pos_emb.shape
-         cls_emb = pos_emb[:, :1, :]
-         pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
-         pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
-         pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
-         pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
-         self.embeddings.position_embedding = nn.Parameter(pos_emb)
-         self.embeddings.image_size = new_size
-         logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
-
-     def get_input_embeddings(self):
-         return self.embeddings
-
-     def forward(
-         self,
-         pixel_values: Optional[torch.FloatTensor] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-         pixel_embeds: Optional[torch.FloatTensor] = None,
-     ) -> Union[Tuple, BaseModelOutputWithPooling]:
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         if pixel_values is None and pixel_embeds is None:
-             raise ValueError('You have to specify pixel_values or pixel_embeds')
-
-         if pixel_embeds is not None:
-             hidden_states = pixel_embeds
-         else:
-             if len(pixel_values.shape) == 4:
-                 hidden_states = self.embeddings(pixel_values)
-             else:
-                 raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
-         encoder_outputs = self.encoder(
-             inputs_embeds=hidden_states,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-         last_hidden_state = encoder_outputs.last_hidden_state
-         pooled_output = last_hidden_state[:, 0, :]
-
-         if not return_dict:
-             return (last_hidden_state, pooled_output) + encoder_outputs[1:]
-
-         return BaseModelOutputWithPooling(
-             last_hidden_state=last_hidden_state,
-             pooler_output=pooled_output,
-             hidden_states=encoder_outputs.hidden_states,
-             attentions=encoder_outputs.attentions,
-         )
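The deleted encoder above stacks pre-norm residual blocks in which each branch (attention, then MLP) is multiplied element-wise by a learned per-channel scale (the ls1/ls2 tensors listed in the weight map) and optionally dropped whole via stochastic depth. A standalone sketch of that update rule, with hypothetical sizes in place of the real config values:

import torch
from torch import nn

embed_dim = 8                              # hypothetical; the real value is config.hidden_size
norm1 = nn.LayerNorm(embed_dim)
attn = nn.Identity()                       # stand-in for InternAttention
ls1 = nn.Parameter(torch.ones(embed_dim))  # per-channel layer scale, initialised like ls1 above

x = torch.randn(2, 5, embed_dim)           # (batch, seq_len, embed_dim)
x = x + attn(norm1(x)) * ls1               # pre-norm residual branch, scaled channel-wise
print(x.shape)                             # torch.Size([2, 5, 8])

The MLP branch repeats the same pattern with norm2/ls2, and DropPath zeroes whole residual branches during training at a rate that grows linearly with depth, per the torch.linspace decay rule in InternVisionEncoder.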
model/modeling_llavaqw.py DELETED
@@ -1,573 +0,0 @@
- import warnings
- from typing import Any, List, Optional, Tuple, Union
- import torch.utils.checkpoint
- import transformers
- from torch import nn
- from torch.nn import CrossEntropyLoss
- from transformers import GenerationConfig
- from transformers import Qwen2ForCausalLM
- from transformers.modeling_outputs import CausalLMOutputWithPast
- from transformers.modeling_utils import PreTrainedModel
- from transformers.utils import ModelOutput, logging
- import torch.nn.functional as F
-
- from .configuration_llavaqw import LlavaQwConfig
- from .conversation import get_conv_template
- from .modeling_intern_vit import InternVisionModel, has_flash_attn
-
- logger = logging.get_logger(__name__)
-
-
- def version_cmp(v1, v2, op='eq'):
-     import operator
-
-     from packaging import version
-     op_func = getattr(operator, op)
-     return op_func(version.parse(v1), version.parse(v2))
-
-
- class LlavaQwModel(PreTrainedModel):
-     config_class = LlavaQwConfig
-     main_input_name = 'pixel_values'
-     _supports_flash_attn_2 = True
-     _no_split_modules = ['InternVisionModel', 'Qwen2DecoderLayer']
-
-     def __init__(self, config: LlavaQwConfig, vision_model=None, language_model=None, use_flash_attn=True):
-         super().__init__(config)
-
-         assert version_cmp(transformers.__version__, '4.44.2', 'ge')
-         image_size = config.force_image_size or config.vision_config.image_size
-         patch_size = config.vision_config.patch_size
-         self.patch_size = patch_size
-         self.select_layer = config.select_layer
-         self.llm_arch_name = config.llm_config.architectures[0]
-         self.template = config.template
-         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
-         self.downsample_ratio = config.downsample_ratio
-         self.ps_version = config.ps_version
-         use_flash_attn = use_flash_attn if has_flash_attn else False
-         config.vision_config.use_flash_attn = True if use_flash_attn else False
-         config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
-
-         logger.info(f'num_image_token: {self.num_image_token}')
-         logger.info(f'ps_version: {self.ps_version}')
-         if vision_model is not None:
-             self.vision_model = vision_model
-         else:
-             self.vision_model = InternVisionModel(config.vision_config)
-         if language_model is not None:
-             self.language_model = language_model
-         else:
-             if config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
-                 self.language_model = Qwen2ForCausalLM(config.llm_config)
-             else:
-                 raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
-
-         vit_hidden_size = config.vision_config.hidden_size
-         llm_intermediate_size = config.llm_config.intermediate_size
-         llm_hidden_size = config.llm_config.hidden_size
-
-         self.mlp1 = nn.Sequential(
-             nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
-             nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_intermediate_size, bias=False),
-             nn.GELU(),
-             nn.Linear(llm_intermediate_size, llm_hidden_size, bias=False)
-         )
-
-         self.img_context_token_id = 151654
-         self.conv_template = get_conv_template(self.template)
-         self.system_message = self.conv_template.system_message
-
-     def forward(
-             self,
-             pixel_values: torch.FloatTensor,
-             input_ids: torch.LongTensor = None,
-             attention_mask: Optional[torch.Tensor] = None,
-             position_ids: Optional[torch.LongTensor] = None,
-             image_flags: Optional[torch.LongTensor] = None,
-             past_key_values: Optional[List[torch.FloatTensor]] = None,
-             labels: Optional[torch.LongTensor] = None,
-             use_cache: Optional[bool] = None,
-             output_attentions: Optional[bool] = None,
-             output_hidden_states: Optional[bool] = None,
-             return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, CausalLMOutputWithPast]:
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         input_embeds = self.language_model.get_input_embeddings()(input_ids)
-
-         vit_embeds = self.extract_feature(pixel_values)
-         vit_batch_size = pixel_values.shape[0]
-
-         B, N, C = input_embeds.shape
-         input_embeds = input_embeds.reshape(B * N, C)
-
-         input_ids = input_ids.reshape(B * N)
-         selected = (input_ids == self.img_context_token_id)
-         try:
-             input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
-         except Exception as e:
-             vit_embeds = vit_embeds.reshape(-1, C)
-             print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
-                   f'vit_embeds.shape={vit_embeds.shape}')
-             n_token = selected.sum()
-             input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
-
-         input_embeds = input_embeds.reshape(B, N, C)
-
-         outputs = self.language_model(
-             inputs_embeds=input_embeds,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-         logits = outputs.logits
-
-         loss = None
-         if labels is not None:
-             # Shift so that tokens < n predict n
-             shift_logits = logits[..., :-1, :].contiguous()
-             shift_labels = labels[..., 1:].contiguous()
-             # Flatten the tokens
-             loss_fct = CrossEntropyLoss()
-             shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
-             shift_labels = shift_labels.view(-1)
-
-             # Enable model parallelism
-             shift_labels = shift_labels.to(shift_logits.device)
-             loss = loss_fct(shift_logits, shift_labels)
-
-         if not return_dict:
-             output = (logits,) + outputs[1:]
-             return (loss,) + output if loss is not None else output
-
-         return CausalLMOutputWithPast(
-             loss=loss,
-             logits=logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-         )
-
-     def pixel_shuffle(self, x, scale_factor=0.5):
-         n, w, h, c = x.size()
-         # N, W, H, C --> N, W, H * scale, C // scale
-         x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
-         # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
-         x = x.permute(0, 2, 1, 3).contiguous()
-         # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
-         x = x.view(n, int(h * scale_factor), int(w * scale_factor),
-                    int(c / (scale_factor * scale_factor)))
-         if self.ps_version == 'v1':
-             warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
-                           'which results in a transposed image.')
-         else:
-             x = x.permute(0, 2, 1, 3).contiguous()
-         return x
-
-     def extract_feature(self, pixel_values):
-         if self.select_layer == -1:
-             vit_embeds = self.vision_model(
-                 pixel_values=pixel_values,
-                 output_hidden_states=False,
-                 return_dict=True).last_hidden_state
-         else:
-             vit_embeds = self.vision_model(
-                 pixel_values=pixel_values,
-                 output_hidden_states=True,
-                 return_dict=True).hidden_states[self.select_layer]
-         vit_embeds = vit_embeds[:, 1:, :]
-
-         h = w = int(vit_embeds.shape[1] ** 0.5)
-         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
-         vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
-         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
-         vit_embeds = self.mlp1(vit_embeds)
-         return vit_embeds
-
-     def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
-              num_patches_list=None, IMG_START_TOKEN='<|vision_start|>', IMG_END_TOKEN='<|vision_end|>',
-              IMG_CONTEXT_TOKEN='<|vision_pad|>', verbose=False, visual_features=None):
-
-         if history is None and pixel_values is not None and '<image>' not in question:
-             question = '<image>\n' + question
-
-         if num_patches_list is None:
-             num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
-         assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
-
-         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
-         self.img_context_token_id = img_context_token_id
-
-         template = get_conv_template(self.template)
-         template.system_message = self.system_message
-         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
-
-         history = [] if history is None else history
-         for (old_question, old_answer) in history:
-             template.append_message(template.roles[0], old_question)
-             template.append_message(template.roles[1], old_answer)
-         template.append_message(template.roles[0], question)
-         template.append_message(template.roles[1], None)
-         query = template.get_prompt()
-         # print('query:', query)
-
-         if verbose and pixel_values is not None:
-             image_bs = pixel_values.shape[0]
-             print(f'dynamic ViT batch size: {image_bs}')
-
-         for num_patches in num_patches_list:
-             tile_pos_identifiers = [f"<tile_{i}>" for i in range(1, num_patches)] + ["<tile_global_thumbnail>"]
-             image_tokens = ''
-             for tile_pos_identifier in tile_pos_identifiers:
-                 image_tokens += tile_pos_identifier + IMG_CONTEXT_TOKEN * self.num_image_token
-             image_tokens = IMG_START_TOKEN + image_tokens + IMG_END_TOKEN
-             query = query.replace('<image>', image_tokens, 1)
-
-         model_inputs = tokenizer(query, return_tensors='pt')
-         input_ids = model_inputs['input_ids'].cuda()
-         attention_mask = model_inputs['attention_mask'].cuda()
-         generation_config['eos_token_id'] = eos_token_id
-         generation_output = self.generate(
-             pixel_values=pixel_values,
-             visual_features=visual_features,
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             **generation_config
-         )
-         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
-         response = response.split(template.sep)[0].strip()
-         history.append((question, response))
-         if return_history:
-             return response, history
-         else:
-             query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
-             query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
-             if verbose:
-                 print(query_to_print, response)
-             return response
-
-     def chat_without_sys_prompt(self, tokenizer, pixel_values, question, generation_config, history=None,
-                                 return_history=False,
-                                 num_patches_list=None, IMG_START_TOKEN='<|vision_start|>',
-                                 IMG_END_TOKEN='<|vision_end|>',
-                                 IMG_CONTEXT_TOKEN='<|vision_pad|>', verbose=False, visual_features=None):
-
-         if history is None and pixel_values is not None and '<image>' not in question:
-             question = '<image>\n' + question
-
-         if num_patches_list is None:
-             num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
-         assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
-
-         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
-         self.img_context_token_id = img_context_token_id
-
-         template = get_conv_template(self.template)
-         system_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>"  # override dummy system prompt
-         template.system_message = system_prompt
-         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
-
-         history = [] if history is None else history
-         for (old_question, old_answer) in history:
-             template.append_message(template.roles[0], old_question)
-             template.append_message(template.roles[1], old_answer)
-         template.append_message(template.roles[0], question)
-         template.append_message(template.roles[1], None)
-         query = template.get_prompt()
-
-         if verbose and pixel_values is not None:
-             image_bs = pixel_values.shape[0]
-             print(f'dynamic ViT batch size: {image_bs}')
-
-         query = query[len(system_prompt):]
-
-         for num_patches in num_patches_list:
-             tile_pos_identifiers = [f"<tile_{i}>" for i in range(1, num_patches)] + ["<tile_global_thumbnail>"]
-             image_tokens = ''
-             for tile_pos_identifier in tile_pos_identifiers:
-                 image_tokens += tile_pos_identifier + IMG_CONTEXT_TOKEN * self.num_image_token
-             image_tokens = IMG_START_TOKEN + image_tokens + IMG_END_TOKEN
-             query = query.replace('<image>', image_tokens, 1)
-
-         model_inputs = tokenizer(query, return_tensors='pt')
-         input_ids = model_inputs['input_ids'].cuda()
-         attention_mask = model_inputs['attention_mask'].cuda()
-         generation_config['eos_token_id'] = eos_token_id
-         generation_output = self.generate(
-             pixel_values=pixel_values,
-             visual_features=visual_features,
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             **generation_config
-         )
-         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
-         response = response.split(template.sep)[0].strip()
-         history.append((question, response))
-         if return_history:
-             return response, history
-         else:
-             query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
-             query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
-             if verbose:
-                 print(query_to_print, response)
-             return response
-
-     def chat_without_chat_prompt(self, tokenizer, pixel_values, question, generation_config,
-                                  num_patches_list=None, IMG_START_TOKEN='<|vision_start|>',
-                                  IMG_END_TOKEN='<|vision_end|>',
-                                  IMG_CONTEXT_TOKEN='<|vision_pad|>', verbose=False, visual_features=None):
-
-         if pixel_values is not None and '<image>' not in question:
-             question = '<image>\n' + question
-
-         if num_patches_list is None:
-             num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
-         assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
-
-         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
-         self.img_context_token_id = img_context_token_id
-
-         template = get_conv_template(self.template)
-         template.system_message = self.system_message
-         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
-
-         if verbose and pixel_values is not None:
-             image_bs = pixel_values.shape[0]
-             print(f'dynamic ViT batch size: {image_bs}')
-
-         query = question
-
-         for num_patches in num_patches_list:
-             tile_pos_identifiers = [f"<tile_{i}>" for i in range(1, num_patches)] + ["<tile_global_thumbnail>"]
-             image_tokens = ''
-             for tile_pos_identifier in tile_pos_identifiers:
-                 image_tokens += tile_pos_identifier + IMG_CONTEXT_TOKEN * self.num_image_token
-             image_tokens = IMG_START_TOKEN + image_tokens + IMG_END_TOKEN
-             query = query.replace('<image>', image_tokens, 1)
-
-         model_inputs = tokenizer(query, return_tensors='pt')
-         input_ids = model_inputs['input_ids'].cuda()
-         attention_mask = model_inputs['attention_mask'].cuda()
-         generation_config['eos_token_id'] = eos_token_id
-         generation_output = self.generate(
-             pixel_values=pixel_values,
-             visual_features=visual_features,
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             **generation_config
-         )
-         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
-         response = response.split(template.sep)[0].strip()
-
-         query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
-         query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
-         if verbose:
-             print(query_to_print, response)
-         return response
-
-     @torch.no_grad()
-     def generate(
-             self,
-             pixel_values: Optional[torch.FloatTensor] = None,
-             input_ids: Optional[torch.FloatTensor] = None,
-             attention_mask: Optional[torch.LongTensor] = None,
-             visual_features: Optional[torch.FloatTensor] = None,
-             generation_config: Optional[GenerationConfig] = None,
-             output_hidden_states: Optional[bool] = None,
-             return_dict: Optional[bool] = None,
-             **generate_kwargs,
-     ) -> torch.LongTensor:
-
-         # assert self.img_context_token_id is not None
-         if pixel_values is not None:
-             if visual_features is not None:
-                 vit_embeds = visual_features.cuda()
-                 vit_embeds = self.mlp1(vit_embeds)
-             else:
-                 vit_embeds = self.extract_feature(pixel_values)
-
-             input_embeds = self.language_model.get_input_embeddings()(input_ids)
-             B, N, C = input_embeds.shape
-             input_embeds = input_embeds.reshape(B * N, C)
-
-             input_ids = input_ids.reshape(B * N)
-             selected = (input_ids == self.img_context_token_id)
-             assert selected.sum() != 0
-             input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
-
-             input_embeds = input_embeds.reshape(B, N, C)
-         else:
-             input_embeds = self.language_model.get_input_embeddings()(input_ids)
-
-         outputs = self.language_model.generate(
-             inputs_embeds=input_embeds,
-             attention_mask=attention_mask,
-             generation_config=generation_config,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-             use_cache=True,
-             **generate_kwargs,
-         )
-
-         return outputs
-
-     def chat_batch(
-             self,
-             tokenizer,
-             pixel_values_list,
-             questions,
-             generation_config,
-             histories=None,
-             return_histories=False,
-             num_patches_lists=None,
-             IMG_START_TOKEN='<|vision_start|>',
-             IMG_END_TOKEN='<|vision_end|>',
-             IMG_CONTEXT_TOKEN='<|vision_pad|>',
-             verbose=False,
-             visual_features_list=None
-     ):
-
-         if histories is None:
-             histories = [[] for _ in questions]
-
-         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
-         self.img_context_token_id = img_context_token_id
-         # Get eos_token_id from the template
-         template = get_conv_template(self.template)
-         template.system_message = self.system_message
-         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
-         generation_config['eos_token_id'] = eos_token_id
-
-         queries = []
-         input_ids_list = []
-         attention_mask_list = []
-
-         for idx in range(len(questions)):
-             question = questions[idx]
-             history = histories[idx]
-             pixel_values = pixel_values_list[idx] if pixel_values_list[idx] is not None else None
-             num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
-
-             if not history and pixel_values is not None and '<image>' not in question:
-                 question = '<image>\n' + question
-
-             template_i = get_conv_template(self.template)
-             template_i.system_message = self.system_message
-             for (old_question, old_answer) in history:
-                 template_i.append_message(template_i.roles[0], old_question)
-                 template_i.append_message(template_i.roles[1], old_answer)
-             template_i.append_message(template_i.roles[0], question)
-             template_i.append_message(template_i.roles[1], None)
-             query = template_i.get_prompt()
-             # Handle image tokens
-             if pixel_values is not None:
-                 for num_patches in num_patches_list:
-                     tile_pos_identifiers = [f"<tile_{i}>" for i in range(1, num_patches)] + ["<tile_global_thumbnail>"]
-                     image_tokens = ''
-                     for tile_pos_identifier in tile_pos_identifiers:
-                         image_tokens += tile_pos_identifier + IMG_CONTEXT_TOKEN * self.num_image_token
-                     image_tokens = IMG_START_TOKEN + image_tokens + IMG_END_TOKEN
-                     query = query.replace('<image>', image_tokens, 1)
-
-             model_inputs = tokenizer(
-                 query,
-                 return_tensors='pt',
-                 padding=True,
-                 truncation=True
-             )
-             input_ids = model_inputs['input_ids'].cuda()
-             attention_mask = model_inputs['attention_mask'].cuda()
-             input_ids_list.append(input_ids)
-             attention_mask_list.append(attention_mask)
-
-         # Call the generate function
-         generation_output = self.generate_batch(
-             pixel_values_list=pixel_values_list,
-             input_ids_list=input_ids_list,
-             attention_mask_list=attention_mask_list,
-             **generation_config
-         )
-         responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
-
-         outputs = []
-         for idx, response in enumerate(responses):
-             response = response.split(template.sep)[0].strip()
-             histories[idx].append((questions[idx], response))
-             outputs.append(response)
-
-         if return_histories:
-             return outputs, histories
-         else:
-             if verbose:
-                 for idx, query in enumerate(queries):
-                     query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
-                     query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
-                     print(query_to_print, outputs[idx])
-             return outputs
-
-     @torch.no_grad()
-     def generate_batch(
-             self,
-             pixel_values_list: Optional[List[torch.FloatTensor]] = None,
-             input_ids_list: Optional[List[torch.FloatTensor]] = None,
-             attention_mask_list: Optional[List[torch.LongTensor]] = None,
-             visual_features: Optional[torch.FloatTensor] = None,
-             generation_config: Optional[GenerationConfig] = None,
-             output_hidden_states: Optional[bool] = None,
-             return_dict: Optional[bool] = None,
-             **generate_kwargs,
-     ) -> torch.LongTensor:
-         input_embeds_list = []
-         attention_mask_padded_list = []
-
-         max_seq_length = max(input_ids.shape[1] for input_ids in input_ids_list)
-
-         for pixel_values, input_ids, attention_mask in zip(pixel_values_list, input_ids_list, attention_mask_list):
-             if pixel_values is not None:
-                 if visual_features is not None:
-                     vit_embeds = visual_features.cuda()
-                     vit_embeds = self.mlp1(vit_embeds)
-                 else:
-                     vit_embeds = self.extract_feature(pixel_values)
-
-                 input_embeds = self.language_model.get_input_embeddings()(input_ids)
-                 B, N, C = input_embeds.shape
-                 input_embeds = input_embeds.reshape(B * N, C)
-
-                 input_ids = input_ids.reshape(B * N)
-                 selected = (input_ids == self.img_context_token_id)
-                 assert selected.sum() != 0, "No valid image context token IDs found."
-                 input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
-
-                 input_embeds = input_embeds.reshape(B, N, C)
-             else:
-                 input_embeds = self.language_model.get_input_embeddings()(input_ids)
-
-             seq_length = input_embeds.shape[1]
-             if seq_length < max_seq_length:
-                 pad_size = max_seq_length - seq_length
-                 input_embeds = F.pad(input_embeds, (0, 0, 0, pad_size))
-                 attention_mask = F.pad(attention_mask, (0, pad_size))
-
-             input_embeds_list.append(input_embeds)
-             attention_mask_padded_list.append(attention_mask)
-
-         input_embeds = torch.cat(input_embeds_list, dim=0)
-         attention_mask = torch.cat(attention_mask_padded_list, dim=0)
-
-         outputs = self.language_model.generate(
-             inputs_embeds=input_embeds,
-             attention_mask=attention_mask,
-             generation_config=generation_config,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-             use_cache=True,
-             **generate_kwargs,
-         )
-
-         return outputs
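Taken together, the deleted chat path works in three steps: the conversation template renders the prompt, each <image> placeholder is expanded into <|vision_start|> plus per-tile identifiers (<tile_1>…, <tile_global_thumbnail>) each followed by num_image_token copies of <|vision_pad|>, and generate() overwrites the embeddings at those pad positions with the projected ViT features before delegating to the language model. A usage sketch, assuming a pre-deletion copy of this repo at a hypothetical path; note that chat() assigns generation_config['eos_token_id'], so a plain dict is expected rather than a GenerationConfig:

    import torch
    from transformers import AutoModel, AutoTokenizer

    path = 'path/to/LlavaQw'  # hypothetical pre-deletion copy of this repo
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
    model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16,
                                      trust_remote_code=True).cuda().eval()

    # Tiled image tensor expected by this repo's preprocessing: one 448x448 crop
    # per tile plus a global thumbnail; tile construction is omitted from this sketch.
    pixel_values = torch.randn(7, 3, 448, 448, dtype=torch.bfloat16).cuda()

    generation_config = dict(max_new_tokens=512, do_sample=False)
    response, history = model.chat(tokenizer, pixel_values, 'Describe this image.',
                                   generation_config, return_history=True)
    print(response)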
model/special_tokens_map.json DELETED
@@ -1,31 +0,0 @@
- {
-   "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>",
-     "<|object_ref_start|>",
-     "<|object_ref_end|>",
-     "<|box_start|>",
-     "<|box_end|>",
-     "<|quad_start|>",
-     "<|quad_end|>",
-     "<|vision_start|>",
-     "<|vision_end|>",
-     "<|vision_pad|>",
-     "<|image_pad|>",
-     "<|video_pad|>"
-   ],
-   "eos_token": {
-     "content": "<|im_end|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": {
-     "content": "<|endoftext|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   }
- }
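The chat methods above never rely on a hard-coded id at inference time; they resolve the image context token from its string form. A quick sketch of that lookup (same hypothetical pre-deletion path), which should agree with the 151654 default set in LlavaQwModel.__init__:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('path/to/LlavaQw', trust_remote_code=True)
    print(tokenizer.convert_tokens_to_ids('<|vision_pad|>'))  # expected: 151654
    print(tokenizer.eos_token)                                # '<|im_end|>' per this map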
model/tokenizer_config.json DELETED
@@ -1,207 +0,0 @@
- {
-   "add_bos_token": false,
-   "add_prefix_space": false,
-   "added_tokens_decoder": {
-     "151643": {
-       "content": "<|endoftext|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151644": {
-       "content": "<|im_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151645": {
-       "content": "<|im_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151646": {
-       "content": "<|object_ref_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151647": {
-       "content": "<|object_ref_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151648": {
-       "content": "<|box_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151649": {
-       "content": "<|box_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151650": {
-       "content": "<|quad_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151651": {
-       "content": "<|quad_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151652": {
-       "content": "<|vision_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151653": {
-       "content": "<|vision_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151654": {
-       "content": "<|vision_pad|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151655": {
-       "content": "<|image_pad|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151656": {
-       "content": "<|video_pad|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "151657": {
-       "content": "<tool_call>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151658": {
-       "content": "</tool_call>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151659": {
-       "content": "<|fim_prefix|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151660": {
-       "content": "<|fim_middle|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151661": {
-       "content": "<|fim_suffix|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151662": {
-       "content": "<|fim_pad|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151663": {
-       "content": "<|repo_name|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     },
-     "151664": {
-       "content": "<|file_sep|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": false
-     }
-   },
-   "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>",
-     "<|object_ref_start|>",
-     "<|object_ref_end|>",
-     "<|box_start|>",
-     "<|box_end|>",
-     "<|quad_start|>",
-     "<|quad_end|>",
-     "<|vision_start|>",
-     "<|vision_end|>",
-     "<|vision_pad|>",
-     "<|image_pad|>",
-     "<|video_pad|>"
-   ],
-   "bos_token": null,
-   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "<|im_end|>",
-   "errors": "replace",
-   "model_max_length": 131072,
-   "pad_token": "<|endoftext|>",
-   "split_special_tokens": false,
-   "tokenizer_class": "Qwen2Tokenizer",
-   "unk_token": null
- }
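The chat_template embedded above matches the stock Qwen2.5 ChatML template, so the deleted tokenizer could still render prompts directly. A sketch (hypothetical pre-deletion path again) of what apply_chat_template produces for a single user turn:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('path/to/LlavaQw', trust_remote_code=True)
    messages = [{'role': 'user', 'content': 'Hello!'}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(prompt)
    # <|im_start|>system
    # You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
    # <|im_start|>user
    # Hello!<|im_end|>
    # <|im_start|>assistant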
model/vocab.json DELETED
The diff for this file is too large to render. See raw diff