Upload folder
- .gitattributes +6 -0
- blobs/119e9b54b859f352c00610e44376f0b82697b5eb +416 -0
- blobs/1f25bfd9ae759cb52f03d1814ef9d72aac0ed2eb +90 -0
- blobs/1fa4abb1ce00765aa78c4714c71c65c57e46706564aa8f908e78d7c6fa51d07e +3 -0
- blobs/2352de0c55e3d6031a30d6b643b098889448b86d +35 -0
- blobs/268b330b31e43d4adb2ec97e6901b6a758280062 +0 -0
- blobs/277ca8cb1946ec18d42326f651938ffbb714ae7c +0 -0
- blobs/2e2b3fa6ce0369f00153ea102b23fb3fa4fd8a36 +119 -0
- blobs/312440223df86c67a0794043b915a0422a685971 +70 -0
- blobs/31349551d90c7606f325fe0f11bbb8bd5fa0d7c7 +0 -0
- blobs/33b7c097f9b0e730043d3707330811a85cffc15d +39 -0
- blobs/39ab057316af49c3d81c67b80a98d72727ce686ac68ae72ce71a05fc5297b856 +3 -0
- blobs/4783fe10ac3adce15ac8f358ef5462739852c569 +0 -0
- blobs/61d1e516a33a15f1ba62a3065c8dd0efa15bc53d +7 -0
- blobs/746e9f9189d4f46b2e1c95ec60275e5e63076a1d +441 -0
- blobs/7a4a3ea2424c09fbe48d455aed1eaa94d9124835 +202 -0
- blobs/7da1e9cfdbb46b2ee0310ebb67a19c110799294f +752 -0
- blobs/80c7a2e27da60b66f80a04f3d710f99d9b9da929 +363 -0
- blobs/96805d61fbb9523fd27a09ab40451d04da09e9ba4b102341eac0184d8f82a0b1 +3 -0
- blobs/9ce20192fbe0d521d100521f1e0836c415debacb615b89f7658178420822e710 +3 -0
- blobs/a25c514d33074f195f0907523948db0428f78cda +115 -0
- blobs/a7b376e0a83f26eaa784db792ef61be7aac5494f +34 -0
- blobs/b2f155131ba1b6cb1664845ddde157100a30a2c5 +72 -0
- blobs/bbf7935cd9e3744c905911ff0971ec1fbebf17e1 +390 -0
- blobs/c47c173a6ee6ba9cdc52eafd51b7e6d679293b38 +1047 -0
- blobs/e1219ef85875905368b39e3fe383d72fc6539ade5abf81f7cedf94a19275a345 +3 -0
- blobs/e2a59915dd6a1c51ccb11be3addf4585fcf0840ac4f63f8e9fb629db58f8db6e +3 -0
- blobs/e543bcb3b7550029f450d28cf138706d7f9a5ef5 +4 -0
- blobs/f16150710559ce7304ebcfca7232f17c7791f6c5 +91 -0
- refs/main +1 -0
.gitattributes
CHANGED
@@ -58,3 +58,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+blobs/96805d61fbb9523fd27a09ab40451d04da09e9ba4b102341eac0184d8f82a0b1 filter=lfs diff=lfs merge=lfs -text
+blobs/e2a59915dd6a1c51ccb11be3addf4585fcf0840ac4f63f8e9fb629db58f8db6e filter=lfs diff=lfs merge=lfs -text
+blobs/39ab057316af49c3d81c67b80a98d72727ce686ac68ae72ce71a05fc5297b856 filter=lfs diff=lfs merge=lfs -text
+blobs/e1219ef85875905368b39e3fe383d72fc6539ade5abf81f7cedf94a19275a345 filter=lfs diff=lfs merge=lfs -text
+blobs/9ce20192fbe0d521d100521f1e0836c415debacb615b89f7658178420822e710 filter=lfs diff=lfs merge=lfs -text
+blobs/1fa4abb1ce00765aa78c4714c71c65c57e46706564aa8f908e78d7c6fa51d07e filter=lfs diff=lfs merge=lfs -text
blobs/119e9b54b859f352c00610e44376f0b82697b5eb
ADDED
@@ -0,0 +1,416 @@
"""
Conversation prompt templates.

We kindly request that you import fastchat instead of copying this file if you wish to use it.
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.

Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""

import dataclasses
from enum import IntEnum, auto
from typing import Dict, List, Tuple, Union


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()
    INTERNVL_ZH = auto()
    MPT = auto()
    QIANFANVL = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = '{system_message}'
    # The system message
    system_message: str = ''
    # The names of two roles
    roles: Tuple[str] = ('USER', 'ASSISTANT')
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = '\n'
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ': '  # must be end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = '' if system_prompt == '' else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ': '
                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
                    )
                    ret += '\n\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = '[INST] '
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + ' '
                    else:
                        ret += tag + ' ' + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == 'chatglm2' else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ''

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'

                if message:
                    ret += f'{role}:{message}{self.sep}'
                else:
                    ret += f'{role}:'
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep + '\n'
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ''
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + ' ' + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                # if i % 2 == 0:
                #     ret += "<s>"
                if message:
                    ret += role + ':' + message + seps[i % 2] + '\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ':\n' + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += '\n\n'
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + '<s>' + message + '</s>'
                else:
                    ret += role + ': ' + '<s>'
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ':\n' + message + self.sep
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ''
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'

            return ret
        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
            seps = [self.sep, self.sep2]
            ret = self.system_message + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.QIANFANVL:
            ret = ''
            if self.system_message:
                ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{'role': 'system', 'content': self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({'role': 'user', 'content': msg})
            else:
                if msg is not None:
                    ret.append({'role': 'assistant', 'content': msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            'template_name': self.name,
            'system_message': self.system_message,
            'roles': self.roles,
            'messages': self.messages,
            'offset': self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f'{template.name} has been registered.'

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
# is that during training, the preprocessing function for the Hermes-2 template doesn't add
# <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
# Therefore, they are completely equivalent during inference.
register_conv_template(
    Conversation(
        name='Hermes-2',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
        stop_str='<|endoftext|>',
    )
)


register_conv_template(
    Conversation(
        name='internlm2-chat',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
    )
)


register_conv_template(
    Conversation(
        name='phi3-chat',
        system_template='<|system|>\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|user|>\n', '<|assistant|>\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|end|>',
    )
)


register_conv_template(
    Conversation(
        name='internvl2_5',
        system_template='<|im_start|>system\n{system_message}',
        system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>\n',
    )
)


register_conv_template(
    Conversation(
        name='qianfanvl',
        system_template='<|im_start|>system\n{system_message}',
        system_message='',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.QIANFANVL,
        sep='<|im_end|>\n',
    )
)
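
For reference, a minimal usage sketch for the template registry defined above (an illustration, not part of the commit; it assumes the blob is saved locally as conversation.py):

# Build an InternVL-style prompt from the 'internvl2_5' template registered above.
from conversation import get_conv_template

conv = get_conv_template('internvl2_5')      # returns a copy, safe to mutate
conv.append_message(conv.roles[0], 'Describe this image.')
conv.append_message(conv.roles[1], None)     # None marks the slot the model will fill
prompt = conv.get_prompt()
# With SeparatorStyle.MPT and sep='<|im_end|>\n' this yields:
#   <|im_start|>system\n{system message}<|im_end|>\n
#   <|im_start|>user\nDescribe this image.<|im_end|>\n
#   <|im_start|>assistant\n
print(prompt)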
blobs/1f25bfd9ae759cb52f03d1814ef9d72aac0ed2eb
ADDED
@@ -0,0 +1,90 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0].role == 'system' %}
        {{- messages[0].content + '\n\n' }}
    {%- endif %}
    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0].role == 'system' %}
        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
    {%- set index = (messages|length - 1) - loop.index0 %}
    {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
        {%- set ns.multi_step_tool = false %}
        {%- set ns.last_query_index = index %}
    {%- endif %}
{%- endfor %}
{%- for message in messages %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
        {%- set append_think = (
            enable_thinking is defined and enable_thinking and
            message.role == "user" and loop.index0 == ns.last_query_index
        ) %}
        {{- '<|im_start|>' + message.role + '\n' + message.content }}
        {%- if append_think %}
            {{- '\n<think>' }}
        {%- endif %}
        {{- '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {%- set content = message.content %}
        {%- set reasoning_content = '' %}
        {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
            {%- set reasoning_content = message.reasoning_content %}
        {%- else %}
            {%- if '</think>' in message.content %}
                {%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
                {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
            {%- endif %}
        {%- endif %}
        {%- if loop.index0 > ns.last_query_index %}
            {%- if loop.last or (not loop.last and reasoning_content) %}
                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
            {%- else %}
                {{- '<|im_start|>' + message.role + '\n' + content }}
            {%- endif %}
        {%- else %}
            {{- '<|im_start|>' + message.role + '\n' + content }}
        {%- endif %}
        {%- if message.tool_calls %}
            {%- for tool_call in message.tool_calls %}
                {%- if (loop.first and content) or (not loop.first) %}
                    {{- '\n' }}
                {%- endif %}
                {%- if tool_call.function %}
                    {%- set tool_call = tool_call.function %}
                {%- endif %}
                {{- '<tool_call>\n{"name": "' }}
                {{- tool_call.name }}
                {{- '", "arguments": ' }}
                {%- if tool_call.arguments is string %}
                    {{- tool_call.arguments }}
                {%- else %}
                    {{- tool_call.arguments | tojson }}
                {%- endif %}
                {{- '}\n</tool_call>' }}
            {%- endfor %}
        {%- endif %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- message.content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
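
This blob is a ChatML-style Jinja chat template with Qwen-style tool calling and <think> handling. A sketch of how such a template is exercised through transformers (illustrative only; 'Qwen/Qwen3-0.6B' is a stand-in checkpoint with a similar template, not this repository):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('Qwen/Qwen3-0.6B')
messages = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Read the text in this document.'},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# add_generation_prompt=True appends '<|im_start|>assistant\n', matching the
# final {%- if add_generation_prompt %} branch of the template above.
print(text)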
blobs/1fa4abb1ce00765aa78c4714c71c65c57e46706564aa8f908e78d7c6fa51d07e
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1fa4abb1ce00765aa78c4714c71c65c57e46706564aa8f908e78d7c6fa51d07e
size 143158
blobs/2352de0c55e3d6031a30d6b643b098889448b86d
ADDED
@@ -0,0 +1,35 @@
Qianfan-OCR
Copyright (c) Baidu, Inc.

Unless otherwise noted, the contents of this repository are licensed under
the Apache License 2.0. See the LICENSE file for the full Apache 2.0 text.

The following files include code originating from InternVL by OpenGVLab and
are licensed under the MIT License:

- configuration_intern_vit.py
- configuration_internvl_chat.py
- modeling_intern_vit.py
- modeling_internvl_chat.py

MIT License

Copyright (c) 2024 OpenGVLab

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
blobs/268b330b31e43d4adb2ec97e6901b6a758280062
ADDED
The diff for this file is too large to render.
blobs/277ca8cb1946ec18d42326f651938ffbb714ae7c
ADDED
The diff for this file is too large to render.
blobs/2e2b3fa6ce0369f00153ea102b23fb3fa4fd8a36
ADDED
@@ -0,0 +1,119 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see NOTICE for details]
# --------------------------------------------------------
import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_6b'

    def __init__(
            self,
            num_channels=3,
            patch_size=14,
            image_size=224,
            qkv_bias=False,
            hidden_size=3200,
            num_attention_heads=25,
            intermediate_size=12800,
            qk_normalization=True,
            num_hidden_layers=48,
            use_flash_attn=True,
            hidden_act='gelu',
            norm_type='rms_norm',
            layer_norm_eps=1e-6,
            dropout=0.0,
            drop_path_rate=0.0,
            attention_dropout=0.0,
            initializer_range=0.02,
            initializer_factor=0.1,
            **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
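
A short sketch of instantiating the config above (illustrative; assumes the blob is saved locally as configuration_intern_vit.py, and the image_size override is only an example, not something this diff asserts):

from configuration_intern_vit import InternVisionConfig

cfg = InternVisionConfig(image_size=448)  # other fields keep the defaults documented above
# 448 // 14 = 32 patches per side -> 1024 patch tokens, plus one CLS position
print((cfg.image_size // cfg.patch_size) ** 2 + 1)  # 1025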
blobs/312440223df86c67a0794043b915a0422a685971
ADDED
@@ -0,0 +1,70 @@
{
  "_valid_kwargs_names": [
    "do_convert_rgb",
    "do_resize",
    "size",
    "size_divisor",
    "default_to_square",
    "resample",
    "do_rescale",
    "rescale_factor",
    "do_normalize",
    "image_mean",
    "image_std",
    "do_pad",
    "do_center_crop",
    "crop_size",
    "data_format",
    "input_data_format",
    "device"
  ],
  "crop_size": null,
  "data_format": "channels_first",
  "default_to_square": true,
  "device": null,
  "do_center_crop": null,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_pad": null,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "input_data_format": null,
  "model_valid_processing_keys": [
    "do_convert_rgb",
    "do_resize",
    "size",
    "size_divisor",
    "default_to_square",
    "resample",
    "do_rescale",
    "rescale_factor",
    "do_normalize",
    "image_mean",
    "image_std",
    "do_pad",
    "do_center_crop",
    "crop_size",
    "data_format",
    "input_data_format",
    "device"
  ],
  "processor_class": "InternVLProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 384,
    "width": 384
  },
  "size_divisor": null,
  "video_processor_type": "InternVLVideoProcessor"
}
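
The values above encode a standard CLIP-style pipeline: rescale_factor 0.00392156862745098 is 1/255, and image_mean/image_std are the OpenAI CLIP statistics. A minimal NumPy sketch of the pixel path this config describes:

import numpy as np

image_mean = np.array([0.48145466, 0.4578275, 0.40821073])
image_std = np.array([0.26862954, 0.26130258, 0.27577711])

pixels = np.random.randint(0, 256, (384, 384, 3)).astype(np.float32)  # stand-in for a resized image
pixels = pixels * 0.00392156862745098        # do_rescale with rescale_factor == 1/255
pixels = (pixels - image_mean) / image_std   # do_normalize, per channel
pixels = pixels.transpose(2, 0, 1)           # data_format == "channels_first"
print(pixels.shape)                          # (3, 384, 384)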
blobs/31349551d90c7606f325fe0f11bbb8bd5fa0d7c7
ADDED
The diff for this file is too large to render.
blobs/33b7c097f9b0e730043d3707330811a85cffc15d
ADDED
@@ -0,0 +1,39 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
examples/complex_document.jpg filter=lfs diff=lfs merge=lfs -text
examples/document.png filter=lfs diff=lfs merge=lfs -text
examples/invoice.jpg filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
blobs/39ab057316af49c3d81c67b80a98d72727ce686ac68ae72ce71a05fc5297b856
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:39ab057316af49c3d81c67b80a98d72727ce686ac68ae72ce71a05fc5297b856
size 248735
blobs/4783fe10ac3adce15ac8f358ef5462739852c569
ADDED
The diff for this file is too large to render.
blobs/61d1e516a33a15f1ba62a3065c8dd0efa15bc53d
ADDED
@@ -0,0 +1,7 @@
{
  "_from_model_config": true,
  "do_sample": false,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "transformers_version": "4.55.0"
}
blobs/746e9f9189d4f46b2e1c95ec60275e5e63076a1d
ADDED
|
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# InternVL
|
| 3 |
+
# Copyright (c) 2024 OpenGVLab
|
| 4 |
+
# Licensed under The MIT License [see NOTICE for details]
|
| 5 |
+
# --------------------------------------------------------
|
| 6 |
+
|
| 7 |
+
from typing import Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
import torch.utils.checkpoint
|
| 12 |
+
from einops import rearrange
|
| 13 |
+
from timm.layers import DropPath
|
| 14 |
+
from torch import nn
|
| 15 |
+
from transformers.activations import ACT2FN
|
| 16 |
+
from transformers.modeling_outputs import (BaseModelOutput,
|
| 17 |
+
BaseModelOutputWithPooling)
|
| 18 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 19 |
+
from transformers.utils import logging
|
| 20 |
+
|
| 21 |
+
from .configuration_intern_vit import InternVisionConfig
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
from flash_attn.bert_padding import pad_input, unpad_input
|
| 25 |
+
from flash_attn.flash_attn_interface import \
|
| 26 |
+
flash_attn_varlen_qkvpacked_func
|
| 27 |
+
has_flash_attn = True
|
| 28 |
+
except:
|
| 29 |
+
print('FlashAttention2 is not installed.')
|
| 30 |
+
has_flash_attn = False
|
| 31 |
+
|
| 32 |
+
logger = logging.get_logger(__name__)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class FlashAttention(nn.Module):
|
| 36 |
+
"""Implement the scaled dot product attention with softmax.
|
| 37 |
+
Arguments
|
| 38 |
+
---------
|
| 39 |
+
softmax_scale: The temperature to use for the softmax attention.
|
| 40 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 41 |
+
runtime)
|
| 42 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 43 |
+
(default: 0.0)
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
|
| 47 |
+
super().__init__()
|
| 48 |
+
self.softmax_scale = softmax_scale
|
| 49 |
+
self.dropout_p = attention_dropout
|
| 50 |
+
|
| 51 |
+
def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
|
| 52 |
+
max_s=None, need_weights=False):
|
| 53 |
+
"""Implements the multihead softmax attention.
|
| 54 |
+
Arguments
|
| 55 |
+
---------
|
| 56 |
+
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
|
| 57 |
+
if unpadded: (nnz, 3, h, d)
|
| 58 |
+
key_padding_mask: a bool tensor of shape (B, S)
|
| 59 |
+
"""
|
| 60 |
+
assert not need_weights
|
| 61 |
+
assert qkv.dtype in [torch.float16, torch.bfloat16]
|
| 62 |
+
assert qkv.is_cuda
|
| 63 |
+
|
| 64 |
+
if cu_seqlens is None:
|
| 65 |
+
batch_size = qkv.shape[0]
|
| 66 |
+
seqlen = qkv.shape[1]
|
| 67 |
+
if key_padding_mask is None:
|
| 68 |
+
qkv = rearrange(qkv, 'b s ... -> (b s) ...')
|
| 69 |
+
max_s = seqlen
|
| 70 |
+
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
|
| 71 |
+
device=qkv.device)
|
| 72 |
+
output = flash_attn_varlen_qkvpacked_func(
|
| 73 |
+
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
|
| 74 |
+
softmax_scale=self.softmax_scale, causal=causal
|
| 75 |
+
)
|
| 76 |
+
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
|
| 77 |
+
else:
|
| 78 |
+
nheads = qkv.shape[-2]
|
| 79 |
+
x = rearrange(qkv, 'b s three h d -> b s (three h d)')
|
| 80 |
+
x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
|
| 81 |
+
x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
|
| 82 |
+
output_unpad = flash_attn_varlen_qkvpacked_func(
|
| 83 |
+
x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
|
| 84 |
+
softmax_scale=self.softmax_scale, causal=causal
|
| 85 |
+
)
|
| 86 |
+
output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
|
| 87 |
+
indices, batch_size, seqlen),
|
| 88 |
+
'b s (h d) -> b s h d', h=nheads)
|
| 89 |
+
else:
|
| 90 |
+
assert max_s is not None
|
| 91 |
+
output = flash_attn_varlen_qkvpacked_func(
|
| 92 |
+
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
|
| 93 |
+
softmax_scale=self.softmax_scale, causal=causal
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
return output, None
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class InternRMSNorm(nn.Module):
|
| 100 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 101 |
+
super().__init__()
|
| 102 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 103 |
+
self.variance_epsilon = eps
|
| 104 |
+
|
| 105 |
+
def forward(self, hidden_states):
|
| 106 |
+
input_dtype = hidden_states.dtype
|
| 107 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 108 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 109 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 110 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
try:
|
| 114 |
+
from apex.normalization import FusedRMSNorm
|
| 115 |
+
|
| 116 |
+
InternRMSNorm = FusedRMSNorm # noqa
|
| 117 |
+
|
| 118 |
+
logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
|
| 119 |
+
except ImportError:
|
| 120 |
+
# using the normal InternRMSNorm
|
| 121 |
+
pass
|
| 122 |
+
except Exception:
|
| 123 |
+
logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
|
| 124 |
+
pass
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
NORM2FN = {
|
| 128 |
+
'rms_norm': InternRMSNorm,
|
| 129 |
+
'layer_norm': nn.LayerNorm,
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class InternVisionEmbeddings(nn.Module):
|
| 134 |
+
def __init__(self, config: InternVisionConfig):
|
| 135 |
+
super().__init__()
|
| 136 |
+
self.config = config
|
| 137 |
+
self.embed_dim = config.hidden_size
|
| 138 |
+
self.image_size = config.image_size
|
| 139 |
+
self.patch_size = config.patch_size
|
| 140 |
+
|
| 141 |
+
self.class_embedding = nn.Parameter(
|
| 142 |
+
torch.randn(1, 1, self.embed_dim),
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
self.patch_embedding = nn.Conv2d(
|
| 146 |
+
in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
self.num_patches = (self.image_size // self.patch_size) ** 2
|
| 150 |
+
self.num_positions = self.num_patches + 1
|
| 151 |
+
|
| 152 |
+
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
|
| 153 |
+
|
| 154 |
+
def _get_pos_embed(self, pos_embed, H, W):
|
| 155 |
+
target_dtype = pos_embed.dtype
|
| 156 |
+
pos_embed = pos_embed.float().reshape(
|
| 157 |
+
1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
|
| 158 |
+
pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
|
| 159 |
+
reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
|
| 160 |
+
return pos_embed
|
| 161 |
+
|
| 162 |
+
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
| 163 |
+
target_dtype = self.patch_embedding.weight.dtype
|
| 164 |
+
patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
|
| 165 |
+
batch_size, _, height, width = patch_embeds.shape
|
| 166 |
+
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
|
| 167 |
+
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
|
| 168 |
+
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
|
| 169 |
+
position_embedding = torch.cat([
|
| 170 |
+
self.position_embedding[:, :1, :],
|
| 171 |
+
self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
|
| 172 |
+
], dim=1)
|
| 173 |
+
embeddings = embeddings + position_embedding.to(target_dtype)
|
| 174 |
+
return embeddings
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class InternAttention(nn.Module):
|
| 178 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 179 |
+
|
| 180 |
+
def __init__(self, config: InternVisionConfig):
|
| 181 |
+
super().__init__()
|
| 182 |
+
self.config = config
|
| 183 |
+
self.embed_dim = config.hidden_size
|
| 184 |
+
self.num_heads = config.num_attention_heads
|
| 185 |
+
self.use_flash_attn = config.use_flash_attn and has_flash_attn
|
| 186 |
+
if config.use_flash_attn and not has_flash_attn:
|
| 187 |
+
print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
|
| 188 |
+
self.head_dim = self.embed_dim // self.num_heads
|
| 189 |
+
if self.head_dim * self.num_heads != self.embed_dim:
|
| 190 |
+
raise ValueError(
|
| 191 |
+
f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
|
| 192 |
+
f' {self.num_heads}).'
|
| 193 |
+
)
|
| 194 |
+
|
| 195 |
+
self.scale = self.head_dim ** -0.5
|
| 196 |
+
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
|
| 197 |
+
self.attn_drop = nn.Dropout(config.attention_dropout)
|
| 198 |
+
self.proj_drop = nn.Dropout(config.dropout)
|
| 199 |
+
|
| 200 |
+
self.qk_normalization = config.qk_normalization
|
| 201 |
+
|
| 202 |
+
if self.qk_normalization:
|
| 203 |
+
self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| 204 |
+
self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
|
| 205 |
+
|
| 206 |
+
if self.use_flash_attn:
|
| 207 |
+
self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
|
| 208 |
+
self.proj = nn.Linear(self.embed_dim, self.embed_dim)
|
| 209 |
+
|
| 210 |
+
def _naive_attn(self, x):
|
| 211 |
+
B, N, C = x.shape
|
| 212 |
+
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
| 213 |
+
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
|
| 214 |
+
|
| 215 |
+
if self.qk_normalization:
|
| 216 |
+
B_, H_, N_, D_ = q.shape
|
| 217 |
+
q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
|
| 218 |
+
k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
|
| 219 |
+
|
| 220 |
+
attn = ((q * self.scale) @ k.transpose(-2, -1))
|
| 221 |
+
attn = attn.softmax(dim=-1)
|
| 222 |
+
attn = self.attn_drop(attn)
|
| 223 |
+
|
| 224 |
+
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
|
| 225 |
+
x = self.proj(x)
|
| 226 |
+
x = self.proj_drop(x)
|
| 227 |
+
return x
|
| 228 |
+
|
| 229 |
+
def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
|
| 230 |
+
qkv = self.qkv(x)
|
| 231 |
+
qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
|
| 232 |
+
|
| 233 |
+
if self.qk_normalization:
|
| 234 |
+
q, k, v = qkv.unbind(2)
|
| 235 |
+
q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
|
| 236 |
+
k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
|
| 237 |
+
qkv = torch.stack([q, k, v], dim=2)
|
| 238 |
+
|
| 239 |
+
context, _ = self.inner_attn(
|
| 240 |
+
qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
|
| 241 |
+
)
|
| 242 |
+
outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
|
| 243 |
+
outs = self.proj_drop(outs)
|
| 244 |
+
return outs
|
| 245 |
+
|
| 246 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 247 |
+
x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
|
| 248 |
+
return x
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class InternMLP(nn.Module):
|
| 252 |
+
def __init__(self, config: InternVisionConfig):
|
| 253 |
+
super().__init__()
|
| 254 |
+
self.config = config
|
| 255 |
+
self.act = ACT2FN[config.hidden_act]
|
| 256 |
+
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
|
| 257 |
+
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
|
| 258 |
+
|
| 259 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 260 |
+
hidden_states = self.fc1(hidden_states)
|
| 261 |
+
hidden_states = self.act(hidden_states)
|
| 262 |
+
hidden_states = self.fc2(hidden_states)
|
| 263 |
+
return hidden_states
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states).to(hidden_states.dtype)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states).to(hidden_states.dtype)) * self.ls2)

        return hidden_states
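Each residual branch in the layer above is modulated by a learned per-channel layer scale (ls1/ls2) and by stochastic depth. A minimal standalone sketch of that pattern, assuming DropPath comes from timm as is usual for this code, with hypothetical sizes:

import torch
import torch.nn as nn
from timm.models.layers import DropPath  # assumed source of DropPath

dim = 32                                   # hypothetical embed_dim
ls = nn.Parameter(0.1 * torch.ones(dim))   # per-channel layer scale
drop_path = DropPath(0.2)                  # randomly zeroes whole residual branches in training

x = torch.randn(4, 16, dim)
branch = torch.randn(4, 16, dim)           # stand-in for the attn/MLP output

# Same shape of residual update as in InternVisionEncoderLayer.forward
out = x + drop_path(branch * ls)
assert out.shape == x.shape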
class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is an
    [`InternVisionEncoderLayer`].

    Args:
        config (`InternVisionConfig`):
            The corresponding vision configuration for the `InternVisionEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config

        # Stochastic depth: scale the drop-path rate linearly from 0 at the
        # first layer up to config.drop_path_rate at the last layer.
        n = config.num_hidden_layers
        rate = float(config.drop_path_rate)

        if n <= 1:
            dpr = [0.0] * n
        else:
            step = rate / (n - 1)
            dpr = [step * idx for idx in range(n)]

        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(n)])
        self.gradient_checkpointing = True

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )
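With hypothetical values, the stochastic-depth schedule computed in __init__ above looks like this:

n, rate = 5, 0.2  # hypothetical num_hidden_layers and drop_path_rate
step = rate / (n - 1)
dpr = [step * idx for idx in range(n)]
# -> [0.0, 0.05, 0.10, 0.15, 0.20] (up to float rounding): deeper layers drop more often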
class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']
    # support transformers 4.51.+
    _tp_plan = ''

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings
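resize_pos_embeddings above treats the non-CLS position embeddings as a square grid and resamples it bicubically. The core step in isolation (standalone sketch with hypothetical sizes, not part of this file):

import torch
import torch.nn.functional as F

embed_dim, old_grid, new_grid = 1024, 16, 32   # hypothetical
pos = torch.randn(1, old_grid * old_grid, embed_dim)

# (1, N, C) -> (1, C, H, W), resample, then back to (1, N', C).
grid = pos.reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=new_grid, mode='bicubic', align_corners=False)
pos_new = grid.reshape(1, embed_dim, -1).permute(0, 2, 1)
assert pos_new.shape == (1, new_grid * new_grid, embed_dim)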
    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # The encoder returns a plain tuple when return_dict=False, so index
        # the last hidden state instead of using attribute access.
        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
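A hedged usage sketch for the vision model defined above. The repo id, dtype, and input resolution are hypothetical; trust_remote_code is needed because this is custom modeling code:

import torch
from transformers import AutoModel

# Hypothetical repo id; loading routes through the custom InternVisionModel above.
model = AutoModel.from_pretrained('OpenGVLab/InternViT-example',
                                  torch_dtype=torch.bfloat16,
                                  trust_remote_code=True).eval()

pixel_values = torch.rand(1, 3, 448, 448, dtype=torch.bfloat16)  # hypothetical input size
with torch.no_grad():
    out = model(pixel_values=pixel_values)
print(out.last_hidden_state.shape)  # (1, num_patches + 1, hidden_size)
print(out.pooler_output.shape)      # (1, hidden_size): the CLS token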
blobs/7a4a3ea2424c09fbe48d455aed1eaa94d9124835
ADDED
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
blobs/7da1e9cfdbb46b2ee0310ebb67a19c110799294f
ADDED
@@ -0,0 +1,752 @@
{
  "metadata": {
    "total_size": 9464978432
  },
  "weight_map": {
    "language_model.lm_head.weight": "model-00001-of-00002.safetensors",
    "language_model.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 330 |
+
"language_model.model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 331 |
+
"language_model.model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 332 |
+
"language_model.model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 333 |
+
"language_model.model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 334 |
+
"language_model.model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 335 |
+
"language_model.model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 336 |
+
"language_model.model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 337 |
+
"language_model.model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 338 |
+
"language_model.model.layers.4.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 339 |
+
"language_model.model.layers.4.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 340 |
+
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 341 |
+
"language_model.model.layers.4.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 342 |
+
"language_model.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 343 |
+
"language_model.model.layers.4.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 344 |
+
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 345 |
+
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 346 |
+
"language_model.model.layers.4.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 347 |
+
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 348 |
+
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 349 |
+
"language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 350 |
+
"language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 351 |
+
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 352 |
+
"language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 353 |
+
"language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 354 |
+
"language_model.model.layers.5.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 355 |
+
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 356 |
+
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 357 |
+
"language_model.model.layers.5.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 358 |
+
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 359 |
+
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 360 |
+
"language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 361 |
+
"language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 362 |
+
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 363 |
+
"language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 364 |
+
"language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 365 |
+
"language_model.model.layers.6.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 366 |
+
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 367 |
+
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 368 |
+
"language_model.model.layers.6.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 369 |
+
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 370 |
+
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 371 |
+
"language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 372 |
+
"language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 373 |
+
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 374 |
+
"language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 375 |
+
"language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 376 |
+
"language_model.model.layers.7.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 377 |
+
"language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 378 |
+
"language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 379 |
+
"language_model.model.layers.7.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 380 |
+
"language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 381 |
+
"language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 382 |
+
"language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 383 |
+
"language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 384 |
+
"language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 385 |
+
"language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 386 |
+
"language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 387 |
+
"language_model.model.layers.8.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 388 |
+
"language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 389 |
+
"language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 390 |
+
"language_model.model.layers.8.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 391 |
+
"language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 392 |
+
"language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 393 |
+
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 394 |
+
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
| 395 |
+
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
| 396 |
+
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
| 397 |
+
"language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
| 398 |
+
"language_model.model.layers.9.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
|
| 399 |
+
"language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
| 400 |
+
"language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
| 401 |
+
"language_model.model.layers.9.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
|
| 402 |
+
"language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
| 403 |
+
"language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
| 404 |
+
"language_model.model.norm.weight": "model-00002-of-00002.safetensors",
|
| 405 |
+
"mlp1.0.bias": "model-00002-of-00002.safetensors",
|
| 406 |
+
"mlp1.0.weight": "model-00002-of-00002.safetensors",
|
| 407 |
+
"mlp1.1.bias": "model-00002-of-00002.safetensors",
|
| 408 |
+
"mlp1.1.weight": "model-00002-of-00002.safetensors",
|
| 409 |
+
"mlp1.3.bias": "model-00002-of-00002.safetensors",
|
| 410 |
+
"mlp1.3.weight": "model-00002-of-00002.safetensors",
|
| 411 |
+
"vision_model.embeddings.class_embedding": "model-00002-of-00002.safetensors",
|
| 412 |
+
"vision_model.embeddings.patch_embedding.bias": "model-00002-of-00002.safetensors",
|
| 413 |
+
"vision_model.embeddings.patch_embedding.weight": "model-00002-of-00002.safetensors",
|
| 414 |
+
"vision_model.embeddings.position_embedding": "model-00002-of-00002.safetensors",
|
| 415 |
+
"vision_model.encoder.layers.0.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 416 |
+
"vision_model.encoder.layers.0.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 417 |
+
"vision_model.encoder.layers.0.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 418 |
+
"vision_model.encoder.layers.0.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 419 |
+
"vision_model.encoder.layers.0.ls1": "model-00002-of-00002.safetensors",
|
| 420 |
+
"vision_model.encoder.layers.0.ls2": "model-00002-of-00002.safetensors",
|
| 421 |
+
"vision_model.encoder.layers.0.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 422 |
+
"vision_model.encoder.layers.0.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 423 |
+
"vision_model.encoder.layers.0.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 424 |
+
"vision_model.encoder.layers.0.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 425 |
+
"vision_model.encoder.layers.0.norm1.bias": "model-00002-of-00002.safetensors",
|
| 426 |
+
"vision_model.encoder.layers.0.norm1.weight": "model-00002-of-00002.safetensors",
|
| 427 |
+
"vision_model.encoder.layers.0.norm2.bias": "model-00002-of-00002.safetensors",
|
| 428 |
+
"vision_model.encoder.layers.0.norm2.weight": "model-00002-of-00002.safetensors",
|
| 429 |
+
"vision_model.encoder.layers.1.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 430 |
+
"vision_model.encoder.layers.1.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 431 |
+
"vision_model.encoder.layers.1.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 432 |
+
"vision_model.encoder.layers.1.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 433 |
+
"vision_model.encoder.layers.1.ls1": "model-00002-of-00002.safetensors",
|
| 434 |
+
"vision_model.encoder.layers.1.ls2": "model-00002-of-00002.safetensors",
|
| 435 |
+
"vision_model.encoder.layers.1.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 436 |
+
"vision_model.encoder.layers.1.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 437 |
+
"vision_model.encoder.layers.1.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 438 |
+
"vision_model.encoder.layers.1.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 439 |
+
"vision_model.encoder.layers.1.norm1.bias": "model-00002-of-00002.safetensors",
|
| 440 |
+
"vision_model.encoder.layers.1.norm1.weight": "model-00002-of-00002.safetensors",
|
| 441 |
+
"vision_model.encoder.layers.1.norm2.bias": "model-00002-of-00002.safetensors",
|
| 442 |
+
"vision_model.encoder.layers.1.norm2.weight": "model-00002-of-00002.safetensors",
|
| 443 |
+
"vision_model.encoder.layers.10.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 444 |
+
"vision_model.encoder.layers.10.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 445 |
+
"vision_model.encoder.layers.10.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 446 |
+
"vision_model.encoder.layers.10.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 447 |
+
"vision_model.encoder.layers.10.ls1": "model-00002-of-00002.safetensors",
|
| 448 |
+
"vision_model.encoder.layers.10.ls2": "model-00002-of-00002.safetensors",
|
| 449 |
+
"vision_model.encoder.layers.10.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 450 |
+
"vision_model.encoder.layers.10.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 451 |
+
"vision_model.encoder.layers.10.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 452 |
+
"vision_model.encoder.layers.10.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 453 |
+
"vision_model.encoder.layers.10.norm1.bias": "model-00002-of-00002.safetensors",
|
| 454 |
+
"vision_model.encoder.layers.10.norm1.weight": "model-00002-of-00002.safetensors",
|
| 455 |
+
"vision_model.encoder.layers.10.norm2.bias": "model-00002-of-00002.safetensors",
|
| 456 |
+
"vision_model.encoder.layers.10.norm2.weight": "model-00002-of-00002.safetensors",
|
| 457 |
+
"vision_model.encoder.layers.11.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 458 |
+
"vision_model.encoder.layers.11.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 459 |
+
"vision_model.encoder.layers.11.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 460 |
+
"vision_model.encoder.layers.11.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 461 |
+
"vision_model.encoder.layers.11.ls1": "model-00002-of-00002.safetensors",
|
| 462 |
+
"vision_model.encoder.layers.11.ls2": "model-00002-of-00002.safetensors",
|
| 463 |
+
"vision_model.encoder.layers.11.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 464 |
+
"vision_model.encoder.layers.11.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 465 |
+
"vision_model.encoder.layers.11.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 466 |
+
"vision_model.encoder.layers.11.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 467 |
+
"vision_model.encoder.layers.11.norm1.bias": "model-00002-of-00002.safetensors",
|
| 468 |
+
"vision_model.encoder.layers.11.norm1.weight": "model-00002-of-00002.safetensors",
|
| 469 |
+
"vision_model.encoder.layers.11.norm2.bias": "model-00002-of-00002.safetensors",
|
| 470 |
+
"vision_model.encoder.layers.11.norm2.weight": "model-00002-of-00002.safetensors",
|
| 471 |
+
"vision_model.encoder.layers.12.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 472 |
+
"vision_model.encoder.layers.12.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 473 |
+
"vision_model.encoder.layers.12.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 474 |
+
"vision_model.encoder.layers.12.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 475 |
+
"vision_model.encoder.layers.12.ls1": "model-00002-of-00002.safetensors",
|
| 476 |
+
"vision_model.encoder.layers.12.ls2": "model-00002-of-00002.safetensors",
|
| 477 |
+
"vision_model.encoder.layers.12.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 478 |
+
"vision_model.encoder.layers.12.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 479 |
+
"vision_model.encoder.layers.12.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 480 |
+
"vision_model.encoder.layers.12.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 481 |
+
"vision_model.encoder.layers.12.norm1.bias": "model-00002-of-00002.safetensors",
|
| 482 |
+
"vision_model.encoder.layers.12.norm1.weight": "model-00002-of-00002.safetensors",
|
| 483 |
+
"vision_model.encoder.layers.12.norm2.bias": "model-00002-of-00002.safetensors",
|
| 484 |
+
"vision_model.encoder.layers.12.norm2.weight": "model-00002-of-00002.safetensors",
|
| 485 |
+
"vision_model.encoder.layers.13.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 486 |
+
"vision_model.encoder.layers.13.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 487 |
+
"vision_model.encoder.layers.13.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 488 |
+
"vision_model.encoder.layers.13.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 489 |
+
"vision_model.encoder.layers.13.ls1": "model-00002-of-00002.safetensors",
|
| 490 |
+
"vision_model.encoder.layers.13.ls2": "model-00002-of-00002.safetensors",
|
| 491 |
+
"vision_model.encoder.layers.13.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 492 |
+
"vision_model.encoder.layers.13.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 493 |
+
"vision_model.encoder.layers.13.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 494 |
+
"vision_model.encoder.layers.13.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 495 |
+
"vision_model.encoder.layers.13.norm1.bias": "model-00002-of-00002.safetensors",
|
| 496 |
+
"vision_model.encoder.layers.13.norm1.weight": "model-00002-of-00002.safetensors",
|
| 497 |
+
"vision_model.encoder.layers.13.norm2.bias": "model-00002-of-00002.safetensors",
|
| 498 |
+
"vision_model.encoder.layers.13.norm2.weight": "model-00002-of-00002.safetensors",
|
| 499 |
+
"vision_model.encoder.layers.14.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 500 |
+
"vision_model.encoder.layers.14.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 501 |
+
"vision_model.encoder.layers.14.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 502 |
+
"vision_model.encoder.layers.14.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 503 |
+
"vision_model.encoder.layers.14.ls1": "model-00002-of-00002.safetensors",
|
| 504 |
+
"vision_model.encoder.layers.14.ls2": "model-00002-of-00002.safetensors",
|
| 505 |
+
"vision_model.encoder.layers.14.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 506 |
+
"vision_model.encoder.layers.14.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 507 |
+
"vision_model.encoder.layers.14.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 508 |
+
"vision_model.encoder.layers.14.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 509 |
+
"vision_model.encoder.layers.14.norm1.bias": "model-00002-of-00002.safetensors",
|
| 510 |
+
"vision_model.encoder.layers.14.norm1.weight": "model-00002-of-00002.safetensors",
|
| 511 |
+
"vision_model.encoder.layers.14.norm2.bias": "model-00002-of-00002.safetensors",
|
| 512 |
+
"vision_model.encoder.layers.14.norm2.weight": "model-00002-of-00002.safetensors",
|
| 513 |
+
"vision_model.encoder.layers.15.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 514 |
+
"vision_model.encoder.layers.15.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 515 |
+
"vision_model.encoder.layers.15.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 516 |
+
"vision_model.encoder.layers.15.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 517 |
+
"vision_model.encoder.layers.15.ls1": "model-00002-of-00002.safetensors",
|
| 518 |
+
"vision_model.encoder.layers.15.ls2": "model-00002-of-00002.safetensors",
|
| 519 |
+
"vision_model.encoder.layers.15.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 520 |
+
"vision_model.encoder.layers.15.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 521 |
+
"vision_model.encoder.layers.15.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 522 |
+
"vision_model.encoder.layers.15.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 523 |
+
"vision_model.encoder.layers.15.norm1.bias": "model-00002-of-00002.safetensors",
|
| 524 |
+
"vision_model.encoder.layers.15.norm1.weight": "model-00002-of-00002.safetensors",
|
| 525 |
+
"vision_model.encoder.layers.15.norm2.bias": "model-00002-of-00002.safetensors",
|
| 526 |
+
"vision_model.encoder.layers.15.norm2.weight": "model-00002-of-00002.safetensors",
|
| 527 |
+
"vision_model.encoder.layers.16.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 528 |
+
"vision_model.encoder.layers.16.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 529 |
+
"vision_model.encoder.layers.16.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 530 |
+
"vision_model.encoder.layers.16.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 531 |
+
"vision_model.encoder.layers.16.ls1": "model-00002-of-00002.safetensors",
|
| 532 |
+
"vision_model.encoder.layers.16.ls2": "model-00002-of-00002.safetensors",
|
| 533 |
+
"vision_model.encoder.layers.16.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 534 |
+
"vision_model.encoder.layers.16.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 535 |
+
"vision_model.encoder.layers.16.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 536 |
+
"vision_model.encoder.layers.16.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 537 |
+
"vision_model.encoder.layers.16.norm1.bias": "model-00002-of-00002.safetensors",
|
| 538 |
+
"vision_model.encoder.layers.16.norm1.weight": "model-00002-of-00002.safetensors",
|
| 539 |
+
"vision_model.encoder.layers.16.norm2.bias": "model-00002-of-00002.safetensors",
|
| 540 |
+
"vision_model.encoder.layers.16.norm2.weight": "model-00002-of-00002.safetensors",
|
| 541 |
+
"vision_model.encoder.layers.17.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 542 |
+
"vision_model.encoder.layers.17.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 543 |
+
"vision_model.encoder.layers.17.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 544 |
+
"vision_model.encoder.layers.17.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 545 |
+
"vision_model.encoder.layers.17.ls1": "model-00002-of-00002.safetensors",
|
| 546 |
+
"vision_model.encoder.layers.17.ls2": "model-00002-of-00002.safetensors",
|
| 547 |
+
"vision_model.encoder.layers.17.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 548 |
+
"vision_model.encoder.layers.17.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 549 |
+
"vision_model.encoder.layers.17.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 550 |
+
"vision_model.encoder.layers.17.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 551 |
+
"vision_model.encoder.layers.17.norm1.bias": "model-00002-of-00002.safetensors",
|
| 552 |
+
"vision_model.encoder.layers.17.norm1.weight": "model-00002-of-00002.safetensors",
|
| 553 |
+
"vision_model.encoder.layers.17.norm2.bias": "model-00002-of-00002.safetensors",
|
| 554 |
+
"vision_model.encoder.layers.17.norm2.weight": "model-00002-of-00002.safetensors",
|
| 555 |
+
"vision_model.encoder.layers.18.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 556 |
+
"vision_model.encoder.layers.18.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 557 |
+
"vision_model.encoder.layers.18.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 558 |
+
"vision_model.encoder.layers.18.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 559 |
+
"vision_model.encoder.layers.18.ls1": "model-00002-of-00002.safetensors",
|
| 560 |
+
"vision_model.encoder.layers.18.ls2": "model-00002-of-00002.safetensors",
|
| 561 |
+
"vision_model.encoder.layers.18.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 562 |
+
"vision_model.encoder.layers.18.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 563 |
+
"vision_model.encoder.layers.18.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 564 |
+
"vision_model.encoder.layers.18.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 565 |
+
"vision_model.encoder.layers.18.norm1.bias": "model-00002-of-00002.safetensors",
|
| 566 |
+
"vision_model.encoder.layers.18.norm1.weight": "model-00002-of-00002.safetensors",
|
| 567 |
+
"vision_model.encoder.layers.18.norm2.bias": "model-00002-of-00002.safetensors",
|
| 568 |
+
"vision_model.encoder.layers.18.norm2.weight": "model-00002-of-00002.safetensors",
|
| 569 |
+
"vision_model.encoder.layers.19.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 570 |
+
"vision_model.encoder.layers.19.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 571 |
+
"vision_model.encoder.layers.19.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 572 |
+
"vision_model.encoder.layers.19.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 573 |
+
"vision_model.encoder.layers.19.ls1": "model-00002-of-00002.safetensors",
|
| 574 |
+
"vision_model.encoder.layers.19.ls2": "model-00002-of-00002.safetensors",
|
| 575 |
+
"vision_model.encoder.layers.19.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 576 |
+
"vision_model.encoder.layers.19.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 577 |
+
"vision_model.encoder.layers.19.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 578 |
+
"vision_model.encoder.layers.19.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 579 |
+
"vision_model.encoder.layers.19.norm1.bias": "model-00002-of-00002.safetensors",
|
| 580 |
+
"vision_model.encoder.layers.19.norm1.weight": "model-00002-of-00002.safetensors",
|
| 581 |
+
"vision_model.encoder.layers.19.norm2.bias": "model-00002-of-00002.safetensors",
|
| 582 |
+
"vision_model.encoder.layers.19.norm2.weight": "model-00002-of-00002.safetensors",
|
| 583 |
+
"vision_model.encoder.layers.2.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 584 |
+
"vision_model.encoder.layers.2.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 585 |
+
"vision_model.encoder.layers.2.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 586 |
+
"vision_model.encoder.layers.2.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 587 |
+
"vision_model.encoder.layers.2.ls1": "model-00002-of-00002.safetensors",
|
| 588 |
+
"vision_model.encoder.layers.2.ls2": "model-00002-of-00002.safetensors",
|
| 589 |
+
"vision_model.encoder.layers.2.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 590 |
+
"vision_model.encoder.layers.2.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 591 |
+
"vision_model.encoder.layers.2.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 592 |
+
"vision_model.encoder.layers.2.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 593 |
+
"vision_model.encoder.layers.2.norm1.bias": "model-00002-of-00002.safetensors",
|
| 594 |
+
"vision_model.encoder.layers.2.norm1.weight": "model-00002-of-00002.safetensors",
|
| 595 |
+
"vision_model.encoder.layers.2.norm2.bias": "model-00002-of-00002.safetensors",
|
| 596 |
+
"vision_model.encoder.layers.2.norm2.weight": "model-00002-of-00002.safetensors",
|
| 597 |
+
"vision_model.encoder.layers.20.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 598 |
+
"vision_model.encoder.layers.20.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 599 |
+
"vision_model.encoder.layers.20.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 600 |
+
"vision_model.encoder.layers.20.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 601 |
+
"vision_model.encoder.layers.20.ls1": "model-00002-of-00002.safetensors",
|
| 602 |
+
"vision_model.encoder.layers.20.ls2": "model-00002-of-00002.safetensors",
|
| 603 |
+
"vision_model.encoder.layers.20.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 604 |
+
"vision_model.encoder.layers.20.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 605 |
+
"vision_model.encoder.layers.20.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 606 |
+
"vision_model.encoder.layers.20.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 607 |
+
"vision_model.encoder.layers.20.norm1.bias": "model-00002-of-00002.safetensors",
|
| 608 |
+
"vision_model.encoder.layers.20.norm1.weight": "model-00002-of-00002.safetensors",
|
| 609 |
+
"vision_model.encoder.layers.20.norm2.bias": "model-00002-of-00002.safetensors",
|
| 610 |
+
"vision_model.encoder.layers.20.norm2.weight": "model-00002-of-00002.safetensors",
|
| 611 |
+
"vision_model.encoder.layers.21.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 612 |
+
"vision_model.encoder.layers.21.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 613 |
+
"vision_model.encoder.layers.21.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 614 |
+
"vision_model.encoder.layers.21.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 615 |
+
"vision_model.encoder.layers.21.ls1": "model-00002-of-00002.safetensors",
|
| 616 |
+
"vision_model.encoder.layers.21.ls2": "model-00002-of-00002.safetensors",
|
| 617 |
+
"vision_model.encoder.layers.21.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 618 |
+
"vision_model.encoder.layers.21.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 619 |
+
"vision_model.encoder.layers.21.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 620 |
+
"vision_model.encoder.layers.21.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 621 |
+
"vision_model.encoder.layers.21.norm1.bias": "model-00002-of-00002.safetensors",
|
| 622 |
+
"vision_model.encoder.layers.21.norm1.weight": "model-00002-of-00002.safetensors",
|
| 623 |
+
"vision_model.encoder.layers.21.norm2.bias": "model-00002-of-00002.safetensors",
|
| 624 |
+
"vision_model.encoder.layers.21.norm2.weight": "model-00002-of-00002.safetensors",
|
| 625 |
+
"vision_model.encoder.layers.22.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 626 |
+
"vision_model.encoder.layers.22.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 627 |
+
"vision_model.encoder.layers.22.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 628 |
+
"vision_model.encoder.layers.22.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 629 |
+
"vision_model.encoder.layers.22.ls1": "model-00002-of-00002.safetensors",
|
| 630 |
+
"vision_model.encoder.layers.22.ls2": "model-00002-of-00002.safetensors",
|
| 631 |
+
"vision_model.encoder.layers.22.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 632 |
+
"vision_model.encoder.layers.22.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 633 |
+
"vision_model.encoder.layers.22.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 634 |
+
"vision_model.encoder.layers.22.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 635 |
+
"vision_model.encoder.layers.22.norm1.bias": "model-00002-of-00002.safetensors",
|
| 636 |
+
"vision_model.encoder.layers.22.norm1.weight": "model-00002-of-00002.safetensors",
|
| 637 |
+
"vision_model.encoder.layers.22.norm2.bias": "model-00002-of-00002.safetensors",
|
| 638 |
+
"vision_model.encoder.layers.22.norm2.weight": "model-00002-of-00002.safetensors",
|
| 639 |
+
"vision_model.encoder.layers.23.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 640 |
+
"vision_model.encoder.layers.23.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 641 |
+
"vision_model.encoder.layers.23.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 642 |
+
"vision_model.encoder.layers.23.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 643 |
+
"vision_model.encoder.layers.23.ls1": "model-00002-of-00002.safetensors",
|
| 644 |
+
"vision_model.encoder.layers.23.ls2": "model-00002-of-00002.safetensors",
|
| 645 |
+
"vision_model.encoder.layers.23.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 646 |
+
"vision_model.encoder.layers.23.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 647 |
+
"vision_model.encoder.layers.23.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 648 |
+
"vision_model.encoder.layers.23.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 649 |
+
"vision_model.encoder.layers.23.norm1.bias": "model-00002-of-00002.safetensors",
|
| 650 |
+
"vision_model.encoder.layers.23.norm1.weight": "model-00002-of-00002.safetensors",
|
| 651 |
+
"vision_model.encoder.layers.23.norm2.bias": "model-00002-of-00002.safetensors",
|
| 652 |
+
"vision_model.encoder.layers.23.norm2.weight": "model-00002-of-00002.safetensors",
|
| 653 |
+
"vision_model.encoder.layers.3.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 654 |
+
"vision_model.encoder.layers.3.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 655 |
+
"vision_model.encoder.layers.3.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 656 |
+
"vision_model.encoder.layers.3.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 657 |
+
"vision_model.encoder.layers.3.ls1": "model-00002-of-00002.safetensors",
|
| 658 |
+
"vision_model.encoder.layers.3.ls2": "model-00002-of-00002.safetensors",
|
| 659 |
+
"vision_model.encoder.layers.3.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 660 |
+
"vision_model.encoder.layers.3.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 661 |
+
"vision_model.encoder.layers.3.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 662 |
+
"vision_model.encoder.layers.3.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 663 |
+
"vision_model.encoder.layers.3.norm1.bias": "model-00002-of-00002.safetensors",
|
| 664 |
+
"vision_model.encoder.layers.3.norm1.weight": "model-00002-of-00002.safetensors",
|
| 665 |
+
"vision_model.encoder.layers.3.norm2.bias": "model-00002-of-00002.safetensors",
|
| 666 |
+
"vision_model.encoder.layers.3.norm2.weight": "model-00002-of-00002.safetensors",
|
| 667 |
+
"vision_model.encoder.layers.4.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 668 |
+
"vision_model.encoder.layers.4.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 669 |
+
"vision_model.encoder.layers.4.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 670 |
+
"vision_model.encoder.layers.4.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 671 |
+
"vision_model.encoder.layers.4.ls1": "model-00002-of-00002.safetensors",
|
| 672 |
+
"vision_model.encoder.layers.4.ls2": "model-00002-of-00002.safetensors",
|
| 673 |
+
"vision_model.encoder.layers.4.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 674 |
+
"vision_model.encoder.layers.4.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 675 |
+
"vision_model.encoder.layers.4.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 676 |
+
"vision_model.encoder.layers.4.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 677 |
+
"vision_model.encoder.layers.4.norm1.bias": "model-00002-of-00002.safetensors",
|
| 678 |
+
"vision_model.encoder.layers.4.norm1.weight": "model-00002-of-00002.safetensors",
|
| 679 |
+
"vision_model.encoder.layers.4.norm2.bias": "model-00002-of-00002.safetensors",
|
| 680 |
+
"vision_model.encoder.layers.4.norm2.weight": "model-00002-of-00002.safetensors",
|
| 681 |
+
"vision_model.encoder.layers.5.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 682 |
+
"vision_model.encoder.layers.5.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 683 |
+
"vision_model.encoder.layers.5.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 684 |
+
"vision_model.encoder.layers.5.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 685 |
+
"vision_model.encoder.layers.5.ls1": "model-00002-of-00002.safetensors",
|
| 686 |
+
"vision_model.encoder.layers.5.ls2": "model-00002-of-00002.safetensors",
|
| 687 |
+
"vision_model.encoder.layers.5.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 688 |
+
"vision_model.encoder.layers.5.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 689 |
+
"vision_model.encoder.layers.5.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 690 |
+
"vision_model.encoder.layers.5.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 691 |
+
"vision_model.encoder.layers.5.norm1.bias": "model-00002-of-00002.safetensors",
|
| 692 |
+
"vision_model.encoder.layers.5.norm1.weight": "model-00002-of-00002.safetensors",
|
| 693 |
+
"vision_model.encoder.layers.5.norm2.bias": "model-00002-of-00002.safetensors",
|
| 694 |
+
"vision_model.encoder.layers.5.norm2.weight": "model-00002-of-00002.safetensors",
|
| 695 |
+
"vision_model.encoder.layers.6.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 696 |
+
"vision_model.encoder.layers.6.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 697 |
+
"vision_model.encoder.layers.6.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 698 |
+
"vision_model.encoder.layers.6.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 699 |
+
"vision_model.encoder.layers.6.ls1": "model-00002-of-00002.safetensors",
|
| 700 |
+
"vision_model.encoder.layers.6.ls2": "model-00002-of-00002.safetensors",
|
| 701 |
+
"vision_model.encoder.layers.6.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 702 |
+
"vision_model.encoder.layers.6.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 703 |
+
"vision_model.encoder.layers.6.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 704 |
+
"vision_model.encoder.layers.6.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 705 |
+
"vision_model.encoder.layers.6.norm1.bias": "model-00002-of-00002.safetensors",
|
| 706 |
+
"vision_model.encoder.layers.6.norm1.weight": "model-00002-of-00002.safetensors",
|
| 707 |
+
"vision_model.encoder.layers.6.norm2.bias": "model-00002-of-00002.safetensors",
|
| 708 |
+
"vision_model.encoder.layers.6.norm2.weight": "model-00002-of-00002.safetensors",
|
| 709 |
+
"vision_model.encoder.layers.7.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 710 |
+
"vision_model.encoder.layers.7.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 711 |
+
"vision_model.encoder.layers.7.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 712 |
+
"vision_model.encoder.layers.7.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 713 |
+
"vision_model.encoder.layers.7.ls1": "model-00002-of-00002.safetensors",
|
| 714 |
+
"vision_model.encoder.layers.7.ls2": "model-00002-of-00002.safetensors",
|
| 715 |
+
"vision_model.encoder.layers.7.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 716 |
+
"vision_model.encoder.layers.7.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 717 |
+
"vision_model.encoder.layers.7.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 718 |
+
"vision_model.encoder.layers.7.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 719 |
+
"vision_model.encoder.layers.7.norm1.bias": "model-00002-of-00002.safetensors",
|
| 720 |
+
"vision_model.encoder.layers.7.norm1.weight": "model-00002-of-00002.safetensors",
|
| 721 |
+
"vision_model.encoder.layers.7.norm2.bias": "model-00002-of-00002.safetensors",
|
| 722 |
+
"vision_model.encoder.layers.7.norm2.weight": "model-00002-of-00002.safetensors",
|
| 723 |
+
"vision_model.encoder.layers.8.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 724 |
+
"vision_model.encoder.layers.8.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 725 |
+
"vision_model.encoder.layers.8.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 726 |
+
"vision_model.encoder.layers.8.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 727 |
+
"vision_model.encoder.layers.8.ls1": "model-00002-of-00002.safetensors",
|
| 728 |
+
"vision_model.encoder.layers.8.ls2": "model-00002-of-00002.safetensors",
|
| 729 |
+
"vision_model.encoder.layers.8.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 730 |
+
"vision_model.encoder.layers.8.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 731 |
+
"vision_model.encoder.layers.8.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 732 |
+
"vision_model.encoder.layers.8.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 733 |
+
"vision_model.encoder.layers.8.norm1.bias": "model-00002-of-00002.safetensors",
|
| 734 |
+
"vision_model.encoder.layers.8.norm1.weight": "model-00002-of-00002.safetensors",
|
| 735 |
+
"vision_model.encoder.layers.8.norm2.bias": "model-00002-of-00002.safetensors",
|
| 736 |
+
"vision_model.encoder.layers.8.norm2.weight": "model-00002-of-00002.safetensors",
|
| 737 |
+
"vision_model.encoder.layers.9.attn.proj.bias": "model-00002-of-00002.safetensors",
|
| 738 |
+
"vision_model.encoder.layers.9.attn.proj.weight": "model-00002-of-00002.safetensors",
|
| 739 |
+
"vision_model.encoder.layers.9.attn.qkv.bias": "model-00002-of-00002.safetensors",
|
| 740 |
+
"vision_model.encoder.layers.9.attn.qkv.weight": "model-00002-of-00002.safetensors",
|
| 741 |
+
"vision_model.encoder.layers.9.ls1": "model-00002-of-00002.safetensors",
|
| 742 |
+
"vision_model.encoder.layers.9.ls2": "model-00002-of-00002.safetensors",
|
| 743 |
+
"vision_model.encoder.layers.9.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
| 744 |
+
"vision_model.encoder.layers.9.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
| 745 |
+
"vision_model.encoder.layers.9.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
| 746 |
+
"vision_model.encoder.layers.9.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
| 747 |
+
"vision_model.encoder.layers.9.norm1.bias": "model-00002-of-00002.safetensors",
|
| 748 |
+
"vision_model.encoder.layers.9.norm1.weight": "model-00002-of-00002.safetensors",
|
| 749 |
+
"vision_model.encoder.layers.9.norm2.bias": "model-00002-of-00002.safetensors",
|
| 750 |
+
"vision_model.encoder.layers.9.norm2.weight": "model-00002-of-00002.safetensors"
|
| 751 |
+
}
|
| 752 |
+
}
|
blobs/80c7a2e27da60b66f80a04f3d710f99d9b9da929
ADDED
@@ -0,0 +1,363 @@
---
license: apache-2.0
license_link: LICENSE
language:
- multilingual
tags:
- vision-language
- ocr
- document-intelligence
- qianfan
pipeline_tag: image-text-to-text
library_name: transformers
model-index:
- name: Qianfan-OCR
  results:
  - task:
      type: document-parsing
      name: Document Parsing
    dataset:
      name: OmniDocBench v1.5
      type: opendatalab/OmniDocBench
    metrics:
    - type: overall
      value: 93.12
      name: Overall Score
  - task:
      type: ocr
      name: OCR
    dataset:
      name: OlmOCR Bench
      type: allenai/olmOCR-bench
    metrics:
    - type: accuracy
      value: 79.8
      name: Overall Score
  - task:
      type: ocr
      name: OCR
    dataset:
      name: OCRBench
      type: echo840/OCRBench
    metrics:
    - type: accuracy
      value: 880
      name: Score
---

<div align="center">

<h1>Qianfan-OCR</h1>

<h3>A Unified End-to-End Model for Document Intelligence</h3>

[**🤖 Demo**](https://huggingface.co/spaces/baidu/Qianfan-OCR-Demo) |
[**📄 Technical Report**](https://arxiv.org/abs/2603.13398) |
[**🖥️ Qianfan Platform**](https://cloud.baidu.com/product-s/qianfan_home) |
[**💻 GitHub**](https://github.com/baidubce/Qianfan-VL) |
[**🧩 Skill**](https://github.com/baidubce/skills/tree/develop/skills/qianfanocr-document-intelligence)

</div>

## Introduction

**Qianfan-OCR** is a **4B-parameter end-to-end document intelligence model** developed by the Baidu Qianfan Team. It unifies document parsing, layout analysis, and document understanding within a single vision-language architecture.

Unlike traditional multi-stage OCR pipelines that chain separate layout detection, text recognition, and language comprehension modules, Qianfan-OCR performs **direct image-to-Markdown conversion** and supports a broad range of prompt-driven tasks, from structured document parsing and table extraction to chart understanding, document question answering, and key information extraction, all within one model.

### Key Highlights

- 🏆 **#1 End-to-End Model on OmniDocBench v1.5**: achieves a **93.12** overall score, surpassing DeepSeek-OCR-v2 (91.09), Gemini-3 Pro (90.33), and all other end-to-end models
- 🏆 **#1 End-to-End Model on OlmOCR Bench**: scores **79.8**
- 🏆 **#1 on Key Information Extraction**: overall mean score of **87.9** across five public KIE benchmarks, surpassing Gemini-3.1-Pro, Gemini-3-Pro, Seed-2.0, and Qwen3-VL-235B-A22B
- 🧠 **Layout-as-Thought**: an optional thinking phase that recovers explicit layout analysis within the end-to-end paradigm via `<think>` tokens
- 🌍 **192 Languages**: multilingual OCR support across diverse scripts
- ⚡ **Efficient Deployment**: reaches **1.024 PPS** (pages per second) with W8A8 quantization on a single A100 GPU

## Architecture

Qianfan-OCR adopts the multimodal bridging architecture from [Qianfan-VL](https://arxiv.org/abs/2509.18189), consisting of three core components:

| Component | Details |
|---|---|
| **Vision Encoder** | Qianfan-ViT, 24 Transformer layers, AnyResolution design (up to 4K), 256 visual tokens per 448×448 tile, max 4,096 tokens per image |
| **Language Model** | Qwen3-4B (3.6B non-embedding), 36 layers, 2560 hidden dim, GQA (32 query / 8 KV heads), 32K context (extendable to 131K) |
| **Cross-Modal Adapter** | 2-layer MLP with GELU activation, projecting from 1024-dim to 2560-dim |

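The adapter is small enough to sketch directly. The snippet below is a minimal, illustrative reconstruction based only on the table above and on the `mlp1.0`/`mlp1.1`/`mlp1.3` parameter names visible in this repository's weight map; the released modeling code is authoritative and may differ in detail.

```python
import torch
import torch.nn as nn

# Illustrative sketch of the cross-modal adapter, NOT the official code.
# The weight map lists mlp1.0.{weight,bias}, mlp1.1.{weight,bias} and
# mlp1.3.{weight,bias}, which is consistent with a
# LayerNorm -> Linear -> GELU -> Linear stack (index 2 is the
# parameter-free GELU). The 1024-dim input follows the table; any
# token-merging step before the adapter is omitted here.
mlp1 = nn.Sequential(
    nn.LayerNorm(1024),      # mlp1.0
    nn.Linear(1024, 2560),   # mlp1.1
    nn.GELU(),               # mlp1.2 (no parameters)
    nn.Linear(2560, 2560),   # mlp1.3
)

# Token-budget arithmetic from the table: each 448x448 tile costs 256
# visual tokens, so 12 tiles plus a thumbnail cost 13 * 256 = 3328
# tokens, within the 4,096-token-per-image cap.
visual_tokens = torch.randn(1, 13 * 256, 1024)
llm_inputs = mlp1(visual_tokens)  # -> shape (1, 3328, 2560)
```
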
### Layout-as-Thought

A key innovation is **Layout-as-Thought**: an optional thinking phase triggered by `<think>` tokens, in which the model generates structured layout representations (bounding boxes, element types, reading order) before producing its final output.

This mechanism serves two purposes:
1. **Functional**: it recovers layout analysis capability within the end-to-end paradigm, so users obtain structured layout results directly
2. **Enhancement**: it provides targeted accuracy improvements on documents with complex layouts, cluttered elements, or non-standard reading orders

> **When to use**: enable thinking for heterogeneous pages with mixed element types (exam papers, technical reports, newspapers); disable it for homogeneous documents (single-column text, simple forms) for better results and lower latency.

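Operationally, the switch is prompt-level, as in the Thinking Mode example in the Quick Start below; a minimal sketch (the `page_is_complex` flag and the exact token placement are illustrative assumptions):

```python
# Thinking mode is toggled per request by appending the <think> token
# to the question; page_is_complex is a hypothetical flag.
page_is_complex = True
question = "Parse this document to Markdown."
if page_is_complex:  # e.g. exam papers, newspapers, technical reports
    question += " <think>"
```
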
## Benchmark Results

### OmniDocBench v1.5 (Document Parsing)

| Model | Type | Overall ↑ | TextEdit ↓ | FormulaCDM ↑ | TableTEDs ↑ | TableTEDss ↑ | R-orderEdit ↓ |
|---|---|---|---|---|---|---|---|
| **Qianfan-OCR (Ours)** | End-to-end | **93.12** | **0.041** | **92.43** | **91.02** | **93.85** | **0.049** |
| DeepSeek-OCR-v2 | End-to-end | 91.09 | 0.048 | 90.31 | 87.75 | 92.06 | 0.057 |
| Gemini-3 Pro | End-to-end | 90.33 | 0.065 | 89.18 | 88.28 | 90.29 | 0.071 |
| Qwen3-VL-235B | End-to-end | 89.15 | 0.069 | 88.14 | 86.21 | 90.55 | 0.068 |
| dots.ocr | End-to-end | 88.41 | 0.048 | 83.22 | 86.78 | 90.62 | 0.053 |
| PaddleOCR-VL 1.5 | Pipeline | 94.50 | 0.035 | 94.21 | 92.76 | 95.79 | 0.042 |

### General OCR Benchmarks

| Model | OCRBench | OCRBenchv2 (en/zh) | CCOCR-multilan | CCOCR-overall |
|---|---|---|---|---|
| **Qianfan-OCR (Ours)** | **880** | 56.0 / **60.77** | **76.7** | **79.3** |
| Qwen3-VL-4B | 873 | **60.68** / 59.13 | 74.2 | 76.5 |
| MonkeyOCR | 655 | 21.78 / 38.91 | 43.8 | 35.2 |
| DeepSeek-OCR | 459 | 15.98 / 38.31 | 32.5 | 27.6 |

### Document Understanding

| Benchmark | Qianfan-OCR | Qwen3-VL-4B | Qwen3-VL-2B |
|---|---|---|---|
| DocVQA | 92.8 | **94.9** | 92.7 |
| CharXiv_DQ | **94.0** | 81.8 | 69.7 |
| CharXiv_RQ | **85.2** | 48.5 | 41.3 |
| ChartQA | **88.1** | 83.3 | 78.3 |
| ChartQAPro | **42.9** | 36.2 | 24.5 |
| ChartBench | **85.9** | 74.9 | 73.2 |
| TextVQA | 80.0 | **81.8** | 79.9 |
| OCRVQA | **66.8** | 64.7 | 59.3 |

> 💡 Two-stage OCR+LLM systems score **0.0** on CharXiv (both DQ and RQ), demonstrating that the chart structures discarded during text extraction are essential for reasoning.

### Key Information Extraction (KIE)

| Model | Overall | OCRBench KIE | OCRBenchv2 KIE (en) | OCRBenchv2 KIE (zh) | CCOCR KIE | Nanonets KIE (F1) |
|---|---|---|---|---|---|---|
| **Qianfan-OCR (Ours)** | **87.9** | 95.0 | 82.8 | **82.3** | 92.8 | **86.5** |
| Qwen3-VL-235B-A22B | 84.2 | 94.0 | 85.6 | 62.9 | **95.1** | 83.8 |
| Qwen3-VL-4B | 83.5 | 89.0 | 82.1 | 71.3 | 91.6 | 83.3 |
| Gemini-3.1-Pro | 79.2 | **96.0** | **87.8** | 63.4 | 72.5 | 76.1 |

### Inference Throughput

| Model | PPS (pages/sec) |
|---|---|
| **Qianfan-OCR (W8A8)** | **1.024** |
| Qianfan-OCR (W16A16) | 0.503 |
| MinerU 2.5 | 1.057 |
| MonkeyOCR-pro-1.2B | 0.673 |
| Dots OCR | 0.352 |

*All throughput numbers measured on a single NVIDIA A100 GPU with vLLM 0.10.2.*

## Supported Tasks

Qianfan-OCR supports a comprehensive set of document intelligence tasks through prompt-driven control:

| Task Category | Specific Tasks |
|---|---|
| **Document Parsing** | Image-to-Markdown conversion, multi-page parsing, structured output (JSON/HTML) |
| **Layout Analysis** | Bounding box detection, element type classification (25 categories), reading order |
| **Table Recognition** | Complex table extraction (merged cells, rotated tables), HTML output |
| **Formula Recognition** | Inline and display math formulas, LaTeX output |
| **Chart Understanding** | Chart QA, trend analysis, data extraction from various chart types |
| **Key Information Extraction** | Receipts, invoices, certificates, medical records, ID cards |
| **Handwriting Recognition** | Chinese and English handwritten text |
| **Scene Text Recognition** | Street signs, product labels, natural scene text |
| **Multilingual OCR** | 192 languages including Latin, Cyrillic, Arabic, South/Southeast Asian, and CJK scripts |

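Because task selection is prompt-driven, switching tasks means switching the question string passed to `model.chat`. The prompts below are illustrative placeholders: only the Markdown-parsing prompt appears in this card, and the official prompt set may differ.

```python
# Illustrative task prompts; only "parse" is taken from this card,
# the rest are hypothetical phrasings.
TASK_PROMPTS = {
    "parse": "Parse this document to Markdown.",
    "table": "Extract the table in this image as HTML.",            # assumed
    "formula": "Recognize the formula in this image as LaTeX.",     # assumed
    "kie": "Extract the invoice number and total amount as JSON.",  # assumed
}
question = TASK_PROMPTS["parse"]
```
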
## Quick Start

### Basic Usage

```python
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
from PIL import Image

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform

def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
|
| 211 |
+
orig_width, orig_height = image.size
|
| 212 |
+
aspect_ratio = orig_width / orig_height
|
| 213 |
+
|
| 214 |
+
# calculate the existing image aspect ratio
|
| 215 |
+
target_ratios = set(
|
| 216 |
+
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
|
| 217 |
+
i * j <= max_num and i * j >= min_num)
|
| 218 |
+
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
|
| 219 |
+
|
| 220 |
+
# find the closest aspect ratio to the target
|
| 221 |
+
target_aspect_ratio = find_closest_aspect_ratio(
|
| 222 |
+
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
|
| 223 |
+
|
| 224 |
+
# calculate the target width and height
|
| 225 |
+
target_width = image_size * target_aspect_ratio[0]
|
| 226 |
+
target_height = image_size * target_aspect_ratio[1]
|
| 227 |
+
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
|
| 228 |
+
|
| 229 |
+
# resize the image
|
| 230 |
+
resized_img = image.resize((target_width, target_height))
|
| 231 |
+
processed_images = []
|
| 232 |
+
for i in range(blocks):
|
| 233 |
+
box = (
|
| 234 |
+
(i % (target_width // image_size)) * image_size,
|
| 235 |
+
(i // (target_width // image_size)) * image_size,
|
| 236 |
+
((i % (target_width // image_size)) + 1) * image_size,
|
| 237 |
+
((i // (target_width // image_size)) + 1) * image_size
|
| 238 |
+
)
|
| 239 |
+
# split the image
|
| 240 |
+
split_img = resized_img.crop(box)
|
| 241 |
+
processed_images.append(split_img)
|
| 242 |
+
assert len(processed_images) == blocks
|
| 243 |
+
if use_thumbnail and len(processed_images) != 1:
|
| 244 |
+
thumbnail_img = image.resize((image_size, image_size))
|
| 245 |
+
processed_images.append(thumbnail_img)
|
| 246 |
+
return processed_images
|
| 247 |
+
|
| 248 |
+
def load_image(image_file, input_size=448, max_num=12):
|
| 249 |
+
image = Image.open(image_file).convert('RGB')
|
| 250 |
+
transform = build_transform(input_size=input_size)
|
| 251 |
+
images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
|
| 252 |
+
pixel_values = [transform(image) for image in images]
|
| 253 |
+
pixel_values = torch.stack(pixel_values)
|
| 254 |
+
return pixel_values
|
| 255 |
+
|
| 256 |
+
# Load model
|
| 257 |
+
MODEL_PATH = "baidu/Qianfan-OCR"
|
| 258 |
+
model = AutoModel.from_pretrained(
|
| 259 |
+
MODEL_PATH,
|
| 260 |
+
torch_dtype=torch.bfloat16,
|
| 261 |
+
trust_remote_code=True,
|
| 262 |
+
device_map="auto"
|
| 263 |
+
).eval()
|
| 264 |
+
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
|
| 265 |
+
|
| 266 |
+
# Load and process image
|
| 267 |
+
pixel_values = load_image("./Qianfan-OCR/examples/document.png").to(torch.bfloat16).to(model.device)
|
| 268 |
+
|
| 269 |
+
# Inference
|
| 270 |
+
prompt = "Parse this document to Markdown."
|
| 271 |
+
with torch.no_grad():
|
| 272 |
+
response = model.chat(
|
| 273 |
+
tokenizer,
|
| 274 |
+
pixel_values=pixel_values,
|
| 275 |
+
question=prompt,
|
| 276 |
+
generation_config={"max_new_tokens": 16384}
|
| 277 |
+
)
|
| 278 |
+
print(response)
|
| 279 |
+
```
|
| 280 |
+
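
To see what `dynamic_preprocess` actually does, consider a hypothetical 1700x800 page (aspect ratio about 2.125). Among grids of at most `max_num=12` tiles, the 2:1-shaped candidates (2x1, 4x2, 6x3) are closest; the area-based tie-break picks 4x2, so the page is resized to 1792x896 and cut into eight 448x448 tiles plus a global thumbnail:

```python
from PIL import Image

# A hypothetical blank 1700x800 page, used only to trace the tiling logic.
img = Image.new('RGB', (1700, 800))
tiles = dynamic_preprocess(img, image_size=448, max_num=12, use_thumbnail=True)

# 4x2 grid -> 8 tiles, plus the appended thumbnail -> 9 crops of 448x448.
print(len(tiles))        # 9
print(tiles[0].size)     # (448, 448)
```
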

### With Layout-as-Thought (Thinking Mode)

```python
# Enable Layout-as-Thought by appending the <think> token to the query

pixel_values = load_image("./Qianfan-OCR/examples/complex_document.jpg").to(torch.bfloat16).to(model.device)
prompt = "Parse this document to Markdown.<think>"
with torch.no_grad():
    response = model.chat(
        tokenizer,
        pixel_values=pixel_values,
        question=prompt,
        generation_config={"max_new_tokens": 16384}
    )
print(response)

# The model first generates a structured layout analysis, then produces the final output
```
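
Since `</think>` is a dedicated special token in the vocabulary (see `added_tokens.json` below), the layout analysis can be separated from the final answer with a simple split. `split_thinking` is our own helper, not part of the model API:

```python
# Our own convenience helper (an assumption, based on the </think> special
# token): split the thinking-mode reply into (layout analysis, final output).
def split_thinking(response: str):
    if "</think>" in response:
        thinking, final = response.split("</think>", 1)
        return thinking.strip(), final.strip()
    return None, response.strip()

layout_analysis, markdown = split_thinking(response)
```
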

### Key Information Extraction

```python
pixel_values = load_image("./Qianfan-OCR/examples/invoice.jpg").to(torch.bfloat16).to(model.device)
# The prompt (in Chinese) asks: "Extract the following fields from the image:
# name, date, total amount. Output in standard JSON format."
prompt = "请从图片中提取以下字段信息:姓名、日期、总金额。使用标准JSON格式输出。"
with torch.no_grad():
    response = model.chat(
        tokenizer,
        pixel_values=pixel_values,
        question=prompt,
        generation_config={"max_new_tokens": 16384}
    )
print(response)
```
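
The reply is requested as standard JSON, but models sometimes wrap it in a Markdown fence or add surrounding text. A small best-effort parser (our own helper, not part of the model API) makes the result easy to consume; the field names depend on what the prompt asked for:

```python
import json
import re

def parse_json_response(response: str) -> dict:
    """Best-effort extraction of a JSON object from the model's reply,
    tolerating an optional ```json ... ``` fence around it."""
    match = re.search(r"\{.*\}", response, flags=re.DOTALL)
    if match is None:
        raise ValueError("no JSON object found in response")
    return json.loads(match.group(0))

fields = parse_json_response(response)
# Keys follow the requested field names (here the Chinese prompt above).
print(fields.get("姓名"), fields.get("日期"), fields.get("总金额"))
```
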

### vLLM Deployment

```bash
# Serve with vLLM for high-throughput inference
vllm serve baidu/Qianfan-OCR --trust-remote-code
```

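Once serving, the model can be queried through vLLM's OpenAI-compatible API. A minimal sketch, assuming the default port 8000 and base64 data URLs for image input (adjust to your deployment):

```python
# Minimal client sketch against vLLM's OpenAI-compatible server.
# Assumptions: default endpoint http://localhost:8000/v1, no API key required.
import base64
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

with open("./Qianfan-OCR/examples/document.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

completion = client.chat.completions.create(
    model="baidu/Qianfan-OCR",
    messages=[{
        "role": "user",
        "content": [
            {"type": "image_url",
             "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
            {"type": "text", "text": "Parse this document to Markdown."},
        ],
    }],
    max_tokens=16384,
)
print(completion.choices[0].message.content)
```
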
## Skill

We provide a [Qianfan OCR Document Intelligence](https://github.com/baidubce/skills/tree/develop/skills/qianfanocr-document-intelligence) skill for image and PDF understanding workflows.

It can be used with OpenClaw, Claude Code, Codex, and other assistants that support this skill format.
The skill packages reusable instructions, scripts, and references so the agent can automatically apply Qianfan-powered document intelligence to tasks such as:

- document parsing to Markdown
- layout analysis
- element recognition
- general OCR
- key information extraction
- chart understanding
- document VQA

The skill is designed for visual understanding tasks over images and PDFs, and includes the execution flow needed to prepare inputs, choose the right analysis mode, and call the bundled CLI tools.

## Citation

```bibtex
@misc{dong2026qianfanocrunifiedendtoendmodel,
      title={Qianfan-OCR: A Unified End-to-End Model for Document Intelligence},
      author={Daxiang Dong and Mingming Zheng and Dong Xu and Chunhua Luo and Bairong Zhuang and Yuxuan Li and Ruoyun He and Haoran Wang and Wenyu Zhang and Wenbo Wang and Yicheng Wang and Xue Xiong and Ayong Zheng and Xiaoying Zuo and Ziwei Ou and Jingnan Gu and Quanhao Guo and Jianmin Wu and Dawei Yin and Dou Shen},
      year={2026},
      eprint={2603.13398},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2603.13398},
}
```

## Acknowledgments

We thank the Baidu AI Cloud team for infrastructure support, the Baige and Kunlun teams for AI infrastructure assistance, and all contributors to the Qianfan platform.

## License

This project is licensed under the Apache License 2.0. See `LICENSE` for the full license text.

Some bundled third-party source files are licensed under the MIT License. See `NOTICE` for the file list and corresponding attribution details.
blobs/96805d61fbb9523fd27a09ab40451d04da09e9ba4b102341eac0184d8f82a0b1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96805d61fbb9523fd27a09ab40451d04da09e9ba4b102341eac0184d8f82a0b1
size 4503788440
blobs/9ce20192fbe0d521d100521f1e0836c415debacb615b89f7658178420822e710
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9ce20192fbe0d521d100521f1e0836c415debacb615b89f7658178420822e710
size 309990
blobs/a25c514d33074f195f0907523948db0428f78cda
ADDED
@@ -0,0 +1,115 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see NOTICE for details]
# --------------------------------------------------------

import copy
from typing import Dict, Any, Optional

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

logger = logging.get_logger(__name__)


class InternVLChatConfig(PretrainedConfig):
    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config: Optional[Dict[str, Any]] = None,
            llm_config: Optional[Dict[str, Any]] = None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version="v1",
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs,
    ):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            logger.info('llm_config is None. Initializing the LLM config with default values (`Qwen2Config`).')
        assert 'architectures' in llm_config, "Should specify architecture in llm_config"

        if isinstance(vision_config, dict):
            self.vision_config = InternVisionConfig(**vision_config)
        else:
            self.vision_config = vision_config

        if isinstance(llm_config, dict):
            architecture: str = llm_config['architectures'][0]
            if architecture == 'LlamaForCausalLM':
                from transformers import LlamaConfig
                self.llm_config = LlamaConfig(**llm_config)
            elif architecture == 'Qwen2ForCausalLM':
                from transformers import Qwen2Config
                self.llm_config = Qwen2Config(**llm_config)
            elif architecture == 'Qwen3MoeForCausalLM':
                from transformers import Qwen3MoeConfig
                self.llm_config = Qwen3MoeConfig(**llm_config)
            elif architecture == 'Qwen3ForCausalLM':
                from transformers import Qwen3Config
                self.llm_config = Qwen3Config(**llm_config)
            else:
                raise ValueError('Unsupported architecture: {}'.format(architecture))
        else:
            self.llm_config = llm_config

        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        self.tie_word_embeddings = self.llm_config.tie_word_embeddings

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
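
A quick editorial sketch (not part of the uploaded files): loading this config with `trust_remote_code` shows how the `architectures` entry in `llm_config` selects the matching Hugging Face config class.

```python
# Sketch: AutoConfig dispatches to the InternVLChatConfig class above via
# trust_remote_code; the nested llm_config class depends on `architectures`.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("baidu/Qianfan-OCR", trust_remote_code=True)
print(cfg.model_type)                  # internvl_chat
print(type(cfg.llm_config).__name__)   # one of LlamaConfig / Qwen2Config / Qwen3Config / Qwen3MoeConfig
print(cfg.downsample_ratio, cfg.max_dynamic_patch)
```
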
blobs/a7b376e0a83f26eaa784db792ef61be7aac5494f
ADDED
@@ -0,0 +1,34 @@
{
  "crop_size": null,
  "crop_to_patches": false,
  "data_format": "channels_first",
  "default_to_square": true,
  "device": null,
  "do_center_crop": null,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_processor_type": "GotOcr2ImageProcessorFast",
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "input_data_format": null,
  "max_patches": 12,
  "min_patches": 1,
  "processor_class": "InternVLProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "return_tensors": null,
  "size": {
    "height": 448,
    "width": 448
  }
}
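
A quick editorial sketch (not part of the uploaded files): the preprocessor config above can be loaded via `AutoImageProcessor`; note that `rescale_factor` is 1/255 and the mean/std are the ImageNet constants, matching the manual torchvision transform in the README's Quick Start.

```python
# Sketch: inspect the fast GOT-OCR2 image processor configured above.
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("baidu/Qianfan-OCR")
print(type(processor).__name__)   # GotOcr2ImageProcessorFast
print(processor.size)             # {'height': 448, 'width': 448}
print(processor.image_mean)       # [0.485, 0.456, 0.406]
```
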
blobs/b2f155131ba1b6cb1664845ddde157100a30a2c5
ADDED
@@ -0,0 +1,72 @@
- dataset:
    id: allenai/olmOCR-bench
    task_id: overall
  value: 79.8
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: arxiv_math
  value: 80.1
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: old_scans_math
  value: 73.1
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: table_tests
  value: 81.6
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: old_scans
  value: 42.0
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: multi_column
  value: 80.4
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: long_tiny_text
  value: 89.1
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: headers_footers
  value: 92.2
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
- dataset:
    id: allenai/olmOCR-bench
    task_id: baseline
  value: 99.6
  source:
    url: https://huggingface.co/papers/2603.13398
    name: Qianfan-OCR technical report
    user: nielsr
blobs/bbf7935cd9e3744c905911ff0971ec1fbebf17e1
ADDED
@@ -0,0 +1,390 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see NOTICE for details]
# --------------------------------------------------------

import warnings
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers import LlamaForCausalLM, Qwen2ForCausalLM, Qwen3ForCausalLM, Qwen3MoeForCausalLM

from .configuration_internvl_chat import InternVLChatConfig
from .conversation import get_conv_template
from .modeling_intern_vit import InternVisionModel, has_flash_attn

logger = logging.get_logger(__name__)


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))


class InternVLChatModel(PreTrainedModel):
    config_class = InternVLChatConfig
    main_input_name = 'pixel_values'
    base_model_prefix = 'language_model'
    _supports_flash_attn_2 = True
    supports_gradient_checkpointing = True
    _no_split_modules = [
        "InternVisionModel",
        "Qwen3DecoderLayer",
    ]

    # support transformers 4.51.+
    _tp_plan = ''

    @property
    def all_tied_weights_keys(self):
        if hasattr(self, 'language_model'):
            return getattr(
                self.language_model,
                'all_tied_weights_keys',
                getattr(self.language_model, '_tied_weights_keys', []),
            )
        return getattr(self, '_tied_weights_keys', [])

    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.37.0', 'ge')
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.select_layer = config.select_layer
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        use_flash_attn = use_flash_attn if has_flash_attn else False
        config.vision_config.use_flash_attn = True if use_flash_attn else False
        config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'ps_version: {self.ps_version}')
        if vision_model is not None:
            self.vision_model = vision_model
        else:
            self.vision_model = InternVisionModel(config.vision_config)
        if language_model is not None:
            self.language_model = language_model
        else:
            architecture: str = config.llm_config.architectures[0]
            if architecture == 'LlamaForCausalLM':
                self.language_model = LlamaForCausalLM(config.llm_config)
            elif architecture == 'Qwen2ForCausalLM':
                self.language_model = Qwen2ForCausalLM(config.llm_config)
            elif architecture == 'Qwen3MoeForCausalLM':
                self.language_model = Qwen3MoeForCausalLM(config.llm_config)
            elif architecture == 'Qwen3ForCausalLM':
                self.language_model = Qwen3ForCausalLM(config.llm_config)
            else:
                raise NotImplementedError(f'{architecture} is not implemented.')

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.mlp1 = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        self.img_context_token_id = None
        self.conv_template = get_conv_template(self.template)
        self.system_message = self.conv_template.system_message

    def forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()

        vit_embeds = self.extract_feature(pixel_values)
        vit_embeds = vit_embeds[image_flags == 1]
        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        # if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
        #     print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = min(selected.sum(), vit_embeds.size(0))
            input_embeds[selected][:n_token] = input_embeds[selected][:n_token] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

    def extract_feature(self, pixel_values):
        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True).last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True).hidden_states[self.select_layer]
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config=None, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        if generation_config is None:
            generation_config = {}
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=False)
        responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config=None, history=None, return_history=False,
             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
             verbose=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        if generation_config is None:
            generation_config = {}
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=False)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs

    @property
    def lm_head(self):
        return self.language_model.get_output_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        return self.language_model.set_input_embeddings(value)

    def set_output_embeddings(self, value):
        return self.language_model.set_output_embeddings(value)
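
A standalone editorial sketch (not part of the uploaded files) tracing `pixel_shuffle`'s shapes, assuming the 448-pixel tiles and a patch size of 14 (so 32x32 ViT tokens per tile with hidden size 1024): with `downsample_ratio=0.5` the 32x32 token grid is folded into 16x16 tokens with 4x the channel width, giving the 256 image tokens per tile that `num_image_token` computes.

```python
import torch

# ViT tokens for one 448x448 tile: (448 / 14)^2 = 32 x 32, hidden size 1024
# (these dimensions are assumptions matching InternViT-style encoders).
n, h, w, c = 1, 32, 32, 1024
x = torch.randn(n, h, w, c)

s = 0.5  # scale_factor, i.e. downsample_ratio
x = x.view(n, w, int(h * s), int(c / s))            # (1, 32, 16, 2048)
x = x.permute(0, 2, 1, 3).contiguous()              # (1, 16, 32, 2048)
x = x.view(n, int(h * s), int(w * s), int(c / (s * s)))
print(x.shape)    # torch.Size([1, 16, 16, 4096])
print(16 * 16)    # 256 == num_image_token per tile
```
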
blobs/c47c173a6ee6ba9cdc52eafd51b7e6d679293b38
ADDED
@@ -0,0 +1,1047 @@
{
  "</box>": 151677,
  "</brief>": 152679,
  "</img>": 151670,
  "</label>": 152681,
  "</layout>": 152687,
  "</quad>": 151673,
  "</ref>": 151675,
  "</text>": 152683,
  "</text_list>": 152685,
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<COORD_000>": 151678,
  "<COORD_001>": 151679,
  "<COORD_002>": 151680,
  "<COORD_003>": 151681,
  "<COORD_004>": 151682,
  "<COORD_005>": 151683,
  "<COORD_006>": 151684,
  "<COORD_007>": 151685,
  "<COORD_008>": 151686,
  "<COORD_009>": 151687,
  "<COORD_010>": 151688,
  "<COORD_011>": 151689,
  "<COORD_012>": 151690,
  "<COORD_013>": 151691,
  "<COORD_014>": 151692,
  "<COORD_015>": 151693,
  "<COORD_016>": 151694,
  "<COORD_017>": 151695,
  "<COORD_018>": 151696,
  "<COORD_019>": 151697,
  "<COORD_020>": 151698,
  "<COORD_021>": 151699,
  "<COORD_022>": 151700,
  "<COORD_023>": 151701,
  "<COORD_024>": 151702,
  "<COORD_025>": 151703,
  "<COORD_026>": 151704,
  "<COORD_027>": 151705,
  "<COORD_028>": 151706,
  "<COORD_029>": 151707,
  "<COORD_030>": 151708,
  "<COORD_031>": 151709,
  "<COORD_032>": 151710,
  "<COORD_033>": 151711,
  "<COORD_034>": 151712,
  "<COORD_035>": 151713,
  "<COORD_036>": 151714,
  "<COORD_037>": 151715,
  "<COORD_038>": 151716,
  "<COORD_039>": 151717,
  "<COORD_040>": 151718,
  "<COORD_041>": 151719,
  "<COORD_042>": 151720,
  "<COORD_043>": 151721,
  "<COORD_044>": 151722,
  "<COORD_045>": 151723,
  "<COORD_046>": 151724,
  "<COORD_047>": 151725,
  "<COORD_048>": 151726,
  "<COORD_049>": 151727,
  "<COORD_050>": 151728,
  "<COORD_051>": 151729,
  "<COORD_052>": 151730,
  "<COORD_053>": 151731,
  "<COORD_054>": 151732,
  "<COORD_055>": 151733,
  "<COORD_056>": 151734,
  "<COORD_057>": 151735,
  "<COORD_058>": 151736,
  "<COORD_059>": 151737,
  "<COORD_060>": 151738,
  "<COORD_061>": 151739,
  "<COORD_062>": 151740,
  "<COORD_063>": 151741,
  "<COORD_064>": 151742,
  "<COORD_065>": 151743,
  "<COORD_066>": 151744,
  "<COORD_067>": 151745,
  "<COORD_068>": 151746,
  "<COORD_069>": 151747,
  "<COORD_070>": 151748,
  "<COORD_071>": 151749,
  "<COORD_072>": 151750,
  "<COORD_073>": 151751,
  "<COORD_074>": 151752,
  "<COORD_075>": 151753,
  "<COORD_076>": 151754,
  "<COORD_077>": 151755,
  "<COORD_078>": 151756,
  "<COORD_079>": 151757,
  "<COORD_080>": 151758,
  "<COORD_081>": 151759,
  "<COORD_082>": 151760,
  "<COORD_083>": 151761,
  "<COORD_084>": 151762,
  "<COORD_085>": 151763,
  "<COORD_086>": 151764,
  "<COORD_087>": 151765,
  "<COORD_088>": 151766,
  "<COORD_089>": 151767,
  "<COORD_090>": 151768,
  "<COORD_091>": 151769,
  "<COORD_092>": 151770,
  "<COORD_093>": 151771,
  "<COORD_094>": 151772,
  "<COORD_095>": 151773,
  "<COORD_096>": 151774,
  "<COORD_097>": 151775,
  "<COORD_098>": 151776,
  "<COORD_099>": 151777,
  "<COORD_100>": 151778,
  "<COORD_101>": 151779,
  "<COORD_102>": 151780,
  "<COORD_103>": 151781,
  "<COORD_104>": 151782,
  "<COORD_105>": 151783,
  "<COORD_106>": 151784,
  "<COORD_107>": 151785,
  "<COORD_108>": 151786,
  "<COORD_109>": 151787,
  "<COORD_110>": 151788,
  "<COORD_111>": 151789,
  "<COORD_112>": 151790,
  "<COORD_113>": 151791,
  "<COORD_114>": 151792,
  "<COORD_115>": 151793,
  "<COORD_116>": 151794,
  "<COORD_117>": 151795,
  "<COORD_118>": 151796,
  "<COORD_119>": 151797,
  "<COORD_120>": 151798,
  "<COORD_121>": 151799,
  "<COORD_122>": 151800,
  "<COORD_123>": 151801,
  "<COORD_124>": 151802,
  "<COORD_125>": 151803,
  "<COORD_126>": 151804,
  "<COORD_127>": 151805,
  "<COORD_128>": 151806,
  "<COORD_129>": 151807,
  "<COORD_130>": 151808,
  "<COORD_131>": 151809,
  "<COORD_132>": 151810,
  "<COORD_133>": 151811,
  "<COORD_134>": 151812,
  "<COORD_135>": 151813,
  "<COORD_136>": 151814,
  "<COORD_137>": 151815,
  "<COORD_138>": 151816,
  "<COORD_139>": 151817,
  "<COORD_140>": 151818,
  "<COORD_141>": 151819,
  "<COORD_142>": 151820,
  "<COORD_143>": 151821,
  "<COORD_144>": 151822,
  "<COORD_145>": 151823,
  "<COORD_146>": 151824,
  "<COORD_147>": 151825,
  "<COORD_148>": 151826,
  "<COORD_149>": 151827,
  "<COORD_150>": 151828,
  "<COORD_151>": 151829,
  "<COORD_152>": 151830,
  "<COORD_153>": 151831,
  "<COORD_154>": 151832,
  "<COORD_155>": 151833,
  "<COORD_156>": 151834,
  "<COORD_157>": 151835,
  "<COORD_158>": 151836,
  "<COORD_159>": 151837,
  "<COORD_160>": 151838,
  "<COORD_161>": 151839,
  "<COORD_162>": 151840,
  "<COORD_163>": 151841,
  "<COORD_164>": 151842,
  "<COORD_165>": 151843,
  "<COORD_166>": 151844,
  "<COORD_167>": 151845,
  "<COORD_168>": 151846,
  "<COORD_169>": 151847,
  "<COORD_170>": 151848,
  "<COORD_171>": 151849,
  "<COORD_172>": 151850,
  "<COORD_173>": 151851,
  "<COORD_174>": 151852,
  "<COORD_175>": 151853,
  "<COORD_176>": 151854,
  "<COORD_177>": 151855,
  "<COORD_178>": 151856,
  "<COORD_179>": 151857,
  "<COORD_180>": 151858,
  "<COORD_181>": 151859,
  "<COORD_182>": 151860,
  "<COORD_183>": 151861,
  "<COORD_184>": 151862,
  "<COORD_185>": 151863,
  "<COORD_186>": 151864,
  "<COORD_187>": 151865,
  "<COORD_188>": 151866,
  "<COORD_189>": 151867,
  "<COORD_190>": 151868,
  "<COORD_191>": 151869,
  "<COORD_192>": 151870,
  "<COORD_193>": 151871,
  "<COORD_194>": 151872,
  "<COORD_195>": 151873,
  "<COORD_196>": 151874,
  "<COORD_197>": 151875,
  "<COORD_198>": 151876,
  "<COORD_199>": 151877,
  "<COORD_200>": 151878,
  "<COORD_201>": 151879,
  "<COORD_202>": 151880,
  "<COORD_203>": 151881,
  "<COORD_204>": 151882,
  "<COORD_205>": 151883,
  "<COORD_206>": 151884,
  "<COORD_207>": 151885,
  "<COORD_208>": 151886,
  "<COORD_209>": 151887,
  "<COORD_210>": 151888,
  "<COORD_211>": 151889,
  "<COORD_212>": 151890,
  "<COORD_213>": 151891,
  "<COORD_214>": 151892,
  "<COORD_215>": 151893,
  "<COORD_216>": 151894,
  "<COORD_217>": 151895,
  "<COORD_218>": 151896,
  "<COORD_219>": 151897,
  "<COORD_220>": 151898,
  "<COORD_221>": 151899,
  "<COORD_222>": 151900,
  "<COORD_223>": 151901,
  "<COORD_224>": 151902,
  "<COORD_225>": 151903,
  "<COORD_226>": 151904,
  "<COORD_227>": 151905,
  "<COORD_228>": 151906,
  "<COORD_229>": 151907,
  "<COORD_230>": 151908,
  "<COORD_231>": 151909,
  "<COORD_232>": 151910,
  "<COORD_233>": 151911,
  "<COORD_234>": 151912,
  "<COORD_235>": 151913,
  "<COORD_236>": 151914,
  "<COORD_237>": 151915,
  "<COORD_238>": 151916,
  "<COORD_239>": 151917,
  "<COORD_240>": 151918,
  "<COORD_241>": 151919,
  "<COORD_242>": 151920,
  "<COORD_243>": 151921,
  "<COORD_244>": 151922,
  "<COORD_245>": 151923,
  "<COORD_246>": 151924,
  "<COORD_247>": 151925,
  "<COORD_248>": 151926,
  "<COORD_249>": 151927,
  "<COORD_250>": 151928,
  "<COORD_251>": 151929,
  "<COORD_252>": 151930,
  "<COORD_253>": 151931,
  "<COORD_254>": 151932,
  "<COORD_255>": 151933,
  "<COORD_256>": 151934,
  "<COORD_257>": 151935,
  "<COORD_258>": 151936,
  "<COORD_259>": 151937,
  "<COORD_260>": 151938,
  "<COORD_261>": 151939,
  "<COORD_262>": 151940,
  "<COORD_263>": 151941,
  "<COORD_264>": 151942,
  "<COORD_265>": 151943,
  "<COORD_266>": 151944,
  "<COORD_267>": 151945,
  "<COORD_268>": 151946,
  "<COORD_269>": 151947,
  "<COORD_270>": 151948,
  "<COORD_271>": 151949,
  "<COORD_272>": 151950,
  "<COORD_273>": 151951,
  "<COORD_274>": 151952,
  "<COORD_275>": 151953,
  "<COORD_276>": 151954,
  "<COORD_277>": 151955,
  "<COORD_278>": 151956,
  "<COORD_279>": 151957,
  "<COORD_280>": 151958,
  "<COORD_281>": 151959,
  "<COORD_282>": 151960,
  "<COORD_283>": 151961,
  "<COORD_284>": 151962,
  "<COORD_285>": 151963,
  "<COORD_286>": 151964,
  "<COORD_287>": 151965,
  "<COORD_288>": 151966,
  "<COORD_289>": 151967,
  "<COORD_290>": 151968,
  "<COORD_291>": 151969,
  "<COORD_292>": 151970,
  "<COORD_293>": 151971,
  "<COORD_294>": 151972,
  "<COORD_295>": 151973,
  "<COORD_296>": 151974,
  "<COORD_297>": 151975,
  "<COORD_298>": 151976,
  "<COORD_299>": 151977,
  "<COORD_300>": 151978,
  "<COORD_301>": 151979,
  "<COORD_302>": 151980,
  "<COORD_303>": 151981,
  "<COORD_304>": 151982,
  "<COORD_305>": 151983,
  "<COORD_306>": 151984,
  "<COORD_307>": 151985,
  "<COORD_308>": 151986,
  "<COORD_309>": 151987,
  "<COORD_310>": 151988,
  "<COORD_311>": 151989,
  "<COORD_312>": 151990,
  "<COORD_313>": 151991,
  "<COORD_314>": 151992,
  "<COORD_315>": 151993,
  "<COORD_316>": 151994,
  "<COORD_317>": 151995,
  "<COORD_318>": 151996,
  "<COORD_319>": 151997,
  "<COORD_320>": 151998,
  "<COORD_321>": 151999,
  "<COORD_322>": 152000,
  "<COORD_323>": 152001,
  "<COORD_324>": 152002,
  "<COORD_325>": 152003,
  "<COORD_326>": 152004,
  "<COORD_327>": 152005,
  "<COORD_328>": 152006,
  "<COORD_329>": 152007,
  "<COORD_330>": 152008,
  "<COORD_331>": 152009,
  "<COORD_332>": 152010,
  "<COORD_333>": 152011,
  "<COORD_334>": 152012,
  "<COORD_335>": 152013,
  "<COORD_336>": 152014,
  "<COORD_337>": 152015,
  "<COORD_338>": 152016,
  "<COORD_339>": 152017,
  "<COORD_340>": 152018,
  "<COORD_341>": 152019,
  "<COORD_342>": 152020,
  "<COORD_343>": 152021,
  "<COORD_344>": 152022,
  "<COORD_345>": 152023,
  "<COORD_346>": 152024,
  "<COORD_347>": 152025,
  "<COORD_348>": 152026,
  "<COORD_349>": 152027,
  "<COORD_350>": 152028,
  "<COORD_351>": 152029,
  "<COORD_352>": 152030,
  "<COORD_353>": 152031,
  "<COORD_354>": 152032,
  "<COORD_355>": 152033,
  "<COORD_356>": 152034,
  "<COORD_357>": 152035,
  "<COORD_358>": 152036,
  "<COORD_359>": 152037,
  "<COORD_360>": 152038,
  "<COORD_361>": 152039,
  "<COORD_362>": 152040,
  "<COORD_363>": 152041,
  "<COORD_364>": 152042,
  "<COORD_365>": 152043,
  "<COORD_366>": 152044,
  "<COORD_367>": 152045,
  "<COORD_368>": 152046,
  "<COORD_369>": 152047,
  "<COORD_370>": 152048,
  "<COORD_371>": 152049,
  "<COORD_372>": 152050,
  "<COORD_373>": 152051,
  "<COORD_374>": 152052,
  "<COORD_375>": 152053,
  "<COORD_376>": 152054,
  "<COORD_377>": 152055,
  "<COORD_378>": 152056,
  "<COORD_379>": 152057,
  "<COORD_380>": 152058,
  "<COORD_381>": 152059,
  "<COORD_382>": 152060,
  "<COORD_383>": 152061,
  "<COORD_384>": 152062,
  "<COORD_385>": 152063,
  "<COORD_386>": 152064,
  "<COORD_387>": 152065,
  "<COORD_388>": 152066,
  "<COORD_389>": 152067,
  "<COORD_390>": 152068,
  "<COORD_391>": 152069,
  "<COORD_392>": 152070,
  "<COORD_393>": 152071,
  "<COORD_394>": 152072,
  "<COORD_395>": 152073,
  "<COORD_396>": 152074,
  "<COORD_397>": 152075,
  "<COORD_398>": 152076,
  "<COORD_399>": 152077,
  "<COORD_400>": 152078,
  "<COORD_401>": 152079,
  "<COORD_402>": 152080,
  "<COORD_403>": 152081,
  "<COORD_404>": 152082,
  "<COORD_405>": 152083,
  "<COORD_406>": 152084,
  "<COORD_407>": 152085,
  "<COORD_408>": 152086,
  "<COORD_409>": 152087,
  "<COORD_410>": 152088,
  "<COORD_411>": 152089,
  "<COORD_412>": 152090,
  "<COORD_413>": 152091,
  "<COORD_414>": 152092,
  "<COORD_415>": 152093,
  "<COORD_416>": 152094,
  "<COORD_417>": 152095,
  "<COORD_418>": 152096,
  "<COORD_419>": 152097,
  "<COORD_420>": 152098,
  "<COORD_421>": 152099,
  "<COORD_422>": 152100,
  "<COORD_423>": 152101,
  "<COORD_424>": 152102,
  "<COORD_425>": 152103,
  "<COORD_426>": 152104,
  "<COORD_427>": 152105,
  "<COORD_428>": 152106,
  "<COORD_429>": 152107,
  "<COORD_430>": 152108,
  "<COORD_431>": 152109,
  "<COORD_432>": 152110,
  "<COORD_433>": 152111,
  "<COORD_434>": 152112,
|
| 449 |
+
"<COORD_435>": 152113,
|
| 450 |
+
"<COORD_436>": 152114,
|
| 451 |
+
"<COORD_437>": 152115,
|
| 452 |
+
"<COORD_438>": 152116,
|
| 453 |
+
"<COORD_439>": 152117,
|
| 454 |
+
"<COORD_440>": 152118,
|
| 455 |
+
"<COORD_441>": 152119,
|
| 456 |
+
"<COORD_442>": 152120,
|
| 457 |
+
"<COORD_443>": 152121,
|
| 458 |
+
"<COORD_444>": 152122,
|
| 459 |
+
"<COORD_445>": 152123,
|
| 460 |
+
"<COORD_446>": 152124,
|
| 461 |
+
"<COORD_447>": 152125,
|
| 462 |
+
"<COORD_448>": 152126,
|
| 463 |
+
"<COORD_449>": 152127,
|
| 464 |
+
"<COORD_450>": 152128,
|
| 465 |
+
"<COORD_451>": 152129,
|
| 466 |
+
"<COORD_452>": 152130,
|
| 467 |
+
"<COORD_453>": 152131,
|
| 468 |
+
"<COORD_454>": 152132,
|
| 469 |
+
"<COORD_455>": 152133,
|
| 470 |
+
"<COORD_456>": 152134,
|
| 471 |
+
"<COORD_457>": 152135,
|
| 472 |
+
"<COORD_458>": 152136,
|
| 473 |
+
"<COORD_459>": 152137,
|
| 474 |
+
"<COORD_460>": 152138,
|
| 475 |
+
"<COORD_461>": 152139,
|
| 476 |
+
"<COORD_462>": 152140,
|
| 477 |
+
"<COORD_463>": 152141,
|
| 478 |
+
"<COORD_464>": 152142,
|
| 479 |
+
"<COORD_465>": 152143,
|
| 480 |
+
"<COORD_466>": 152144,
|
| 481 |
+
"<COORD_467>": 152145,
|
| 482 |
+
"<COORD_468>": 152146,
|
| 483 |
+
"<COORD_469>": 152147,
|
| 484 |
+
"<COORD_470>": 152148,
|
| 485 |
+
"<COORD_471>": 152149,
|
| 486 |
+
"<COORD_472>": 152150,
|
| 487 |
+
"<COORD_473>": 152151,
|
| 488 |
+
"<COORD_474>": 152152,
|
| 489 |
+
"<COORD_475>": 152153,
|
| 490 |
+
"<COORD_476>": 152154,
|
| 491 |
+
"<COORD_477>": 152155,
|
| 492 |
+
"<COORD_478>": 152156,
|
| 493 |
+
"<COORD_479>": 152157,
|
| 494 |
+
"<COORD_480>": 152158,
|
| 495 |
+
"<COORD_481>": 152159,
|
| 496 |
+
"<COORD_482>": 152160,
|
| 497 |
+
"<COORD_483>": 152161,
|
| 498 |
+
"<COORD_484>": 152162,
|
| 499 |
+
"<COORD_485>": 152163,
|
| 500 |
+
"<COORD_486>": 152164,
|
| 501 |
+
"<COORD_487>": 152165,
|
| 502 |
+
"<COORD_488>": 152166,
|
| 503 |
+
"<COORD_489>": 152167,
|
| 504 |
+
"<COORD_490>": 152168,
|
| 505 |
+
"<COORD_491>": 152169,
|
| 506 |
+
"<COORD_492>": 152170,
|
| 507 |
+
"<COORD_493>": 152171,
|
| 508 |
+
"<COORD_494>": 152172,
|
| 509 |
+
"<COORD_495>": 152173,
|
| 510 |
+
"<COORD_496>": 152174,
|
| 511 |
+
"<COORD_497>": 152175,
|
| 512 |
+
"<COORD_498>": 152176,
|
| 513 |
+
"<COORD_499>": 152177,
|
| 514 |
+
"<COORD_500>": 152178,
|
| 515 |
+
"<COORD_501>": 152179,
|
| 516 |
+
"<COORD_502>": 152180,
|
| 517 |
+
"<COORD_503>": 152181,
|
| 518 |
+
"<COORD_504>": 152182,
|
| 519 |
+
"<COORD_505>": 152183,
|
| 520 |
+
"<COORD_506>": 152184,
|
| 521 |
+
"<COORD_507>": 152185,
|
| 522 |
+
"<COORD_508>": 152186,
|
| 523 |
+
"<COORD_509>": 152187,
|
| 524 |
+
"<COORD_510>": 152188,
|
| 525 |
+
"<COORD_511>": 152189,
|
| 526 |
+
"<COORD_512>": 152190,
|
| 527 |
+
"<COORD_513>": 152191,
|
| 528 |
+
"<COORD_514>": 152192,
|
| 529 |
+
"<COORD_515>": 152193,
|
| 530 |
+
"<COORD_516>": 152194,
|
| 531 |
+
"<COORD_517>": 152195,
|
| 532 |
+
"<COORD_518>": 152196,
|
| 533 |
+
"<COORD_519>": 152197,
|
| 534 |
+
"<COORD_520>": 152198,
|
| 535 |
+
"<COORD_521>": 152199,
|
| 536 |
+
"<COORD_522>": 152200,
|
| 537 |
+
"<COORD_523>": 152201,
|
| 538 |
+
"<COORD_524>": 152202,
|
| 539 |
+
"<COORD_525>": 152203,
|
| 540 |
+
"<COORD_526>": 152204,
|
| 541 |
+
"<COORD_527>": 152205,
|
| 542 |
+
"<COORD_528>": 152206,
|
| 543 |
+
"<COORD_529>": 152207,
|
| 544 |
+
"<COORD_530>": 152208,
|
| 545 |
+
"<COORD_531>": 152209,
|
| 546 |
+
"<COORD_532>": 152210,
|
| 547 |
+
"<COORD_533>": 152211,
|
| 548 |
+
"<COORD_534>": 152212,
|
| 549 |
+
"<COORD_535>": 152213,
|
| 550 |
+
"<COORD_536>": 152214,
|
| 551 |
+
"<COORD_537>": 152215,
|
| 552 |
+
"<COORD_538>": 152216,
|
| 553 |
+
"<COORD_539>": 152217,
|
| 554 |
+
"<COORD_540>": 152218,
|
| 555 |
+
"<COORD_541>": 152219,
|
| 556 |
+
"<COORD_542>": 152220,
|
| 557 |
+
"<COORD_543>": 152221,
|
| 558 |
+
"<COORD_544>": 152222,
|
| 559 |
+
"<COORD_545>": 152223,
|
| 560 |
+
"<COORD_546>": 152224,
|
| 561 |
+
"<COORD_547>": 152225,
|
| 562 |
+
"<COORD_548>": 152226,
|
| 563 |
+
"<COORD_549>": 152227,
|
| 564 |
+
"<COORD_550>": 152228,
|
| 565 |
+
"<COORD_551>": 152229,
|
| 566 |
+
"<COORD_552>": 152230,
|
| 567 |
+
"<COORD_553>": 152231,
|
| 568 |
+
"<COORD_554>": 152232,
|
| 569 |
+
"<COORD_555>": 152233,
|
| 570 |
+
"<COORD_556>": 152234,
|
| 571 |
+
"<COORD_557>": 152235,
|
| 572 |
+
"<COORD_558>": 152236,
|
| 573 |
+
"<COORD_559>": 152237,
|
| 574 |
+
"<COORD_560>": 152238,
|
| 575 |
+
"<COORD_561>": 152239,
|
| 576 |
+
"<COORD_562>": 152240,
|
| 577 |
+
"<COORD_563>": 152241,
|
| 578 |
+
"<COORD_564>": 152242,
|
| 579 |
+
"<COORD_565>": 152243,
|
| 580 |
+
"<COORD_566>": 152244,
|
| 581 |
+
"<COORD_567>": 152245,
|
| 582 |
+
"<COORD_568>": 152246,
|
| 583 |
+
"<COORD_569>": 152247,
|
| 584 |
+
"<COORD_570>": 152248,
|
| 585 |
+
"<COORD_571>": 152249,
|
| 586 |
+
"<COORD_572>": 152250,
|
| 587 |
+
"<COORD_573>": 152251,
|
| 588 |
+
"<COORD_574>": 152252,
|
| 589 |
+
"<COORD_575>": 152253,
|
| 590 |
+
"<COORD_576>": 152254,
|
| 591 |
+
"<COORD_577>": 152255,
|
| 592 |
+
"<COORD_578>": 152256,
|
| 593 |
+
"<COORD_579>": 152257,
|
| 594 |
+
"<COORD_580>": 152258,
|
| 595 |
+
"<COORD_581>": 152259,
|
| 596 |
+
"<COORD_582>": 152260,
|
| 597 |
+
"<COORD_583>": 152261,
|
| 598 |
+
"<COORD_584>": 152262,
|
| 599 |
+
"<COORD_585>": 152263,
|
| 600 |
+
"<COORD_586>": 152264,
|
| 601 |
+
"<COORD_587>": 152265,
|
| 602 |
+
"<COORD_588>": 152266,
|
| 603 |
+
"<COORD_589>": 152267,
|
| 604 |
+
"<COORD_590>": 152268,
|
| 605 |
+
"<COORD_591>": 152269,
|
| 606 |
+
"<COORD_592>": 152270,
|
| 607 |
+
"<COORD_593>": 152271,
|
| 608 |
+
"<COORD_594>": 152272,
|
| 609 |
+
"<COORD_595>": 152273,
|
| 610 |
+
"<COORD_596>": 152274,
|
| 611 |
+
"<COORD_597>": 152275,
|
| 612 |
+
"<COORD_598>": 152276,
|
| 613 |
+
"<COORD_599>": 152277,
|
| 614 |
+
"<COORD_600>": 152278,
|
| 615 |
+
"<COORD_601>": 152279,
|
| 616 |
+
"<COORD_602>": 152280,
|
| 617 |
+
"<COORD_603>": 152281,
|
| 618 |
+
"<COORD_604>": 152282,
|
| 619 |
+
"<COORD_605>": 152283,
|
| 620 |
+
"<COORD_606>": 152284,
|
| 621 |
+
"<COORD_607>": 152285,
|
| 622 |
+
"<COORD_608>": 152286,
|
| 623 |
+
"<COORD_609>": 152287,
|
| 624 |
+
"<COORD_610>": 152288,
|
| 625 |
+
"<COORD_611>": 152289,
|
| 626 |
+
"<COORD_612>": 152290,
|
| 627 |
+
"<COORD_613>": 152291,
|
| 628 |
+
"<COORD_614>": 152292,
|
| 629 |
+
"<COORD_615>": 152293,
|
| 630 |
+
"<COORD_616>": 152294,
|
| 631 |
+
"<COORD_617>": 152295,
|
| 632 |
+
"<COORD_618>": 152296,
|
| 633 |
+
"<COORD_619>": 152297,
|
| 634 |
+
"<COORD_620>": 152298,
|
| 635 |
+
"<COORD_621>": 152299,
|
| 636 |
+
"<COORD_622>": 152300,
|
| 637 |
+
"<COORD_623>": 152301,
|
| 638 |
+
"<COORD_624>": 152302,
|
| 639 |
+
"<COORD_625>": 152303,
|
| 640 |
+
"<COORD_626>": 152304,
|
| 641 |
+
"<COORD_627>": 152305,
|
| 642 |
+
"<COORD_628>": 152306,
|
| 643 |
+
"<COORD_629>": 152307,
|
| 644 |
+
"<COORD_630>": 152308,
|
| 645 |
+
"<COORD_631>": 152309,
|
| 646 |
+
"<COORD_632>": 152310,
|
| 647 |
+
"<COORD_633>": 152311,
|
| 648 |
+
"<COORD_634>": 152312,
|
| 649 |
+
"<COORD_635>": 152313,
|
| 650 |
+
"<COORD_636>": 152314,
|
| 651 |
+
"<COORD_637>": 152315,
|
| 652 |
+
"<COORD_638>": 152316,
|
| 653 |
+
"<COORD_639>": 152317,
|
| 654 |
+
"<COORD_640>": 152318,
|
| 655 |
+
"<COORD_641>": 152319,
|
| 656 |
+
"<COORD_642>": 152320,
|
| 657 |
+
"<COORD_643>": 152321,
|
| 658 |
+
"<COORD_644>": 152322,
|
| 659 |
+
"<COORD_645>": 152323,
|
| 660 |
+
"<COORD_646>": 152324,
|
| 661 |
+
"<COORD_647>": 152325,
|
| 662 |
+
"<COORD_648>": 152326,
|
| 663 |
+
"<COORD_649>": 152327,
|
| 664 |
+
"<COORD_650>": 152328,
|
| 665 |
+
"<COORD_651>": 152329,
|
| 666 |
+
"<COORD_652>": 152330,
|
| 667 |
+
"<COORD_653>": 152331,
|
| 668 |
+
"<COORD_654>": 152332,
|
| 669 |
+
"<COORD_655>": 152333,
|
| 670 |
+
"<COORD_656>": 152334,
|
| 671 |
+
"<COORD_657>": 152335,
|
| 672 |
+
"<COORD_658>": 152336,
|
| 673 |
+
"<COORD_659>": 152337,
|
| 674 |
+
"<COORD_660>": 152338,
|
| 675 |
+
"<COORD_661>": 152339,
|
| 676 |
+
"<COORD_662>": 152340,
|
| 677 |
+
"<COORD_663>": 152341,
|
| 678 |
+
"<COORD_664>": 152342,
|
| 679 |
+
"<COORD_665>": 152343,
|
| 680 |
+
"<COORD_666>": 152344,
|
| 681 |
+
"<COORD_667>": 152345,
|
| 682 |
+
"<COORD_668>": 152346,
|
| 683 |
+
"<COORD_669>": 152347,
|
| 684 |
+
"<COORD_670>": 152348,
|
| 685 |
+
"<COORD_671>": 152349,
|
| 686 |
+
"<COORD_672>": 152350,
|
| 687 |
+
"<COORD_673>": 152351,
|
| 688 |
+
"<COORD_674>": 152352,
|
| 689 |
+
"<COORD_675>": 152353,
|
| 690 |
+
"<COORD_676>": 152354,
|
| 691 |
+
"<COORD_677>": 152355,
|
| 692 |
+
"<COORD_678>": 152356,
|
| 693 |
+
"<COORD_679>": 152357,
|
| 694 |
+
"<COORD_680>": 152358,
|
| 695 |
+
"<COORD_681>": 152359,
|
| 696 |
+
"<COORD_682>": 152360,
|
| 697 |
+
"<COORD_683>": 152361,
|
| 698 |
+
"<COORD_684>": 152362,
|
| 699 |
+
"<COORD_685>": 152363,
|
| 700 |
+
"<COORD_686>": 152364,
|
| 701 |
+
"<COORD_687>": 152365,
|
| 702 |
+
"<COORD_688>": 152366,
|
| 703 |
+
"<COORD_689>": 152367,
|
| 704 |
+
"<COORD_690>": 152368,
|
| 705 |
+
"<COORD_691>": 152369,
|
| 706 |
+
"<COORD_692>": 152370,
|
| 707 |
+
"<COORD_693>": 152371,
|
| 708 |
+
"<COORD_694>": 152372,
|
| 709 |
+
"<COORD_695>": 152373,
|
| 710 |
+
"<COORD_696>": 152374,
|
| 711 |
+
"<COORD_697>": 152375,
|
| 712 |
+
"<COORD_698>": 152376,
|
| 713 |
+
"<COORD_699>": 152377,
|
| 714 |
+
"<COORD_700>": 152378,
|
| 715 |
+
"<COORD_701>": 152379,
|
| 716 |
+
"<COORD_702>": 152380,
|
| 717 |
+
"<COORD_703>": 152381,
|
| 718 |
+
"<COORD_704>": 152382,
|
| 719 |
+
"<COORD_705>": 152383,
|
| 720 |
+
"<COORD_706>": 152384,
|
| 721 |
+
"<COORD_707>": 152385,
|
| 722 |
+
"<COORD_708>": 152386,
|
| 723 |
+
"<COORD_709>": 152387,
|
| 724 |
+
"<COORD_710>": 152388,
|
| 725 |
+
"<COORD_711>": 152389,
|
| 726 |
+
"<COORD_712>": 152390,
|
| 727 |
+
"<COORD_713>": 152391,
|
| 728 |
+
"<COORD_714>": 152392,
|
| 729 |
+
"<COORD_715>": 152393,
|
| 730 |
+
"<COORD_716>": 152394,
|
| 731 |
+
"<COORD_717>": 152395,
|
| 732 |
+
"<COORD_718>": 152396,
|
| 733 |
+
"<COORD_719>": 152397,
|
| 734 |
+
"<COORD_720>": 152398,
|
| 735 |
+
"<COORD_721>": 152399,
|
| 736 |
+
"<COORD_722>": 152400,
|
| 737 |
+
"<COORD_723>": 152401,
|
| 738 |
+
"<COORD_724>": 152402,
|
| 739 |
+
"<COORD_725>": 152403,
|
| 740 |
+
"<COORD_726>": 152404,
|
| 741 |
+
"<COORD_727>": 152405,
|
| 742 |
+
"<COORD_728>": 152406,
|
| 743 |
+
"<COORD_729>": 152407,
|
| 744 |
+
"<COORD_730>": 152408,
|
| 745 |
+
"<COORD_731>": 152409,
|
| 746 |
+
"<COORD_732>": 152410,
|
| 747 |
+
"<COORD_733>": 152411,
|
| 748 |
+
"<COORD_734>": 152412,
|
| 749 |
+
"<COORD_735>": 152413,
|
| 750 |
+
"<COORD_736>": 152414,
|
| 751 |
+
"<COORD_737>": 152415,
|
| 752 |
+
"<COORD_738>": 152416,
|
| 753 |
+
"<COORD_739>": 152417,
|
| 754 |
+
"<COORD_740>": 152418,
|
| 755 |
+
"<COORD_741>": 152419,
|
| 756 |
+
"<COORD_742>": 152420,
|
| 757 |
+
"<COORD_743>": 152421,
|
| 758 |
+
"<COORD_744>": 152422,
|
| 759 |
+
"<COORD_745>": 152423,
|
| 760 |
+
"<COORD_746>": 152424,
|
| 761 |
+
"<COORD_747>": 152425,
|
| 762 |
+
"<COORD_748>": 152426,
|
| 763 |
+
"<COORD_749>": 152427,
|
| 764 |
+
"<COORD_750>": 152428,
|
| 765 |
+
"<COORD_751>": 152429,
|
| 766 |
+
"<COORD_752>": 152430,
|
| 767 |
+
"<COORD_753>": 152431,
|
| 768 |
+
"<COORD_754>": 152432,
|
| 769 |
+
"<COORD_755>": 152433,
|
| 770 |
+
"<COORD_756>": 152434,
|
| 771 |
+
"<COORD_757>": 152435,
|
| 772 |
+
"<COORD_758>": 152436,
|
| 773 |
+
"<COORD_759>": 152437,
|
| 774 |
+
"<COORD_760>": 152438,
|
| 775 |
+
"<COORD_761>": 152439,
|
| 776 |
+
"<COORD_762>": 152440,
|
| 777 |
+
"<COORD_763>": 152441,
|
| 778 |
+
"<COORD_764>": 152442,
|
| 779 |
+
"<COORD_765>": 152443,
|
| 780 |
+
"<COORD_766>": 152444,
|
| 781 |
+
"<COORD_767>": 152445,
|
| 782 |
+
"<COORD_768>": 152446,
|
| 783 |
+
"<COORD_769>": 152447,
|
| 784 |
+
"<COORD_770>": 152448,
|
| 785 |
+
"<COORD_771>": 152449,
|
| 786 |
+
"<COORD_772>": 152450,
|
| 787 |
+
"<COORD_773>": 152451,
|
| 788 |
+
"<COORD_774>": 152452,
|
| 789 |
+
"<COORD_775>": 152453,
|
| 790 |
+
"<COORD_776>": 152454,
|
| 791 |
+
"<COORD_777>": 152455,
|
| 792 |
+
"<COORD_778>": 152456,
|
| 793 |
+
"<COORD_779>": 152457,
|
| 794 |
+
"<COORD_780>": 152458,
|
| 795 |
+
"<COORD_781>": 152459,
|
| 796 |
+
"<COORD_782>": 152460,
|
| 797 |
+
"<COORD_783>": 152461,
|
| 798 |
+
"<COORD_784>": 152462,
|
| 799 |
+
"<COORD_785>": 152463,
|
| 800 |
+
"<COORD_786>": 152464,
|
| 801 |
+
"<COORD_787>": 152465,
|
| 802 |
+
"<COORD_788>": 152466,
|
| 803 |
+
"<COORD_789>": 152467,
|
| 804 |
+
"<COORD_790>": 152468,
|
| 805 |
+
"<COORD_791>": 152469,
|
| 806 |
+
"<COORD_792>": 152470,
|
| 807 |
+
"<COORD_793>": 152471,
|
| 808 |
+
"<COORD_794>": 152472,
|
| 809 |
+
"<COORD_795>": 152473,
|
| 810 |
+
"<COORD_796>": 152474,
|
| 811 |
+
"<COORD_797>": 152475,
|
| 812 |
+
"<COORD_798>": 152476,
|
| 813 |
+
"<COORD_799>": 152477,
|
| 814 |
+
"<COORD_800>": 152478,
|
| 815 |
+
"<COORD_801>": 152479,
|
| 816 |
+
"<COORD_802>": 152480,
|
| 817 |
+
"<COORD_803>": 152481,
|
| 818 |
+
"<COORD_804>": 152482,
|
| 819 |
+
"<COORD_805>": 152483,
|
| 820 |
+
"<COORD_806>": 152484,
|
| 821 |
+
"<COORD_807>": 152485,
|
| 822 |
+
"<COORD_808>": 152486,
|
| 823 |
+
"<COORD_809>": 152487,
|
| 824 |
+
"<COORD_810>": 152488,
|
| 825 |
+
"<COORD_811>": 152489,
|
| 826 |
+
"<COORD_812>": 152490,
|
| 827 |
+
"<COORD_813>": 152491,
|
| 828 |
+
"<COORD_814>": 152492,
|
| 829 |
+
"<COORD_815>": 152493,
|
| 830 |
+
"<COORD_816>": 152494,
|
| 831 |
+
"<COORD_817>": 152495,
|
| 832 |
+
"<COORD_818>": 152496,
|
| 833 |
+
"<COORD_819>": 152497,
|
| 834 |
+
"<COORD_820>": 152498,
|
| 835 |
+
"<COORD_821>": 152499,
|
| 836 |
+
"<COORD_822>": 152500,
|
| 837 |
+
"<COORD_823>": 152501,
|
| 838 |
+
"<COORD_824>": 152502,
|
| 839 |
+
"<COORD_825>": 152503,
|
| 840 |
+
"<COORD_826>": 152504,
|
| 841 |
+
"<COORD_827>": 152505,
|
| 842 |
+
"<COORD_828>": 152506,
|
| 843 |
+
"<COORD_829>": 152507,
|
| 844 |
+
"<COORD_830>": 152508,
|
| 845 |
+
"<COORD_831>": 152509,
|
| 846 |
+
"<COORD_832>": 152510,
|
| 847 |
+
"<COORD_833>": 152511,
|
| 848 |
+
"<COORD_834>": 152512,
|
| 849 |
+
"<COORD_835>": 152513,
|
| 850 |
+
"<COORD_836>": 152514,
|
| 851 |
+
"<COORD_837>": 152515,
|
| 852 |
+
"<COORD_838>": 152516,
|
| 853 |
+
"<COORD_839>": 152517,
|
| 854 |
+
"<COORD_840>": 152518,
|
| 855 |
+
"<COORD_841>": 152519,
|
| 856 |
+
"<COORD_842>": 152520,
|
| 857 |
+
"<COORD_843>": 152521,
|
| 858 |
+
"<COORD_844>": 152522,
|
| 859 |
+
"<COORD_845>": 152523,
|
| 860 |
+
"<COORD_846>": 152524,
|
| 861 |
+
"<COORD_847>": 152525,
|
| 862 |
+
"<COORD_848>": 152526,
|
| 863 |
+
"<COORD_849>": 152527,
|
| 864 |
+
"<COORD_850>": 152528,
|
| 865 |
+
"<COORD_851>": 152529,
|
| 866 |
+
"<COORD_852>": 152530,
|
| 867 |
+
"<COORD_853>": 152531,
|
| 868 |
+
"<COORD_854>": 152532,
|
| 869 |
+
"<COORD_855>": 152533,
|
| 870 |
+
"<COORD_856>": 152534,
|
| 871 |
+
"<COORD_857>": 152535,
|
| 872 |
+
"<COORD_858>": 152536,
|
| 873 |
+
"<COORD_859>": 152537,
|
| 874 |
+
"<COORD_860>": 152538,
|
| 875 |
+
"<COORD_861>": 152539,
|
| 876 |
+
"<COORD_862>": 152540,
|
| 877 |
+
"<COORD_863>": 152541,
|
| 878 |
+
"<COORD_864>": 152542,
|
| 879 |
+
"<COORD_865>": 152543,
|
| 880 |
+
"<COORD_866>": 152544,
|
| 881 |
+
"<COORD_867>": 152545,
|
| 882 |
+
"<COORD_868>": 152546,
|
| 883 |
+
"<COORD_869>": 152547,
|
| 884 |
+
"<COORD_870>": 152548,
|
| 885 |
+
"<COORD_871>": 152549,
|
| 886 |
+
"<COORD_872>": 152550,
|
| 887 |
+
"<COORD_873>": 152551,
|
| 888 |
+
"<COORD_874>": 152552,
|
| 889 |
+
"<COORD_875>": 152553,
|
| 890 |
+
"<COORD_876>": 152554,
|
| 891 |
+
"<COORD_877>": 152555,
|
| 892 |
+
"<COORD_878>": 152556,
|
| 893 |
+
"<COORD_879>": 152557,
|
| 894 |
+
"<COORD_880>": 152558,
|
| 895 |
+
"<COORD_881>": 152559,
|
| 896 |
+
"<COORD_882>": 152560,
|
| 897 |
+
"<COORD_883>": 152561,
|
| 898 |
+
"<COORD_884>": 152562,
|
| 899 |
+
"<COORD_885>": 152563,
|
| 900 |
+
"<COORD_886>": 152564,
|
| 901 |
+
"<COORD_887>": 152565,
|
| 902 |
+
"<COORD_888>": 152566,
|
| 903 |
+
"<COORD_889>": 152567,
|
| 904 |
+
"<COORD_890>": 152568,
|
| 905 |
+
"<COORD_891>": 152569,
|
| 906 |
+
"<COORD_892>": 152570,
|
| 907 |
+
"<COORD_893>": 152571,
|
| 908 |
+
"<COORD_894>": 152572,
|
| 909 |
+
"<COORD_895>": 152573,
|
| 910 |
+
"<COORD_896>": 152574,
|
| 911 |
+
"<COORD_897>": 152575,
|
| 912 |
+
"<COORD_898>": 152576,
|
| 913 |
+
"<COORD_899>": 152577,
|
| 914 |
+
"<COORD_900>": 152578,
|
| 915 |
+
"<COORD_901>": 152579,
|
| 916 |
+
"<COORD_902>": 152580,
|
| 917 |
+
"<COORD_903>": 152581,
|
| 918 |
+
"<COORD_904>": 152582,
|
| 919 |
+
"<COORD_905>": 152583,
|
| 920 |
+
"<COORD_906>": 152584,
|
| 921 |
+
"<COORD_907>": 152585,
|
| 922 |
+
"<COORD_908>": 152586,
|
| 923 |
+
"<COORD_909>": 152587,
|
| 924 |
+
"<COORD_910>": 152588,
|
| 925 |
+
"<COORD_911>": 152589,
|
| 926 |
+
"<COORD_912>": 152590,
|
| 927 |
+
"<COORD_913>": 152591,
|
| 928 |
+
"<COORD_914>": 152592,
|
| 929 |
+
"<COORD_915>": 152593,
|
| 930 |
+
"<COORD_916>": 152594,
|
| 931 |
+
"<COORD_917>": 152595,
|
| 932 |
+
"<COORD_918>": 152596,
|
| 933 |
+
"<COORD_919>": 152597,
|
| 934 |
+
"<COORD_920>": 152598,
|
| 935 |
+
"<COORD_921>": 152599,
|
| 936 |
+
"<COORD_922>": 152600,
|
| 937 |
+
"<COORD_923>": 152601,
|
| 938 |
+
"<COORD_924>": 152602,
|
| 939 |
+
"<COORD_925>": 152603,
|
| 940 |
+
"<COORD_926>": 152604,
|
| 941 |
+
"<COORD_927>": 152605,
|
| 942 |
+
"<COORD_928>": 152606,
|
| 943 |
+
"<COORD_929>": 152607,
|
| 944 |
+
"<COORD_930>": 152608,
|
| 945 |
+
"<COORD_931>": 152609,
|
| 946 |
+
"<COORD_932>": 152610,
|
| 947 |
+
"<COORD_933>": 152611,
|
| 948 |
+
"<COORD_934>": 152612,
|
| 949 |
+
"<COORD_935>": 152613,
|
| 950 |
+
"<COORD_936>": 152614,
|
| 951 |
+
"<COORD_937>": 152615,
|
| 952 |
+
"<COORD_938>": 152616,
|
| 953 |
+
"<COORD_939>": 152617,
|
| 954 |
+
"<COORD_940>": 152618,
|
| 955 |
+
"<COORD_941>": 152619,
|
| 956 |
+
"<COORD_942>": 152620,
|
| 957 |
+
"<COORD_943>": 152621,
|
| 958 |
+
"<COORD_944>": 152622,
|
| 959 |
+
"<COORD_945>": 152623,
|
| 960 |
+
"<COORD_946>": 152624,
|
| 961 |
+
"<COORD_947>": 152625,
|
| 962 |
+
"<COORD_948>": 152626,
|
| 963 |
+
"<COORD_949>": 152627,
|
| 964 |
+
"<COORD_950>": 152628,
|
| 965 |
+
"<COORD_951>": 152629,
|
| 966 |
+
"<COORD_952>": 152630,
|
| 967 |
+
"<COORD_953>": 152631,
|
| 968 |
+
"<COORD_954>": 152632,
|
| 969 |
+
"<COORD_955>": 152633,
|
| 970 |
+
"<COORD_956>": 152634,
|
| 971 |
+
"<COORD_957>": 152635,
|
| 972 |
+
"<COORD_958>": 152636,
|
| 973 |
+
"<COORD_959>": 152637,
|
| 974 |
+
"<COORD_960>": 152638,
|
| 975 |
+
"<COORD_961>": 152639,
|
| 976 |
+
"<COORD_962>": 152640,
|
| 977 |
+
"<COORD_963>": 152641,
|
| 978 |
+
"<COORD_964>": 152642,
|
| 979 |
+
"<COORD_965>": 152643,
|
| 980 |
+
"<COORD_966>": 152644,
|
| 981 |
+
"<COORD_967>": 152645,
|
| 982 |
+
"<COORD_968>": 152646,
|
| 983 |
+
"<COORD_969>": 152647,
|
| 984 |
+
"<COORD_970>": 152648,
|
| 985 |
+
"<COORD_971>": 152649,
|
| 986 |
+
"<COORD_972>": 152650,
|
| 987 |
+
"<COORD_973>": 152651,
|
| 988 |
+
"<COORD_974>": 152652,
|
| 989 |
+
"<COORD_975>": 152653,
|
| 990 |
+
"<COORD_976>": 152654,
|
| 991 |
+
"<COORD_977>": 152655,
|
| 992 |
+
"<COORD_978>": 152656,
|
| 993 |
+
"<COORD_979>": 152657,
|
| 994 |
+
"<COORD_980>": 152658,
|
| 995 |
+
"<COORD_981>": 152659,
|
| 996 |
+
"<COORD_982>": 152660,
|
| 997 |
+
"<COORD_983>": 152661,
|
| 998 |
+
"<COORD_984>": 152662,
|
| 999 |
+
"<COORD_985>": 152663,
|
| 1000 |
+
"<COORD_986>": 152664,
|
| 1001 |
+
"<COORD_987>": 152665,
|
| 1002 |
+
"<COORD_988>": 152666,
|
| 1003 |
+
"<COORD_989>": 152667,
|
| 1004 |
+
"<COORD_990>": 152668,
|
| 1005 |
+
"<COORD_991>": 152669,
|
| 1006 |
+
"<COORD_992>": 152670,
|
| 1007 |
+
"<COORD_993>": 152671,
|
| 1008 |
+
"<COORD_994>": 152672,
|
| 1009 |
+
"<COORD_995>": 152673,
|
| 1010 |
+
"<COORD_996>": 152674,
|
| 1011 |
+
"<COORD_997>": 152675,
|
| 1012 |
+
"<COORD_998>": 152676,
|
| 1013 |
+
"<COORD_999>": 152677,
|
| 1014 |
+
"<IMG_CONTEXT>": 151671,
|
| 1015 |
+
"<box>": 151676,
|
| 1016 |
+
"<brief>": 152678,
|
| 1017 |
+
"<img>": 151669,
|
| 1018 |
+
"<label>": 152680,
|
| 1019 |
+
"<layout>": 152686,
|
| 1020 |
+
"<quad>": 151672,
|
| 1021 |
+
"<ref>": 151674,
|
| 1022 |
+
"<text>": 152682,
|
| 1023 |
+
"<text_list>": 152684,
|
| 1024 |
+
"<think>": 151667,
|
| 1025 |
+
"<tool_call>": 151657,
|
| 1026 |
+
"<tool_response>": 151665,
|
| 1027 |
+
"<|box_end|>": 151649,
|
| 1028 |
+
"<|box_start|>": 151648,
|
| 1029 |
+
"<|endoftext|>": 151643,
|
| 1030 |
+
"<|file_sep|>": 151664,
|
| 1031 |
+
"<|fim_middle|>": 151660,
|
| 1032 |
+
"<|fim_pad|>": 151662,
|
| 1033 |
+
"<|fim_prefix|>": 151659,
|
| 1034 |
+
"<|fim_suffix|>": 151661,
|
| 1035 |
+
"<|im_end|>": 151645,
|
| 1036 |
+
"<|im_start|>": 151644,
|
| 1037 |
+
"<|image_pad|>": 151655,
|
| 1038 |
+
"<|object_ref_end|>": 151647,
|
| 1039 |
+
"<|object_ref_start|>": 151646,
|
| 1040 |
+
"<|quad_end|>": 151651,
|
| 1041 |
+
"<|quad_start|>": 151650,
|
| 1042 |
+
"<|repo_name|>": 151663,
|
| 1043 |
+
"<|video_pad|>": 151656,
|
| 1044 |
+
"<|vision_end|>": 151653,
|
| 1045 |
+
"<|vision_pad|>": 151654,
|
| 1046 |
+
"<|vision_start|>": 151652
|
| 1047 |
+
}
|
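
The coordinate vocabulary added above is strictly arithmetic: every "<COORD_n>" maps to token id 151678 + n, so the full 1000-entry table can be rebuilt without parsing the JSON. A minimal sketch, assuming nothing beyond the values visible in this diff:

```python
# Sketch: rebuild the quantized-coordinate token table from the pattern
# visible in the added_tokens.json diff above ("<COORD_n>" -> 151678 + n).

COORD_BASE = 151678  # inferred from the diff, e.g. "<COORD_231>": 151909

def coord_token_id(n: int) -> int:
    """Token id for the coordinate token <COORD_n>, 0 <= n <= 999."""
    if not 0 <= n <= 999:
        raise ValueError("coordinate bins run from 0 to 999")
    return COORD_BASE + n

# Spot-check against entries shown in the diff.
assert coord_token_id(231) == 151909
assert coord_token_id(500) == 152178
assert coord_token_id(999) == 152677
```
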
blobs/e1219ef85875905368b39e3fe383d72fc6539ade5abf81f7cedf94a19275a345
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1219ef85875905368b39e3fe383d72fc6539ade5abf81f7cedf94a19275a345
+size 11614159
blobs/e2a59915dd6a1c51ccb11be3addf4585fcf0840ac4f63f8e9fb629db58f8db6e
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2a59915dd6a1c51ccb11be3addf4585fcf0840ac4f63f8e9fb629db58f8db6e
+size 4979120456
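
These two blobs are Git LFS pointer stubs: the repository stores only a three-line text file, while the actual payload (here ~11.6 MB and ~4.98 GB, the latter most likely a model weight shard) lives in LFS storage keyed by the sha256 oid. A minimal sketch of parsing the spec-v1 pointer format shown above:

```python
# Sketch: parse a git-lfs spec v1 pointer file (key/value lines) into
# its content address (oid) and payload size.

def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e2a59915dd6a1c51ccb11be3addf4585fcf0840ac4f63f8e9fb629db58f8db6e
size 4979120456"""

info = parse_lfs_pointer(pointer)
assert info["size"] == 4979120456  # ~4.98 GB payload behind a 3-line stub
```
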
blobs/e543bcb3b7550029f450d28cf138706d7f9a5ef5
ADDED
@@ -0,0 +1,4 @@
+{
+  "image_seq_length": 256,
+  "processor_class": "InternVLProcessor"
+}
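
The `image_seq_length` of 256 is consistent with the vision settings in the config.json blob below: a 448x448 tile cut into 14x14 patches gives a 32x32 patch grid, and pixel-shuffle downsampling by 0.5 per spatial dimension leaves 16x16 = 256 visual tokens per tile. A quick arithmetic check:

```python
# Sketch: derive image_seq_length from the vision settings in config.json below.
image_size = 448        # force_image_size / vision_config.image_size
patch_size = 14         # vision_config.patch_size
downsample_ratio = 0.5  # top-level downsample_ratio

patches_per_side = image_size // patch_size               # 32
tokens_per_tile = int((patches_per_side * downsample_ratio) ** 2)
assert tokens_per_tile == 256                             # matches image_seq_length
```
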
blobs/f16150710559ce7304ebcfca7232f17c7791f6c5
ADDED
@@ -0,0 +1,91 @@
+{
+  "architectures": [
+    "InternVLChatModel"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
+    "AutoModel": "modeling_internvl_chat.InternVLChatModel",
+    "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
+  },
+  "downsample_ratio": 0.5,
+  "dynamic_image_size": true,
+  "eos_token_id": 151645,
+  "force_image_size": 448,
+  "llm_config": {
+    "architectures": [
+      "Qwen3ForCausalLM"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "debug": false,
+    "eos_token_id": 151645,
+    "ep_size": 1,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 2560,
+    "initializer_range": 0.02,
+    "intermediate_size": 9728,
+    "max_position_embeddings": 32768,
+    "max_window_layers": 36,
+    "micro_forward": false,
+    "model_type": "qwen3",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 36,
+    "num_key_value_heads": 8,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": null,
+    "rope_theta": 5000000,
+    "skip_checkpoint": false,
+    "sliding_window": null,
+    "torch_dtype": "bfloat16",
+    "use_cache": false,
+    "use_deepep": false,
+    "use_sliding_window": false,
+    "vocab_size": 153678
+  },
+  "max_dynamic_patch": 12,
+  "min_dynamic_patch": 1,
+  "model_type": "internvl_chat",
+  "pad2square": false,
+  "pad_token_id": 151643,
+  "ps_version": "v2",
+  "select_layer": -1,
+  "template": "qianfanvl",
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": null,
+  "use_backbone_lora": 0,
+  "use_llm_lora": 0,
+  "use_thumbnail": true,
+  "vision_config": {
+    "architectures": [
+      "InternVisionModel"
+    ],
+    "attention_dropout": 0.0,
+    "auto_map": {
+      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
+      "AutoModel": "modeling_intern_vit.InternVisionModel"
+    },
+    "drop_path_rate": 0.1,
+    "dropout": 0.0,
+    "hidden_act": "gelu",
+    "hidden_size": 1024,
+    "image_size": 448,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "layer_norm_eps": 1e-06,
+    "model_type": "intern_vit_6b",
+    "norm_type": "layer_norm",
+    "num_attention_heads": 16,
+    "num_channels": 3,
+    "num_hidden_layers": 24,
+    "patch_size": 14,
+    "qk_normalization": false,
+    "qkv_bias": true,
+    "torch_dtype": "bfloat16",
+    "use_fa3": false,
+    "use_flash_attn": true
+  }
+}
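
The `auto_map` entries above route the Transformers Auto classes to the custom `modeling_internvl_chat.InternVLChatModel` code shipped in this repo, so loading requires `trust_remote_code=True`. A minimal loading sketch; the local path is a placeholder for wherever this folder is snapshotted:

```python
# Sketch: load this checkpoint via the auto_map hooks in config.json.
import torch
from transformers import AutoModel, AutoTokenizer

path = "path/to/this-upload"  # hypothetical local path to this repo
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,
).eval()
```
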
refs/main
ADDED
@@ -0,0 +1 @@
+5e181e708edb4089b7b4cab94279c46c6a2604fc
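
In the Hugging Face hub cache layout, `refs/main` pins the commit hash that names the current snapshot, with snapshot files symlinking into the content-addressed `blobs/` directory; this upload lists `blobs/` and `refs/` but no `snapshots/` directory is shown, so the helper below is a sketch under the assumption of the standard layout:

```python
# Sketch: resolve the active snapshot directory from refs/main,
# assuming the standard hub cache layout (refs/, snapshots/, blobs/).
from pathlib import Path

def resolve_snapshot(repo_root: str) -> Path:
    commit = (Path(repo_root) / "refs" / "main").read_text().strip()
    return Path(repo_root) / "snapshots" / commit

# resolve_snapshot(".") -> snapshots/5e181e708edb4089b7b4cab94279c46c6a2604fc
```
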