model code
- config.json +4 -0
- configuration_geov.py +108 -0
- modeling_geov.py +666 -0
- tokenization_geov.py +177 -0
- tokenizer_config.json +6 -0
config.json
CHANGED
@@ -2,6 +2,10 @@
   "architectures": [
     "GeoVForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "configuration_geov.GeoVConfig",
+    "AutoModelForCausalLM": "modeling_geov.GeoVForCausalLM"
+  },
   "bos_token_id": 0,
   "eos_token_id": 2,
   "hidden_size": 5120,
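The new `auto_map` entries are what let the stock `Auto*` classes resolve the custom classes shipped in this repository. A minimal sketch of the loading path this enables (it assumes access to the GeoV/GeoV-9b repo and an explicit opt-in to remote code):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("GeoV/GeoV-9b", trust_remote_code=True)
print(type(config).__name__)  # "GeoVConfig", resolved via configuration_geov.py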
configuration_geov.py
ADDED
@@ -0,0 +1,108 @@
# coding=utf-8
# Copyright 2023 Better Planet Investments and labml.ai team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GeoV model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

GEOV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "GeoV/GeoV-9b": "https://huggingface.co/GeoV/GeoV-9b/resolve/main/config.json",
}


class GeoVConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GeoVModel`]. It is used to instantiate a
    GeoV model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the GeoV
    [GeoV/GeoV-9b](https://huggingface.co/GeoV/GeoV-9b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 65536):
            Vocabulary size of the GeoV model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`GeoVModel`].
        hidden_size (`int`, *optional*, defaults to 5120):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 40):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 20480):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        rotary_emb_base (`int`, *optional*, defaults to 10000):
            Base for computing the rotary embedding frequencies.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        layer_norm_eps (`float`, *optional*, defaults to 1e-4):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        use_extra_biases_ffn (`bool`, *optional*, defaults to `False`):
            Whether or not to have extra bias parameters in the final layer of FFN modules.

    Example:

    ```python
    >>> from transformers import GeoVConfig, GeoVModel

    >>> # Initializing a GeoV configuration
    >>> configuration = GeoVConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GeoVModel(configuration)  # doctest: +SKIP

    >>> # Accessing the model configuration
    >>> configuration = model.config  # doctest: +SKIP
    ```"""

    model_type = "geov"

    def __init__(
        self,
        vocab_size=65_536,
        hidden_size=5_120,
        num_hidden_layers=32,
        num_attention_heads=40,
        intermediate_size=1024 * 5 * 4,
        layer_norm_eps=1e-4,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        use_extra_biases_ffn=False,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.rotary_emb_base = rotary_emb_base
        self.use_cache = use_cache
        self.layer_norm_eps = layer_norm_eps
        self.use_extra_biases_ffn = use_extra_biases_ffn
modeling_geov.py
ADDED
@@ -0,0 +1,666 @@
# coding=utf-8
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
# Modifications Copyright 2023 Better Planet Investments and labml.ai team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GeoV model."""
import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_geov import GeoVConfig

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "GeoV/GeoV-9b"
_REAL_CHECKPOINT_FOR_DOC = "GeoV/GeoV-9b"
_CONFIG_FOR_DOC = "GeoVConfig"

GEOV_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "GeoV/GeoV-9b",
    # See all GeoV models at https://huggingface.co/models?filter=geov
]


class RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, base=10000):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

        self.max_seq_len_cached = -1

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # Build (or extend) the cached sin/cos tables whenever a sequence longer than the cache is seen.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            # Different from the paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.cos_cached = emb.cos()[None, None, :, :].to(x.dtype)
            self.sin_cached = emb.sin()[None, None, :, :].to(x.dtype)
        return self.cos_cached.to(x.device), self.sin_cached.to(x.device)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, cos, sin, position_ids):
    """Apply positional embeddings"""
    gather_indices = position_ids[:, None, :, None]  # [bs, 1, seq_len, 1]
    gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
    cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    return q_embed


def apply_rotary_pos_emb_reverse(q, cos, sin, position_ids):
    """Apply positional embeddings in reverse"""
    gather_indices = position_ids[:, None, :, None]  # [bs, 1, seq_len, 1]
    gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
    cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    q_embed = (q * cos) - (rotate_half(q) * sin)
    return q_embed


class GeoVAttention(nn.Module):
    """
    Attention module
    """

    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.num_attention_heads
        max_positions = config.max_position_embeddings
        self.register_buffer("causal_mask", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)))
        self.rotary_emb = RotaryEmbedding(self.head_size, base=config.rotary_emb_base)
        self.qkv = nn.Linear(config.hidden_size, 3 * config.hidden_size)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        position_ids: torch.LongTensor,
        head_mask: Optional[torch.FloatTensor] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ):
        has_layer_past = layer_past is not None

        # Compute QKV
        # Attention heads [batch, seq_len, hidden_size]
        # --> [batch, seq_len, (np * 3 * head_size)]
        qkv = self.qkv(hidden_states)
        query, key, value = torch.tensor_split(qkv, 3, dim=-1)

        # 'b l (h q) -> b h l q'
        query = self._split_heads(query, self.num_attention_heads)
        key = self._split_heads(key, self.num_attention_heads)
        value = self._split_heads(value, self.num_attention_heads)

        # Compute token offset for rotary embeddings (when decoding)
        seq_len = key.shape[-2]
        offset = 0
        if has_layer_past:
            seq_len += layer_past[0].shape[-2]

        cos, sin = self.rotary_emb(query, seq_len=seq_len)
        query = apply_rotary_pos_emb(query, cos, sin, position_ids)
        key = apply_rotary_pos_emb(key, cos, sin, position_ids)
        value = apply_rotary_pos_emb(value, cos, sin, position_ids)

        # Cache QKV values
        if has_layer_past:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        present = (key, value) if use_cache else None

        # Compute attention
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = apply_rotary_pos_emb_reverse(attn_output, cos, sin, position_ids)

        # Reshape outputs
        attn_output = self._merge_heads(attn_output)
        attn_output = self.dense(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs

    @classmethod
    def _split_heads(cls, tensor, num_attention_heads):
        """
        Splits hidden dim into num_attention_heads
        """
        # tensor: [bs, seq_len, hidden_size]
        new_shape = tensor.shape[:-1] + (num_attention_heads, tensor.shape[-1] // num_attention_heads)
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(new_shape)
        # -> [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3)
        return tensor

    @classmethod
    def _merge_heads(cls, tensor):
        """
        Merges heads
        """
        # tensor [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(*tensor.shape[:2], tensor.shape[2] * tensor.shape[3])
        # -> [bs, seq_len, hidden_size]
        return tensor

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]
        # compute causal mask from causal mask buffer
        batch_size, num_attention_heads, query_length, attn_head_size = query.shape
        key_length = key.shape[-2]

        causal_mask = self.causal_mask[None, None, key_length - query_length : key_length, :key_length]

        attn_scores = torch.einsum("bhid,bhjd->bhij", query, key) / math.sqrt(attn_head_size)

        attn_scores.masked_fill_(causal_mask == 0, torch.finfo(attn_scores.dtype).min)

        if attention_mask is not None:
            # Apply the attention mask
            attn_scores = attn_scores + attention_mask

        attn_weights = nn.functional.softmax(attn_scores, dim=-1)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights


class GeoVMLP(nn.Module):
    """Position-wise feed-forward network"""

    def __init__(self, config: "GeoVConfig"):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_2h_to_h = nn.Linear(
            config.intermediate_size // 2, config.hidden_size, bias=config.use_extra_biases_ffn
        )
        self.act = nn.GELU()

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        # Gated GELU
        gate, pass_through = torch.tensor_split(hidden_states, 2, dim=-1)
        gate = self.act(gate)
        hidden_states = gate * pass_through

        hidden_states = self.dense_2h_to_h(hidden_states)
        return hidden_states


class GeoVLayer(nn.Module):
    """GeoV transformer layer"""

    def __init__(self, config: "GeoVConfig"):
        super().__init__()
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = GeoVAttention(config)
        self.mlp = GeoVMLP(config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
    ):
        attention_layer_outputs = self.attention(
            self.input_layernorm(hidden_states),
            attention_mask=attention_mask,
            position_ids=position_ids,
            layer_past=layer_past,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attention_layer_outputs[0]  # output_attn: attn_output, present, (attn_weights)
        outputs = attention_layer_outputs[1:]

        attn_output = attn_output + hidden_states
        mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
        hidden_states = mlp_output + attn_output

        if use_cache:
            outputs = (hidden_states,) + outputs  # hidden_states, present, (attn_weights)
        else:
            outputs = (hidden_states,) + outputs[1:]  # hidden_states, (attn_weights)

        return outputs


class GeoVPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GeoVConfig
    base_model_prefix = "geov"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GeoVLayer"]

    def _init_weights(self, module):
        pass

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, GeoVModel):
            module.gradient_checkpointing = value


GEOV_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`~GeoVConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GEOV_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, seq_len)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, seq_len)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        past_key_values (`Tuple[Tuple[torch.FloatTensor]]` of length `n_layers`, with each tuple having 2 tensors of shape `(batch_size, n_heads, seq_len - 1, head_size)`, *optional*):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare GeoV Model transformer outputting raw hidden-states without any specific head on top.",
    GEOV_START_DOCSTRING,
)
class GeoVModel(GeoVPreTrainedModel):
    def __init__(self, config: "GeoVConfig"):
        super().__init__(config)
        self.config = config

        self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([GeoVLayer(config) for _ in range(config.num_hidden_layers)])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.embed_in.to(torch.bfloat16)
        self.layers.to(torch.bfloat16)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_in

    def set_input_embeddings(self, value):
        self.embed_in = value

    @add_start_docstrings_to_model_forward(GEOV_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * self.config.num_hidden_layers)
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        # Attention mask.
        if attention_mask is not None:
            assert batch_size > 0, "batch_size has to be defined and > 0"
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if inputs_embeds is None:
            inputs_embeds = self.embed_in(input_ids)

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for layer_past
                        return module(*inputs, use_cache, None, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                )
            else:
                outputs = layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    layer_past=layer_past,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                all_attentions = all_attentions + (outputs[2 if use_cache else 1],)

        # Cast the hidden state to the final layer norm's data type (this is the modification from GPT-NeoX)
        hidden_states = self.final_layer_norm(hidden_states.to(self.final_layer_norm.weight.dtype))
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )


@add_start_docstrings(
    """GeoV Model with a `language modeling` head on top for CLM fine-tuning.""", GEOV_START_DOCSTRING
)
class GeoVForCausalLM(GeoVPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"causal_mask", r"inv_freq"]

    def __init__(self, config: "GeoVConfig"):
        super().__init__(config)

        self.geov = GeoVModel(config)
        self.embed_out = nn.Linear(config.hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.embed_out

    def set_output_embeddings(self, new_embeddings):
        self.embed_out = new_embeddings

    @add_start_docstrings_to_model_forward(GEOV_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GeoVForCausalLM, GeoVConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("GeoV/GeoV-9b")
        >>> model = GeoVForCausalLM.from_pretrained("GeoV/GeoV-9b")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.geov(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        lm_logits = self.embed_out(hidden_states)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shift_logits = lm_logits[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithPast(
            loss=lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **kwargs):
        input_shape = input_ids.shape

        # cut decoder_input_ids if past is used
        if past_key_values and past_key_values[0] is not None:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
            "past_key_values": past_key_values,
        }

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
tokenization_geov.py
ADDED
@@ -0,0 +1,177 @@
# coding=utf-8
# Copyright 2023 Better Planet Investments and labml.ai team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for GeoV."""
from pathlib import Path
from typing import List, Optional, Tuple

import sentencepiece as spm

from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import SPIECE_UNDERLINE, logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "GeoV/GeoV-9b": "https://huggingface.co/GeoV/GeoV-9b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "GeoV-9b": 2048,
}


class GeoVTokenizer(PreTrainedTokenizer):
    """
    Construct a GeoV tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining.

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.

        new_line_token_id (`int`, *optional*, defaults to `65_499`):
            The token id of the new line character.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        new_line_token_id=65_499,
        **kwargs,
    ) -> None:
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            new_line_token_id=new_line_token_id,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.new_line_token_id = new_line_token_id

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        # SentencePiece has no piece for "\n"; encode line by line and splice a
        # literal "\n" token back in between the lines.
        ret = []
        split_text = text.splitlines()
        for line in split_text:
            line_pieces = self.sp_model.encode(line, out_type=str)
            ret.extend(line_pieces)
            ret.append("\n")
        ret = ret[:-1]
        return ret

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token == "\n":
            return self.new_line_token_id
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index == self.new_line_token_id:
            return "\n"
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = True,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        if skip_special_tokens:
            # compare against the special token strings, not their ids
            filtered_tokens = [t for t in filtered_tokens if t not in self.all_special_tokens]

        text = self.convert_tokens_to_string(filtered_tokens)

        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_directory = Path(save_directory)
        if not save_directory.is_dir():
            raise ValueError(f"Vocabulary path ({save_directory}) should be a directory")
        vocab_fn = VOCAB_FILES_NAMES["vocab_file"]
        filename_prefix = f"{filename_prefix}-" if filename_prefix else ""

        vocab_file = save_directory / f"{filename_prefix}{vocab_fn}"

        with open(str(vocab_file), "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (str(vocab_file),)
tokenizer_config.json
CHANGED
@@ -1,4 +1,10 @@
 {
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_geov.GeoVTokenizer",
+      null
+    ]
+  },
   "bos_token": "<s>",
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
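In the AutoTokenizer entry the two-element list names the slow and fast tokenizer classes in that order, so the null records that no fast (Rust-backed) tokenizer is shipped. With both files' auto_map entries in place, the whole stack loads through the Auto* API; an end-to-end sketch (it assumes the ~9B checkpoint is downloadable and fits in memory):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("GeoV/GeoV-9b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "GeoV/GeoV-9b", trust_remote_code=True, torch_dtype=torch.bfloat16
)

inputs = tokenizer("In mathematics, topology is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))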