Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +163 -3
- config.json +38 -0
- configuration_hf_alibaba_nlp_gte.py +145 -0
- global_step30/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_4_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_5_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_6_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
- global_step30/zero_pp_rank_7_mp_rank_00_model_states.pt +3 -0
- global_step30/zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
- latest +1 -0
- model.safetensors +3 -0
- modeling_hf_alibaba_nlp_gte.py +967 -0
- rng_state_0.pth +3 -0
- rng_state_1.pth +3 -0
- rng_state_2.pth +3 -0
- rng_state_3.pth +3 -0
- rng_state_4.pth +3 -0
- rng_state_5.pth +3 -0
- rng_state_6.pth +3 -0
- rng_state_7.pth +3 -0
- special_tokens_map.json +51 -0
- tokenizer.json +3 -0
- tokenizer_config.json +62 -0
- trainer_state.json +76 -0
- training_args.bin +3 -0
- zero_to_fp32.py +760 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,163 @@
----
-license: apache-2.0
-
---
license: apache-2.0
language:
- en
- ko
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
- embedding
- gte
- text-embedding
- retrieval
- matryoshka
- academic-search
- scientific-search
library_name: transformers
base_model: Snowflake/snowflake-arctic-embed-m-v2.0
datasets:
- ms_marco
---

# LinerAI/snowflake-arctic-embed-m-v2.0-academic for Academic Search

This is a fine-tuned version of [Snowflake/snowflake-arctic-embed-m-v2.0](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v2.0) optimized for **academic and scientific literature search**. The model was trained with contrastive learning and hard negative mining on data curated specifically for academic search scenarios.

## Highlights

- **Optimized for Academic Search**: Fine-tuned on datasets specifically designed for academic literature retrieval
- **Hard Negative Mining**: Trained with carefully mined hard negatives to improve discrimination between similar academic papers
- **Matryoshka Representation Learning (MRL)**: Supports flexible embedding dimensions (768, 512, 256, 128) for efficiency
- **Efficient & Fast**: Medium-sized model offering an excellent speed-performance trade-off
- **Long Context**: Supports up to 4096 tokens

## Model Description

| Attribute | Value |
|-----------|-------|
| Base Model | Snowflake/snowflake-arctic-embed-m-v2.0 |
| Architecture | GTE |
| Embedding Dimension | 768 |
| MRL Dimensions | 768, 512, 256, 128 |
| Max Sequence Length | 4096 |
| Pooling | CLS token |
| Precision | float16 |

## Evaluation Results

| **Model** | Avg. | SciFact: Recall@10 | TRECCOVID: Recall@10 | NFCorpus: Recall@10 | SCIDOCS: Recall@10 | LitSearch: Recall@10 | QASA: Recall@10 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| snowflake-arctic-embed-m-v2.0-academic | 0.3729 | 0.8609 | 0.0219 | 0.1770 | 0.2129 | 0.6435 | 0.3210 |
| snowflake-arctic-embed-m-v2.0 | 0.3654 | 0.8353 | 0.0224 | 0.1669 | 0.2122 | 0.6508 | 0.3046 |

## Training Details

### Training Configuration

| Parameter | Value |
|-----------|-------|
| Learning Rate | 2e-5 |
| Batch Size | 8192 (effective) |
| Per-Device Batch Size | 32 |
| Warmup Steps | 100 |
| Weight Decay | 0.1 |
| Precision | fp16 |
| Max Length | 4096 |
| Loss Function | InfoNCE (Contrastive) |
| Temperature (τ) | 0.02 |
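As a reference for the loss row above, here is a minimal sketch of InfoNCE with in-batch negatives at τ = 0.02. This is an illustration only: the actual training code is not part of this upload, and the mined hard negatives described below would appear as extra columns of the logits matrix.

```python
import torch
import torch.nn.functional as F

def info_nce(query_emb: torch.Tensor, passage_emb: torch.Tensor, tau: float = 0.02) -> torch.Tensor:
    # Passage i is the positive for query i; every other in-batch passage is a negative.
    q = F.normalize(query_emb, p=2, dim=1)
    p = F.normalize(passage_emb, p=2, dim=1)
    logits = q @ p.T / tau  # [batch, batch] cosine similarities, sharpened by the temperature
    labels = torch.arange(q.size(0), device=q.device)
    return F.cross_entropy(logits, labels)
```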
+
### Training Data
|
| 73 |
+
|
| 74 |
+
The model was trained on [LEAD (Liner Embedding Academic Dataset)](https://huggingface.co/datasets/LinerAI/LEAD), a combination of ~55,560 samples tailored for academic search:
|
| 75 |
+
- **MS MARCO** (49%): General passage retrieval dataset with hard negatives
|
| 76 |
+
- **Academic Search Dataset** (51%): Custom dataset built specifically for academic literature search, with two-stage hard negative mining
|
| 77 |
+
|
| 78 |
+
### Matryoshka Representation Learning (MRL)
|
| 79 |
+
|
| 80 |
+
This model supports [Matryoshka Representation Learning](https://arxiv.org/abs/2205.13147). You can truncate embeddings to smaller dimensions (512, 256, 128) for faster computation and reduced storage.
|
| 81 |
+
|
| 82 |
+
```python
|
| 83 |
+
# Full dimension (768)
|
| 84 |
+
full_embedding = embeddings[:, :768]
|
| 85 |
+
|
| 86 |
+
# MRL dimensions
|
| 87 |
+
embedding_512 = embeddings[:, :512]
|
| 88 |
+
embedding_256 = embeddings[:, :256]
|
| 89 |
+
embedding_128 = embeddings[:, :128]
|
| 90 |
+
```
|
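If you score truncated embeddings with cosine similarity or a dot product, re-normalize after slicing, since truncation breaks the unit norm of the full vector. This is a standard step with MRL models, shown here as a sketch continuing from the snippet above rather than something stated on the original card:

```python
import torch.nn.functional as F

# Re-normalize the 256-dim prefix so dot products are again cosine similarities.
embedding_256 = F.normalize(embeddings[:, :256], p=2, dim=1)
```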
## Usage

### Using Transformers

```python
import torch
from transformers import AutoModel, AutoTokenizer

model_path = "LinerAI/snowflake-arctic-embed-m-v2.0-academic"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModel.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True)
model.eval()

# For queries
def encode_query(text):
    input_text = f"query: {text}"
    inputs = tokenizer(input_text, return_tensors="pt", max_length=4096, truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    embeddings = outputs.last_hidden_state[:, 0]  # CLS token
    embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
    return embeddings

# For passages
def encode_passage(text):
    inputs = tokenizer(text, return_tensors="pt", max_length=4096, truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    embeddings = outputs.last_hidden_state[:, 0]  # CLS token
    embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
    return embeddings

# Example: Academic search
query = "transformer models for protein structure prediction"
abstract = "We introduce AlphaFold, a deep learning system that predicts protein structures..."

query_emb = encode_query(query)
passage_emb = encode_passage(abstract)

similarity = torch.nn.functional.cosine_similarity(query_emb, passage_emb)
print(f"Similarity: {similarity.item():.4f}")
```
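Building on the helpers above, ranking several candidate abstracts against one query is a matrix product over the normalized embeddings. The second abstract below is an illustrative placeholder, not from the original card:

```python
abstracts = [
    abstract,
    "We study sparse retrieval with learned term weighting for web search...",
]
passage_embs = torch.cat([encode_passage(a) for a in abstracts])  # [n, 768], unit-norm rows
scores = (query_emb @ passage_embs.T).squeeze(0)                  # [n] cosine scores
for rank, idx in enumerate(scores.argsort(descending=True).tolist(), start=1):
    print(f"{rank}. {scores[idx]:.4f}  {abstracts[idx][:60]}")
```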
## Input Format

### Query Format

```
query: {your_query_text}
```

### Passage Format

```
{your_passage_text}
```

## Intended Use

- **Academic Paper Search**: Finding relevant research papers given a research query
- **Literature Review**: Discovering related work in academic literature
- **Scientific Document Retrieval**: Retrieving scientific documents, abstracts, and articles
- **Research Question Answering**: Finding papers that address specific research questions

## Limitations

- Maximum sequence length is 4096 tokens
- Best performance is achieved when using the specific input formats described above
- The model uses asymmetric encoding (a `query: ` prefix for queries, no prefix for passages)
- Loading requires `trust_remote_code=True`

## License

This model is released under the Apache 2.0 license.
config.json
ADDED
```json
{
  "architectures": [
    "GteModel"
  ],
  "attention_probs_dropout_prob": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_hf_alibaba_nlp_gte.GteConfig",
    "AutoModel": "modeling_hf_alibaba_nlp_gte.GteModel"
  },
  "classifier_dropout": 0.1,
  "dtype": "float16",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "layer_norm_type": "layer_norm",
  "logn_attention_clip1": false,
  "logn_attention_scale": false,
  "matryoshka_dimensions": [
    256
  ],
  "max_position_embeddings": 8192,
  "model_type": "gte",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pack_qkv": true,
  "pad_token_id": 1,
  "position_embedding_type": "rope",
  "rope_scaling": null,
  "rope_theta": 160000,
  "transformers_version": "4.57.1",
  "type_vocab_size": 1,
  "unpad_inputs": false,
  "use_memory_efficient_attention": false,
  "vocab_size": 250048
}
```
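The `auto_map` entries route `AutoConfig`/`AutoModel` to the two Python files below, which is why loading needs `trust_remote_code=True`. A quick sanity check, with the repo id taken from the model card above:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "LinerAI/snowflake-arctic-embed-m-v2.0-academic", trust_remote_code=True
)
print(config.model_type, config.hidden_size, config.max_position_embeddings)
# expected from the values above: gte 768 8192
```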
configuration_hf_alibaba_nlp_gte.py
ADDED
````python
# coding=utf-8
# Copyright 2024 The GTE Team Authors and Alibaba Group.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GTE model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class GteConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GteModel`]. It is used to
    instantiate a GTE model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    [izhx/new-base-en](https://huggingface.co/izhx/new-base-en) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the GTE model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GteModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`GteModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"rope"`):
            Type of position embedding. Choose one of `"absolute"`, `"rope"`.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Examples:

    ```python
    >>> # Initializing a GTE style configuration
    >>> configuration = GteConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = GteModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gte"

    def __init__(
        self,
        vocab_size=30528,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=2048,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_type='layer_norm',
        layer_norm_eps=1e-12,
        # pad_token_id=0,
        position_embedding_type="rope",
        rope_theta=10000.0,
        rope_scaling=None,
        classifier_dropout=None,
        pack_qkv=True,
        unpad_inputs=False,
        use_memory_efficient_attention=False,
        logn_attention_scale=False,
        logn_attention_clip1=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_type = layer_norm_type
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.classifier_dropout = classifier_dropout

        self.pack_qkv = pack_qkv
        self.unpad_inputs = unpad_inputs
        self.use_memory_efficient_attention = use_memory_efficient_attention
        self.logn_attention_scale = logn_attention_scale
        self.logn_attention_clip1 = logn_attention_clip1
````
global_step30/zero_pp_rank_0_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:e69a46edd10887176c72a41d8ebc9a8da7ff947492c2442ef7e2e4c44232e5a9
size 69793
global_step30/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:5ff3a81f56e6d9b9b5a4f9d7e1b8e85aa020945225b82868631d68a5c8470938
size 458190065
global_step30/zero_pp_rank_1_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:8cdcd5b45a0448de133a2d0da8b2931239d71f0ba699bce76b3f187941335543
size 69793
global_step30/zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:44730cab0dd6f8f0e50855efc65bb12b5980536748c6ec401b598158ee522398
size 458190065
global_step30/zero_pp_rank_2_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:16ce086a15a5a358baba882933e615159ec1b9e88c3f3a294d38e16797c84abd
size 69793
global_step30/zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:b7ef3da6a6368778ae2ccf155a44e2c9afd3ca95a18821cb8800bd7b4b65a783
size 458190065
global_step30/zero_pp_rank_3_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:51e69d4b2561e30bc251b28ea2b57040037a172e6b86c5ead9d3ac38a778ae14
size 69793
global_step30/zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:68ea1f4a6c7cee25faffb0c52b15577ef427ba288510b881029195edf8c1ff75
size 458190065
global_step30/zero_pp_rank_4_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:6afef3e7f7bb9dad7d75d02112757594660a99a8c004655174b5df1bd9be1f23
size 69793
global_step30/zero_pp_rank_4_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:399d003e0429597258f42f0fe5b597a876d3ed6e7ed540e196788b91371769d3
size 458190065
global_step30/zero_pp_rank_5_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:2ffda8a422f53f5ac965634c5a2317ff9cd40cb4166b532d053c6aa88d68b46d
size 69793
global_step30/zero_pp_rank_5_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c5033945f3270c21ca2d4eadf2914ad0c0a336a1cac4e29b98cf3685ff0ab908
size 458190065
global_step30/zero_pp_rank_6_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:4b78359c30a03544441a31b63e4e388003bf0293a9729a697f261b3084ae5946
size 69793
global_step30/zero_pp_rank_6_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:b0ba7e1115dc7921a1a4425eaf9d6aa1c78f71107e9665e6b4b7936545aac8e9
size 458190065
global_step30/zero_pp_rank_7_mp_rank_00_model_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:742887558b15888d85126e8e117a7f4c11a8d8a0eeefddf1b3a0f55b506d5ae1
size 69793
global_step30/zero_pp_rank_7_mp_rank_00_optim_states.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c539e901e5147c662e9abe31a717d1ba47f78a1224d33fa852431f3c6c655aa5
size 458190065
latest
ADDED
global_step30
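`latest` is DeepSpeed's tag file: it points the loader at the `global_step30` ZeRO shards above. If you need a consolidated fp32 state dict from those shards rather than the fp16 `model.safetensors` below, DeepSpeed's stock conversion utility (also shipped in this upload as `zero_to_fp32.py`) can merge them. A sketch, assuming a standard DeepSpeed install and the repo root as the checkpoint directory:

```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Reads the tag (here given explicitly) and merges the per-rank ZeRO shards.
state_dict = get_fp32_state_dict_from_zero_checkpoint(".", tag="global_step30")
```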
model.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:728527f57c7430a1226d04d0a4340e58f79fd249e91bb56759b55cf5252838b8
size 610751112
modeling_hf_alibaba_nlp_gte.py
ADDED
```python
# coding=utf-8
# Copyright 2024 The GTE Team Authors and Alibaba Group.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    ModelOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

try:
    import xformers.ops as xops
except ImportError:
    xops = None

from .configuration_hf_alibaba_nlp_gte import GteConfig


logger = logging.get_logger(__name__)


# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/bert_padding.py
# Which was adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
class IndexFirstAxis(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, indices):
        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        second_dim = other_shape.numel()
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        # return input[indices]
        # return torch.gather(
        #     rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
        # ).reshape(-1, *other_shape)
        return torch.gather(
            input.view(ctx.first_axis_dim, second_dim),
            0,
            indices.unsqueeze(-1).expand(indices.size(0), second_dim)
        ).reshape(-1, *other_shape)

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        assert grad_output.ndim >= 2
        other_shape = grad_output.shape[1:]
        # grad_output = rearrange(grad_output, "b ... -> b (...)")
        grad_output = grad_output.view(grad_output.size(0), other_shape.numel())
        grad_input = torch.zeros(
            [ctx.first_axis_dim, grad_output.shape[1]],
            device=grad_output.device,
            dtype=grad_output.dtype,
        )
        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
        # grad_input[indices] = grad_output
        # grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
        grad_input.scatter_(
            0, indices.unsqueeze(-1).expand(indices.size(0), grad_output.size(1)), grad_output
        )
        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None


index_first_axis = IndexFirstAxis.apply


def unpad_input(hidden_states, attention_mask=None, indices=None):
    """
    Arguments:
        hidden_states: (batch, seqlen, ...)
        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
        indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
    Return:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
    """
    if indices is None:
        assert attention_mask is not None
        indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()

    # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
    # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
    # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
    # so we write custom forward and backward to make it a bit faster.
    hidden_states = hidden_states.view(-1, *hidden_states.shape[2:])
    return index_first_axis(hidden_states, indices)


class IndexPutFirstAxis(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        values: torch.Tensor,
        indices: torch.Tensor,
        first_axis_dim
    ) -> torch.Tensor:
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert values.ndim >= 2
        output = torch.zeros(
            first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
        )
        output[indices] = values
        return output

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None, None]:
        indices, = ctx.saved_tensors
        grad_values = grad_output[indices]
        return grad_values, None, None


index_put_first_axis = IndexPutFirstAxis.apply


def pad_input(inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int) -> torch.Tensor:
    """Add padding to sequences.

    Arguments:
        inputs: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
        indices: (total_nnz), `indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()`
        batch: int batch_size
        seqlen: int max sequence length

    Returns:
        inputs: (batch, seqlen, ...)
    """
    output = index_put_first_axis(inputs, indices, batch * seqlen)
    return output.view(batch, seqlen, *inputs.shape[1:])
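
# Note: unpad_input and pad_input are inverses over the valid positions.
# pad_input(unpad_input(x, mask), indices, batch, seqlen) restores x wherever
# mask == 1 and writes zeros elsewhere, so attention can run over one packed
# sequence of total_nnz tokens instead of a padded (batch, seqlen) grid.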
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def rotate_half(x):
|
| 159 |
+
"""Rotates half the hidden dims of the input."""
|
| 160 |
+
x1 = x[..., : x.shape[-1] // 2]
|
| 161 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
| 162 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def apply_rotary_pos_emb(q, k, cos, sin):
|
| 166 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
| 167 |
+
|
| 168 |
+
Args:
|
| 169 |
+
q (`torch.Tensor`): The query tensor.
|
| 170 |
+
k (`torch.Tensor`): The key tensor.
|
| 171 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
| 172 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
| 173 |
+
Returns:
|
| 174 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
| 175 |
+
"""
|
| 176 |
+
cos, sin = cos.to(q.dtype), sin.to(q.dtype)
|
| 177 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
| 178 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
| 179 |
+
return q_embed, k_embed
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class RotaryEmbedding(torch.nn.Module):
|
| 183 |
+
def __init__(self, dim, max_position_embeddings=512, base=10000.0, device=None):
|
| 184 |
+
super().__init__()
|
| 185 |
+
|
| 186 |
+
self.dim = dim
|
| 187 |
+
self.max_position_embeddings = max_position_embeddings
|
| 188 |
+
self.base = base
|
| 189 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
|
| 190 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 191 |
+
|
| 192 |
+
# Build here to make `torch.jit.trace` work.
|
| 193 |
+
self._set_cos_sin_cache(
|
| 194 |
+
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
|
| 195 |
+
)
|
| 196 |
+
|
| 197 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
| 198 |
+
self.max_seq_len_cached = seq_len
|
| 199 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
|
| 200 |
+
|
| 201 |
+
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
|
| 202 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
| 203 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 204 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
| 205 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
| 206 |
+
|
| 207 |
+
def forward(self, x, seq_len=None):
|
| 208 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
| 209 |
+
if seq_len > self.max_seq_len_cached:
|
| 210 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
| 211 |
+
|
| 212 |
+
return (
|
| 213 |
+
self.cos_cached[:seq_len, ...].to(dtype=x.dtype),
|
| 214 |
+
self.sin_cached[:seq_len, ...].to(dtype=x.dtype),
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class NTKScalingRotaryEmbedding(RotaryEmbedding):
|
| 219 |
+
"""RotaryEmbedding extended with fixed and mixed NTK scaling. https://kexue.fm/archives/9706 """
|
| 220 |
+
|
| 221 |
+
def __init__(self, dim, max_position_embeddings=512, base=10000, device=None, scaling_factor=1.0, mixed_b=None):
|
| 222 |
+
self.scaling_factor = scaling_factor
|
| 223 |
+
self.mixed_b = mixed_b
|
| 224 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
| 225 |
+
max_position_embeddings = max_position_embeddings * self.scaling_factor
|
| 226 |
+
self._set_cos_sin_cache(max_position_embeddings, self.inv_freq.device, torch.get_default_dtype())
|
| 227 |
+
|
| 228 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
| 229 |
+
self.max_seq_len_cached = seq_len
|
| 230 |
+
|
| 231 |
+
if seq_len > self.max_position_embeddings:
|
| 232 |
+
base = self.base * (self.scaling_factor if self.mixed_b is None else 1)
|
| 233 |
+
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
|
| 234 |
+
|
| 235 |
+
if self.mixed_b is None:
|
| 236 |
+
inv_freq = inv_freq / self.scaling_factor ** (2 / self.dim) # (6)
|
| 237 |
+
else:
|
| 238 |
+
a = torch.tensor(self.scaling_factor).log() / (self.dim / 2) ** self.mixed_b # (13)
|
| 239 |
+
lambda_1_m = (a * torch.arange(1, self.dim // 2 + 1).float().to(device) ** self.mixed_b).exp() # (12)
|
| 240 |
+
inv_freq = inv_freq / lambda_1_m # (10)
|
| 241 |
+
|
| 242 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 243 |
+
|
| 244 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
|
| 245 |
+
|
| 246 |
+
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
|
| 247 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
| 248 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 249 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
| 250 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
class RMSNorm(nn.Module):
|
| 254 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 255 |
+
"""
|
| 256 |
+
RMSNorm is equivalent to T5LayerNorm
|
| 257 |
+
"""
|
| 258 |
+
super().__init__()
|
| 259 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 260 |
+
self.variance_epsilon = eps
|
| 261 |
+
|
| 262 |
+
def forward(self, hidden_states):
|
| 263 |
+
input_dtype = hidden_states.dtype
|
| 264 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 265 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 266 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 267 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
LAYER_NORM = {
|
| 271 |
+
'layer_norm': nn.LayerNorm,
|
| 272 |
+
'rms_norm': RMSNorm
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
class GteEmbeddings(nn.Module):
|
| 277 |
+
"""
|
| 278 |
+
Embedding and Unpadding.
|
| 279 |
+
"""
|
| 280 |
+
|
| 281 |
+
def __init__(self, config: GteConfig):
|
| 282 |
+
super().__init__()
|
| 283 |
+
self.padding_idx = config.pad_token_id
|
| 284 |
+
self.word_embeddings = nn.Embedding(
|
| 285 |
+
config.vocab_size, config.hidden_size, padding_idx=self.padding_idx
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
self.position_embedding_type = config.position_embedding_type
|
| 289 |
+
if self.position_embedding_type == 'absolute':
|
| 290 |
+
self.position_embeddings = nn.Embedding(
|
| 291 |
+
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
|
| 292 |
+
)
|
| 293 |
+
elif self.position_embedding_type == 'rope':
|
| 294 |
+
self._init_rope(config)
|
| 295 |
+
else:
|
| 296 |
+
raise ValueError
|
| 297 |
+
|
| 298 |
+
self.type_vocab_size = config.type_vocab_size
|
| 299 |
+
if self.type_vocab_size > 0:
|
| 300 |
+
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
|
| 301 |
+
|
| 302 |
+
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
|
| 303 |
+
# any TensorFlow checkpoint file
|
| 304 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 305 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 306 |
+
# position_ids is contiguous in memory and excluded when serialized
|
| 307 |
+
self.register_buffer(
|
| 308 |
+
"position_ids", torch.arange(config.max_position_embeddings), persistent=False
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
def _init_rope(self, config):
|
| 312 |
+
kwargs = dict(
|
| 313 |
+
dim=int(config.hidden_size / config.num_attention_heads),
|
| 314 |
+
max_position_embeddings=config.max_position_embeddings,
|
| 315 |
+
base=config.rope_theta
|
| 316 |
+
)
|
| 317 |
+
if config.rope_scaling is None:
|
| 318 |
+
self.rotary_emb = RotaryEmbedding(**kwargs)
|
| 319 |
+
else:
|
| 320 |
+
kwargs.update(scaling_factor=config.rope_scaling["factor"])
|
| 321 |
+
scaling_type = config.rope_scaling["type"]
|
| 322 |
+
if scaling_type == 'ntk':
|
| 323 |
+
kwargs.update(mixed_b=config.rope_scaling.get('mixed_b', None))
|
| 324 |
+
self.rotary_emb = NTKScalingRotaryEmbedding(**kwargs)
|
| 325 |
+
# elif scaling_type == "linear":
|
| 326 |
+
# self.rotary_emb = LinearScalingRotaryEmbedding(**kwargs)
|
| 327 |
+
# elif scaling_type == "dynamic":
|
| 328 |
+
# self.rotary_emb = DynamicNTKScalingRotaryEmbedding(**kwargs)
|
| 329 |
+
else:
|
| 330 |
+
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
|
| 331 |
+
|
| 332 |
+
def forward(
|
| 333 |
+
self,
|
| 334 |
+
unpad_inputs: bool,
|
| 335 |
+
input_ids: Optional[torch.Tensor] = None,
|
| 336 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 337 |
+
length: Optional[List[int]] = None,
|
| 338 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
| 339 |
+
position_ids: Optional[torch.Tensor] = None,
|
| 340 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 341 |
+
) -> Tuple[torch.Tensor, torch.Tensor, Optional[Tuple], Optional[List[int]]]:
|
| 342 |
+
"""
|
| 343 |
+
"""
|
| 344 |
+
if inputs_embeds is None:
|
| 345 |
+
device, input_shape = input_ids.device, input_ids.shape
|
| 346 |
+
else:
|
| 347 |
+
device, input_shape = inputs_embeds.device, inputs_embeds.shape[:2]
|
| 348 |
+
batch_size, seq_length = input_shape
|
| 349 |
+
|
| 350 |
+
# Set attention_mask if it's None
|
| 351 |
+
if attention_mask is None:
|
| 352 |
+
attention_mask = torch.ones(input_shape, device=device)
|
| 353 |
+
if length is not None:
|
| 354 |
+
for i, l in enumerate(length):
|
| 355 |
+
attention_mask[i, l:] = 0
|
| 356 |
+
|
| 357 |
+
# Set attention_mask_bool for unpadding
|
| 358 |
+
if unpad_inputs:
|
| 359 |
+
attention_mask_bool = attention_mask.bool()
|
| 360 |
+
if length is None:
|
| 361 |
+
length = attention_mask.sum(-1).tolist()
|
| 362 |
+
|
| 363 |
+
# Get word embeddings
|
| 364 |
+
if inputs_embeds is None:
|
| 365 |
+
if unpad_inputs:
|
| 366 |
+
input_ids = input_ids[attention_mask_bool].unsqueeze(0)
|
| 367 |
+
inputs_embeds = self.word_embeddings(input_ids)
|
| 368 |
+
else:
|
| 369 |
+
if unpad_inputs:
|
| 370 |
+
inputs_embeds = inputs_embeds[attention_mask_bool].unsqueeze(0)
|
| 371 |
+
embeddings = inputs_embeds
|
| 372 |
+
|
| 373 |
+
# Set and unpad position_ids
|
| 374 |
+
if position_ids is None:
|
| 375 |
+
if seq_length > self.position_ids.size(0):
|
| 376 |
+
self.register_buffer(
|
| 377 |
+
"position_ids", torch.arange(seq_length, device=embeddings.device), persistent=False
|
| 378 |
+
)
|
| 379 |
+
if unpad_inputs:
|
| 380 |
+
# [1, cumsum_seq_len]
|
| 381 |
+
position_ids = torch.cat([self.position_ids[:l] for l in length]).unsqueeze(0)
|
| 382 |
+
else:
|
| 383 |
+
# [bs, seq_len]
|
| 384 |
+
position_ids = self.position_ids[:seq_length].expand(batch_size, -1)
|
| 385 |
+
elif unpad_inputs:
|
| 386 |
+
position_ids = position_ids[attention_mask_bool].unsqueeze(0) # [1, cumsum_seq_len]
|
| 387 |
+
|
| 388 |
+
# Compute rotary embedding
|
| 389 |
+
if self.position_embedding_type == 'rope':
|
| 390 |
+
rope_cos, rope_sin = self.rotary_emb(inputs_embeds, seq_len=seq_length)
|
| 391 |
+
rope_cos = rope_cos[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
|
| 392 |
+
rope_sin = rope_sin[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
|
| 393 |
+
rope_embeds = rope_cos, rope_sin
|
| 394 |
+
else:
|
| 395 |
+
rope_embeds = None
|
| 396 |
+
|
| 397 |
+
if self.type_vocab_size > 0:
|
| 398 |
+
if token_type_ids is None:
|
| 399 |
+
token_type_ids = position_ids.mul(0)
|
| 400 |
+
else:
|
| 401 |
+
if self.type_vocab_size < 2:
|
| 402 |
+
token_type_ids.mul_(0)
|
| 403 |
+
if unpad_inputs:
|
| 404 |
+
token_type_ids = token_type_ids[attention_mask_bool].unsqueeze(0)
|
| 405 |
+
|
| 406 |
+
token_type_embeddings = self.token_type_embeddings(token_type_ids)
|
| 407 |
+
embeddings = embeddings + token_type_embeddings
|
| 408 |
+
|
| 409 |
+
# BERT position
|
| 410 |
+
if self.position_embedding_type == "absolute":
|
| 411 |
+
position_embeddings = self.position_embeddings(position_ids)
|
| 412 |
+
embeddings = embeddings + position_embeddings
|
| 413 |
+
|
| 414 |
+
embeddings = self.LayerNorm(embeddings)
|
| 415 |
+
embeddings = self.dropout(embeddings)
|
| 416 |
+
|
| 417 |
+
return embeddings, attention_mask, rope_embeds, length
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
class GteAttention(nn.Module):
|
| 421 |
+
def __init__(self, config: GteConfig, pack_qkv=None, use_memory_efficient_attention=None):
|
| 422 |
+
super().__init__()
|
| 423 |
+
self.config = config
|
| 424 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
| 425 |
+
raise ValueError(
|
| 426 |
+
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
|
| 427 |
+
f"heads ({config.num_attention_heads})"
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
self.hidden_size = config.hidden_size
|
| 431 |
+
self.num_attention_heads = config.num_attention_heads
|
| 432 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
| 433 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
| 434 |
+
|
| 435 |
+
if pack_qkv is None:
|
| 436 |
+
pack_qkv = config.pack_qkv
|
| 437 |
+
self.pack_qkv = pack_qkv
|
| 438 |
+
|
| 439 |
+
if self.pack_qkv:
|
| 440 |
+
self.qkv_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=True)
|
| 441 |
+
else:
|
| 442 |
+
self.q_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 443 |
+
self.k_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 444 |
+
self.v_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
|
| 445 |
+
|
| 446 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
| 447 |
+
self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
|
| 448 |
+
|
| 449 |
+
if use_memory_efficient_attention is None:
|
| 450 |
+
use_memory_efficient_attention = self.config.use_memory_efficient_attention
|
| 451 |
+
self.use_memory_efficient_attention = use_memory_efficient_attention
|
| 452 |
+
self.memory_efficient_attention = None if xops is None else xops.memory_efficient_attention
|
| 453 |
+
if self.use_memory_efficient_attention:
|
| 454 |
+
assert self.memory_efficient_attention is not None, 'please install xformers'
|
| 455 |
+
|
| 456 |
+
def forward(
|
| 457 |
+
self,
|
| 458 |
+
hidden_states: torch.Tensor,
|
| 459 |
+
attention_bias: torch.FloatTensor,
|
| 460 |
+
rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
|
| 461 |
+
padding_inputs: Optional[Tuple] = None, # indices, batch, seqlen
|
| 462 |
+
attention_scale: Optional[torch.FloatTensor] = None,
|
| 463 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
| 464 |
+
output_attentions: Optional[bool] = False,
|
| 465 |
+
qkv_inputs: Optional[Tuple] = None, # For RetroMAE
|
| 466 |
+
) -> Tuple[torch.Tensor, ...]:
|
| 467 |
+
shape_hd = (self.num_attention_heads, self.attention_head_size)
|
| 468 |
+
# qkv
|
| 469 |
+
if self.pack_qkv and qkv_inputs is None:
|
| 470 |
+
qkv_pack = self.qkv_proj(hidden_states).split(self.all_head_size, dim=-1)
|
| 471 |
+
else:
|
| 472 |
+
if qkv_inputs is None:
|
| 473 |
+
            qkv_inputs = (hidden_states, hidden_states, hidden_states)
        qkv_pack = [
            getattr(self, n + '_proj')(s) for s, n in zip(qkv_inputs, 'qkv')
        ]
        query_states, key_states, value_states = [t.view(t.shape[:-1] + shape_hd) for t in qkv_pack]

        if self.config.position_embedding_type == 'rope':
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, *rope_embeds)

        dtype = query_states.dtype

        if self.config.logn_attention_scale and attention_scale is not None:
            # https://kexue.fm/archives/8823
            query_states = query_states * attention_scale.to(dtype)

        if padding_inputs is not None:
            query_states = pad_input(query_states.squeeze(), *padding_inputs)
            key_states = pad_input(key_states.squeeze(), *padding_inputs)
            value_states = pad_input(value_states.squeeze(), *padding_inputs)

        if self.use_memory_efficient_attention:
            assert self.memory_efficient_attention is not None, "xformers is not loaded"
            assert output_attentions is False, "memory_efficient_attention does not output attentions"
            assert head_mask is None, "head_mask is not supported yet"
            attention_probs = None
            if torch.is_tensor(attention_bias):
                attention_bias = attention_bias.to(dtype)
            context_layer = self.memory_efficient_attention(
                query_states,
                key_states,
                value_states,
                attn_bias=attention_bias,
                p=self.dropout.p
            )
        else:
            if output_attentions and isinstance(self, GteSdpaAttention):
                raise RuntimeError("SDPA does not output attentions")
            context_layer, attention_probs = self._attention(
                query_states, key_states, value_states, attention_bias, head_mask
            )

        if padding_inputs is not None:
            context_layer = unpad_input(context_layer, indices=padding_inputs[0])

        gte_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(gte_context_layer_shape)

        # output projection
        attn_output = self.o_proj(context_layer)

        # add attentions if we output them
        outputs = (attn_output, attention_probs) if output_attentions else (attn_output,)
        return outputs

    def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
        """
        Args:
            q/k/v: (B, L, n_head, head_dim)
        Returns:
            attn_output: (B, L, n_head, head_dim)
        """
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_bias is not None:
            # Apply the attention mask (precomputed for all layers in the model's forward() function)
            attention_scores = attention_scores + attention_bias

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        if self.dropout.p > 0:
            attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_states)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        return context_layer, attention_probs

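# Illustrative sketch: for bias-free inputs in eval mode, the eager `_attention` above
# matches torch.nn.functional.scaled_dot_product_attention, just without the fused kernel.
# With `attn` a hypothetical GteAttention instance in eval mode and q/k/v shaped
# (B, L, n_head, head_dim):
#
#     ref, _ = attn._attention(q, k, v, attention_bias=None, head_mask=None)
#     sdpa = torch.nn.functional.scaled_dot_product_attention(
#         q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
#     ).permute(0, 2, 1, 3)
#     torch.testing.assert_close(ref, sdpa, rtol=1e-4, atol=1e-4)
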
class GteSdpaAttention(GteAttention):
    """
    Gte attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `GteAttention` as the weights of the module stay untouched. The only changes are on the forward pass, to adapt to
    the SDPA API.
    """
    def __init__(self, config: GteConfig, **kwargs):
        super().__init__(config, **kwargs)
        # torch.backends.cuda.enable_mem_efficient_sdp(False)
        # logger.warning(
        #     "Disable memory efficient attention kernel for `GteSdpaAttention`, you can set "
        #     "`use_memory_efficient_attention=True` if you expect to use it."
        # )

    def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states.transpose(1, 2),
            key_states.transpose(1, 2),
            value_states.transpose(1, 2),
            attn_mask=attention_bias,
            dropout_p=self.dropout.p if self.training else 0.0,
        )
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        return attn_output, None


GTE_ATTENTION_CLASSES = {
    "eager": GteAttention,
    # "flash_attention_2": ,  # TODO
    "sdpa": GteSdpaAttention,
}


class GteGatedMLP(nn.Module):
    """
    GLU Variants Improve Transformer.
    """

    def __init__(self, config: GteConfig):
        super().__init__()
        self.intermediate_size = config.intermediate_size
        self.up_gate_proj = nn.Linear(config.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=True)
        self.act_fn = ACT2FN[config.hidden_act]
        if config.hidden_dropout_prob > 0:
            self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.hidden_dropout = None

    def forward(self, hidden_states):
        up_gate = self.up_gate_proj(hidden_states)
        up_states, gate = torch.split(up_gate, self.intermediate_size, dim=-1)
        gate = self.act_fn(gate)
        gated_states = gate * up_states
        if self.hidden_dropout is not None:
            gated_states = self.hidden_dropout(gated_states)
        down_states = self.down_proj(gated_states)
        return down_states

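# Illustrative shape walk-through of the gated MLP above, for hidden_size=H and
# intermediate_size=I (the fused projection does one matmul launch instead of two):
#
#     x: (B, L, H)
#     up_gate = up_gate_proj(x)             # (B, L, 2*I)
#     up, gate = split(up_gate, I, dim=-1)  # two (B, L, I) halves
#     y = down_proj(act_fn(gate) * up)      # back to (B, L, H)
#
# This is the GLU-variant feed-forward of "GLU Variants Improve Transformer"
# (https://arxiv.org/abs/2002.05202), with the two input projections fused into one.
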
class GteLayer(nn.Module):
    def __init__(
        self,
        config: GteConfig,
        pack_qkv=None,
        use_memory_efficient_attention=None,
        attn_implementation=None
    ):
        super().__init__()
        if attn_implementation is None:
            attn_implementation = config._attn_implementation
        if use_memory_efficient_attention is None:
            use_memory_efficient_attention = config.use_memory_efficient_attention
        if use_memory_efficient_attention:
            if attn_implementation != 'eager':
                logger.warning_once(f"Override {attn_implementation=} to 'eager' as {use_memory_efficient_attention=}")
                attn_implementation = 'eager'  # Since it will be SDPA by default for torch>=2.1.1
        self.attention = GTE_ATTENTION_CLASSES[attn_implementation](
            config, pack_qkv=pack_qkv, use_memory_efficient_attention=use_memory_efficient_attention
        )
        self.mlp = GteGatedMLP(config)

        ln_class = LAYER_NORM[config.layer_norm_type]
        self.attn_ln = ln_class(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp_ln = ln_class(config.hidden_size, eps=config.layer_norm_eps)

        if config.hidden_dropout_prob > 0:
            self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.hidden_dropout = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_bias: torch.FloatTensor,
        rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
        padding_inputs: Optional[Tuple] = None,  # indices, batch, seqlen
        attention_scale: Optional[torch.FloatTensor] = None,
        subset_indices: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        qkv_inputs: Optional[Tuple] = None,  # For RetroMAE
    ) -> Tuple[torch.Tensor, ...]:
        # Multi-head self-attention
        residual = hidden_states if qkv_inputs is None else qkv_inputs[0]
        attention_outputs = self.attention(
            hidden_states,
            attention_bias,
            rope_embeds,
            padding_inputs,
            attention_scale,
            head_mask,
            output_attentions=output_attentions,
            qkv_inputs=qkv_inputs,
        )
        hidden_states = attention_outputs[0]
        if self.hidden_dropout is not None:
            hidden_states = self.hidden_dropout(hidden_states)
        hidden_states = residual + hidden_states

        # In pretraining, after the attention of the last layer, only the masked tokens are needed.
        if subset_indices is not None:
            hidden_states = hidden_states[subset_indices]

        hidden_states = self.attn_ln(hidden_states)

        # Fully connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        if self.hidden_dropout is not None:
            hidden_states = self.hidden_dropout(hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.mlp_ln(hidden_states)

        # add self attentions if we output attention weights
        outputs = (hidden_states,) + attention_outputs[1:]
        return outputs

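# Note on normalization order: GteLayer is post-LN. Each sub-layer output is added to the
# residual first and normalized afterwards (`attn_ln` after attention, `mlp_ln` after the
# MLP), unlike the pre-LN ordering common in decoder-only transformers.
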
class GteEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([GteLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_bias: Optional[torch.FloatTensor] = None,
        rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
        padding_inputs: Optional[Tuple] = None,  # indices, batch, seqlen
        attention_scale: Optional[torch.FloatTensor] = None,
        subset_indices: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if i >= len(self.layer) - 1:
                layer_subset_indices = subset_indices
            else:
                layer_subset_indices = None

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_bias,
                    rope_embeds,
                    padding_inputs,
                    attention_scale,
                    layer_subset_indices,
                    layer_head_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_bias,
                    rope_embeds,
                    padding_inputs,
                    attention_scale,
                    layer_subset_indices,
                    layer_head_mask,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Gte
class GtePooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class GtePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GteConfig
    base_model_prefix = "gte"
    supports_gradient_checkpointing = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class GteModel(GtePreTrainedModel):
    """
    The bare Gte Model transformer outputting raw hidden-states without any specific head on top.
    """

    def __init__(self, config: GteConfig, add_pooling_layer=False):
        super().__init__(config)
        self.config = config

        self.embeddings = GteEmbeddings(config)
        self.encoder = GteEncoder(config)

        self.pooler = GtePooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        length: Optional[List[int]] = None,
        subset_indices: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
        r"""
        length (`list` of length `batch_size`, *optional*):
            Per-sequence lengths for unpadded inputs. If `None`, the padded `last_hidden_state` is returned.
        subset_indices (`torch.LongTensor`, *optional*):
            If given, only the hidden states at these indices are kept after the last layer
            (used for masked-token prediction in pretraining).
        unpad_inputs (`bool`, *optional*):
            Whether to strip padding and process the batch as concatenated sequences;
            defaults to `config.unpad_inputs`.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        unpad_inputs = unpad_inputs if unpad_inputs is not None else self.config.unpad_inputs
        output_padded = length is None

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # TODO: not used
        # # Prepare head mask if needed
        # # 1.0 in head_mask indicate we keep the head
        # # attention_probs has shape bsz x n_heads x N x N
        # # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        # head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Get embeddings, may unpad them
        (embedding_output, attention_mask, rope_embeds, length) = self.embeddings(
            unpad_inputs,
            input_ids=input_ids,
            attention_mask=attention_mask,
            length=length,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds
        )

        batch_size, seq_length = input_shape
        if unpad_inputs and self.config.use_memory_efficient_attention:
            attention_bias = xops.fmha.attn_bias.BlockDiagonalMask.from_seqlens(length)
        else:
            # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
            # ourselves in which case we just need to make it broadcastable to all heads.
            attention_bias = self.get_extended_attention_mask(attention_mask, input_shape)
            if self.config.use_memory_efficient_attention:
                # Invalid shape for attention bias: torch.Size([48, 1, 1, 512]) (expected (48, 12, 512, 512))
                attention_bias = attention_bias.expand(-1, self.config.num_attention_heads, seq_length, -1)

        padding_inputs = None
        if unpad_inputs and (output_padded or not self.config.use_memory_efficient_attention):
            indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
            if not self.config.use_memory_efficient_attention:
                padding_inputs = (indices, *input_shape)

        attention_scale = None
        if self.config.logn_attention_scale:
            logger.warning_once("TODO: logn_attention_scale")
            # # attention scale log_512(input_len)
            # attention_scale = attention_mask.sum(1).log() / torch.tensor(self.config.max_position_embeddings).log()
            # # the inference-time logn scale needs to be clipped at 1
            # if self.config.logn_attention_clip1:
            #     attention_scale.clip_(1)
            # attention_scale = attention_scale[:, None, None, None]
            # else:
            #     attention_scale = None

        encoder_outputs = self.encoder(
            embedding_output,
            attention_bias=attention_bias,
            rope_embeds=rope_embeds,
            padding_inputs=padding_inputs,
            attention_scale=attention_scale,
            subset_indices=subset_indices,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if unpad_inputs and output_padded:
            sequence_output = pad_input(
                sequence_output.squeeze(), indices, batch_size, seq_length
            )

        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
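For orientation, a minimal usage sketch for the model code above. The repository id below is a placeholder, and `trust_remote_code=True` is required because the architecture lives in the bundled `modeling_hf_alibaba_nlp_gte.py` rather than in `transformers` itself; first-token pooling mirrors the `GtePooler` convention.

```python
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

repo_id = "your-org/your-gte-checkpoint"  # placeholder: substitute this repository's id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True).eval()

texts = ["A query sentence.", "A candidate passage."]
batch = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")

with torch.no_grad():
    outputs = model(**batch)

# CLS-style pooling on the first token, then L2-normalize for cosine similarity.
embeddings = F.normalize(outputs.last_hidden_state[:, 0], p=2, dim=1)
print(embeddings @ embeddings.T)
```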
rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e7b6609fe563b370938978376facb17f50a40ac5c5783de068dbb420f3a1c89
size 16325

rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7cb379c4851b0d5b7fc61e78c98bda5fb7f79b3a63d042f5956fd04b94f60340
size 16325

rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10f92f990949edec9b4d89a7af4a6e4dece0fe1abeb74a50754914ee3ad289de
size 16325

rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0c865684e5c63037bd91f71e351aff838a56f39b197c35776bfbbb1228dee96
size 16325

rng_state_4.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e3a7abf52f98abacb9950ba4df9dae9e32b4d6afbe02052c319ff3eee16cc2b
size 16325

rng_state_5.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d52d6235cc69f5c81eeb4d38916e9c203752b76ead12967a99ec7673c2632a2f
size 16325

rng_state_6.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82d458fff8fa3d48a46f8adcfeef2eb9d52409781fa4f305c09326d6ae5f49c1
size 16325

rng_state_7.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3466b1c26a369c6a50445324f0a90682e9f908a8cd05f16540e7f3ec4ef797aa
size 16325
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a56def25aa40facc030ea8b0b87f3688e4b3c39eb8b45d5702b3a1300fe2a20
size 17082734
tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "250001": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "max_length": 512,
  "model_max_length": 32768,
  "pad_to_multiple_of": null,
  "pad_token": "<pad>",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "</s>",
  "stride": 0,
  "tokenizer_class": "XLMRobertaTokenizerFast",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "<unk>"
}
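The two JSON files above describe a stock XLM-RoBERTa vocabulary (`<s>`, `<pad>`, `</s>`, `<unk>` at ids 0-3 and `<mask>` at 250001) with `model_max_length` raised to 32768. A quick sketch to confirm what they declare (placeholder repo id again):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-org/your-gte-checkpoint")  # placeholder id

print(type(tokenizer).__name__)                  # XLMRobertaTokenizerFast
print(tokenizer.model_max_length)                # 32768
print(tokenizer.cls_token, tokenizer.sep_token)  # <s> </s>

# Single sequences are wrapped as <s> ... </s>, so index 0 is what GtePooler reads.
ids = tokenizer("hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
```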
trainer_state.json
ADDED
@@ -0,0 +1,76 @@
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.2949308755760365,
  "eval_steps": 500,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7373271889400922,
      "grad_norm": 1.258885555564063,
      "learning_rate": 6.989700043360187e-06,
      "loss": 0.2967,
      "step": 5
    },
    {
      "epoch": 1.4423963133640554,
      "grad_norm": 0.8498651909394829,
      "learning_rate": 9.999999999999999e-06,
      "loss": 0.257,
      "step": 10
    },
    {
      "epoch": 2.1474654377880182,
      "grad_norm": 0.6656377186139756,
      "learning_rate": 1.1760912590556813e-05,
      "loss": 0.2261,
      "step": 15
    },
    {
      "epoch": 2.8847926267281108,
      "grad_norm": 0.6127831891899057,
      "learning_rate": 1.301029995663981e-05,
      "loss": 0.2073,
      "step": 20
    },
    {
      "epoch": 3.589861751152074,
      "grad_norm": 0.5549379002562179,
      "learning_rate": 1.3979400086720374e-05,
      "loss": 0.1842,
      "step": 25
    },
    {
      "epoch": 4.2949308755760365,
      "grad_norm": 0.4947135885932024,
      "learning_rate": 1.4771212547196623e-05,
      "loss": 0.1678,
      "step": 30
    }
  ],
  "logging_steps": 5,
  "max_steps": 35,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 481142620815360.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
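One detail worth noting in `trainer_state.json`: every logged learning rate equals `1e-5 * log10(step)` (e.g. `6.9897e-06 = 1e-5 * log10(5)`), so the schedule grows logarithmically over these 30 steps rather than following a linear warmup. A short sketch to check this against the file:

```python
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    step, lr = entry["step"], entry["learning_rate"]
    # Each logged rate matches 1e-5 * log10(step) to float precision.
    assert math.isclose(lr, 1e-5 * math.log10(step), rel_tol=1e-9)
    print(f"step {step:2d}  lr {lr:.6e}  loss {entry['loss']:.4f}")
```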
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebba4eb29d0aa822e1ed2d143fb82c99d4a9a51f4e1c60425cf2949a4c9123b1
size 8529
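The DeepSpeed artifacts in this commit (`global_step30/*`, `latest`, the per-rank `rng_state_*.pth` files) pair with the `zero_to_fp32.py` script that follows. Based on the usage documented in the script's own header, consolidating this checkpoint into plain fp32 weights would look roughly like this, run from a local checkout with `deepspeed` installed:

```python
# Sketch: gather the ZeRO-partitioned shards under global_step30/ into one state_dict.
from zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

checkpoint_dir = "."  # repo root; the `latest` file points the script at global_step30
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)
print(f"gathered {sum(t.numel() for t in state_dict.values()):,} fp32 values")
```

Equivalently, the script can be run directly as documented in its header: `python zero_to_fp32.py . output_dir/`.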
zero_to_fp32.py
ADDED
@@ -0,0 +1,760 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example:
#   python zero_to_fp32.py . output_dir/
# or
#   python zero_to_fp32.py . output_dir/ --safe_serialization

import argparse
import torch
import glob
import math
import os
import re
import gc
import json
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]

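# Illustrative example: natural_keys makes rank files sort numerically rather than
# lexicographically, which matters once world_size reaches 10 or more:
#
#     files = ["rank_10_states.pt", "rank_2_states.pt"]
#     sorted(files)                    # ['rank_10_states.pt', 'rank_2_states.pt']
#     sorted(files, key=natural_keys)  # ['rank_2_states.pt', 'rank_10_states.pt']
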
def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device, weights_only=False)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in tqdm(files, desc='Loading checkpoint shards'):
        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

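# Worked example: with unpartitioned_numel=10 and world_size=8,
# remainder = 10 % 8 = 2, padding_numel = 8 - 2 = 6 and partitioned_numel = ceil(10/8) = 2,
# i.e. each of the 8 ranks holds 2 slots (16 in total), 6 of which are padding that gets
# trimmed when the parameter is reconstructed.
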
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


class GatheredTensor:
    """
    A pseudo tensor that collects partitioned weights.
    It is more memory efficient when there are multiple groups.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        self.flat_groups = flat_groups
        self.flat_groups_offset = flat_groups_offset
        self.offset = offset
        self.partitioned_numel = partitioned_numel
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Merge partitioned weights from flat_groups into a single tensor.
        """
        end_idx = self.offset + self.partitioned_numel
        world_size = len(self.flat_groups)
        pad_flat_param_chunks = []

        for rank_i in range(world_size):
            # for each rank, we need to collect weights from related group/groups
            flat_groups_at_rank_i = self.flat_groups[rank_i]
            start_group_id = None
            end_group_id = None
            for group_id in range(len(self.flat_groups_offset)):
                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
                    start_group_id = group_id
                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
                    end_group_id = group_id
                    break
            # collect weights from related group/groups
            for group_id in range(start_group_id, end_group_id + 1):
                flat_tensor = flat_groups_at_rank_i[group_id]
                start_offset = self.offset - self.flat_groups_offset[group_id]
                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])

        # collect weights from all ranks
        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
        return param

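# Note: GatheredTensor defers the torch.cat across ranks until .contiguous() is called,
# so building the zero3 state_dict below only records offsets. Materializing a single
# parameter on demand (illustrative; "some.weight" is a hypothetical key):
#
#     tensor = state_dict["some.weight"]   # GatheredTensor, nothing copied yet
#     weight = tensor.contiguous()         # gathered into a real torch.Tensor
#
# to_torch_tensor() below performs this conversion for the whole dict.
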
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Convert state_dict of GatheredTensor to torch tensor
    """
    torch_state_dict = {}
    converted_tensors = {}
    for name, tensor in state_dict.items():
        tensor_id = id(tensor)
        if tensor_id in converted_tensors:  # shared tensors
            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
            torch_state_dict[name] = shared_tensor
        else:
            converted_tensors[tensor_id] = name
            if return_empty_tensor:
                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
            else:
                torch_state_dict[name] = tensor.contiguous()
    return torch_state_dict

| 533 |
+
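
# Minimal usage sketch (variable names are illustrative, not part of the
# original script): materialize a lazily gathered state_dict on CPU.
#
#   lazy_sd = get_fp32_state_dict_from_zero_checkpoint(ckpt_dir, lazy_mode=True)
#   cpu_sd = to_torch_tensor(lazy_sd)  # each GatheredTensor becomes a torch.Tensor
#
# With return_empty_tensor=True only shapes and dtypes are allocated; this is
# how convert_zero_checkpoint_to_fp32_state_dict below plans shard boundaries
# without materializing the full model.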


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
          Convert a pseudo tensor to a torch tensor with ``.contiguous()``

    Returns:
        - pytorch ``state_dict``

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint. Or you can load the state_dict in lazy mode ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
        for name, lazy_tensor in state_dict.items():
            tensor = lazy_tensor.contiguous()  # to cpu
            print(name, tensor)
            # del tensor to release memory if it is no longer in use
    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    if lazy_mode:
        return state_dict
    else:
        return to_torch_tensor(state_dict)
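
# Illustrative walk-through of the tag resolution above: if checkpoint_dir
# contains a text file named 'latest' whose content is, e.g., 'global_step30',
# the shards are read from checkpoint_dir/global_step30/. Passing
# tag='global_step30' explicitly skips the file lookup.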


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory for the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using ``safetensors`` or the traditional PyTorch way (that uses ``pickle``).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # a memory-efficient approach for sharding
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of the current shard
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
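
# Programmatic usage sketch (paths are illustrative; assumes `safetensors` and
# `huggingface_hub` are installed, as checked above):
#
#   convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12",
#                                              "path/checkpoint-12-output",
#                                              max_shard_size="2GB",
#                                              safe_serialization=True)
#
# If the weights exceed max_shard_size, multiple shard files are written along
# with a model.safetensors.index.json weight map, as in the code above.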


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed
    context of the same application. i.e. you will need to re-initialize the deepspeed engine,
    since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model
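
# A note on strict=False above (our reading of the call, not upstream
# commentary): it lets loading succeed even when the reconstructed state_dict
# lacks keys the live model has, e.g. when the checkpoint was saved with
# frozen parameters excluded.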


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory for the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of size "
        "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
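
# Example command lines (paths are illustrative, mirroring the argparse help
# above):
#
#   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output
#   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output \
#       --safe_serialization --max_shard_size 2GB -t global_step1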