Upload 30 files
- .gitattributes +36 -0
- LICENSE.md +54 -0
- README.md +50 -0
- ae.safetensors +3 -0
- dev_grid.jpg +3 -0
- flux1-dev.safetensors +3 -0
- model_index.json +32 -0
- pipeline_flux_de_distill.py +796 -0
- scheduler/scheduler_config.json +11 -0
- text_encoder/config.json +25 -0
- text_encoder/model.safetensors +3 -0
- text_encoder_2/config.json +32 -0
- text_encoder_2/model-00001-of-00002.safetensors +3 -0
- text_encoder_2/model-00002-of-00002.safetensors +3 -0
- text_encoder_2/model.safetensors.index.json +226 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +30 -0
- tokenizer/tokenizer_config.json +30 -0
- tokenizer/vocab.json +0 -0
- tokenizer_2/special_tokens_map.json +125 -0
- tokenizer_2/spiece.model +3 -0
- tokenizer_2/tokenizer.json +0 -0
- tokenizer_2/tokenizer_config.json +940 -0
- transformer/config.json +18 -0
- transformer/diffusion_pytorch_model-00001-of-00003.safetensors +3 -0
- transformer/diffusion_pytorch_model-00002-of-00003.safetensors +3 -0
- transformer/diffusion_pytorch_model-00003-of-00003.safetensors +3 -0
- transformer/diffusion_pytorch_model.safetensors.index.json +0 -0
- vae/config.json +38 -0
- vae/diffusion_pytorch_model.safetensors +3 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+dev_grid.jpg filter=lfs diff=lfs merge=lfs -text
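Every pattern above routes matching files through Git LFS, so the large binaries in this commit (the `.safetensors` weights, `dev_grid.jpg`) are stored as small pointer stubs like the ones shown further down. A minimal sketch of reading such a stub in Python (the `parse_lfs_pointer` helper is our illustration, not part of git-lfs):

```python
# Sketch: parse a Git LFS pointer stub (version / oid / size key-value lines).
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38
size 335304388"""

info = parse_lfs_pointer(pointer)
print(info["size"])  # 335304388 -> size in bytes of the real ae.safetensors payload
```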
LICENSE.md
ADDED
@@ -0,0 +1,54 @@
+FLUX.1 [dev] Non-Commercial License v1.1.1
+
+Black Forest Labs Inc. (“we” or “our” or “Company”) is pleased to make available the weights, parameters and inference code for the FLUX.1 [dev] Model (as defined below) freely available for your non-commercial and non-production use as set forth in this FLUX.1 [dev] Non-Commercial License (“License”). The “FLUX.1 [dev] Model” means the FLUX.1 [dev] AI models and models denoted as FLUX.1 [dev], including but not limited to FLUX.1 [dev], FLUX.1 Fill [dev], FLUX.1 Depth [dev], FLUX.1 Canny [dev], FLUX.1 Redux [dev], FLUX.1 Canny [dev] LoRA, FLUX.1 Depth [dev] LoRA, and FLUX.1 Kontext [dev], and their elements which includes algorithms, software, checkpoints, parameters, source code (inference code, evaluation code, and if applicable, fine-tuning code) and any other materials associated with the FLUX.1 [dev] AI models made available by Company under this License, including if any, the technical documentation, manuals and instructions for the use and operation thereof (collectively, “FLUX.1 [dev] Model”). Note that we may also make available certain elements of what is included in the definition of “FLUX.1 [dev] Model” under a separate license, such as the inference code, and nothing in this License will be deemed to restrict or limit any other licenses granted by us in such elements.
+
+By downloading, accessing, using, Distributing (as defined below), or creating a Derivative (as defined below) of the FLUX.1 [dev] Model, you agree to the terms of this License. If you do not agree to this License, then you do not have any rights to access, use, Distribute or create a Derivative of the FLUX.1 [dev] Model and you must immediately cease using the FLUX.1 [dev] Model. If you are agreeing to be bound by the terms of this License on behalf of your employer or other entity, you represent and warrant to us that you have full legal authority to bind your employer or such entity to this License. If you do not have the requisite authority, you may not accept the License or access the FLUX.1 [dev] Model on behalf of your employer or other entity.
+
+1. Definitions.
+- a. “Derivative” means any (i) modified version of the FLUX.1 [dev] Model (including but not limited to any customized or fine-tuned version thereof), (ii) work based on the FLUX.1 [dev] Model, or (iii) any other derivative work thereof. For the avoidance of doubt, Outputs are not considered Derivatives under this License.
+- b. “Distribution” or “Distribute” or “Distributing” means providing or making available, by any means, a copy of the FLUX.1 [dev] Models and/or the Derivatives as the case may be.
+- c. “Non-Commercial Purpose” means any of the following uses, but only so far as you do not receive any direct or indirect payment arising from the use of the FLUX.1 [dev] Model, Derivatives, or FLUX Content Filters (as defined below): (i) personal use for research, experiment, and testing for the benefit of public knowledge, personal study, private entertainment, hobby projects, or otherwise not directly or indirectly connected to any commercial activities, business operations, or employment responsibilities; (ii) use by commercial or for-profit entities for testing, evaluation, or non-commercial research and development in a non-production environment; and (iii) use by any charitable organization for charitable purposes, or for testing or evaluation. For clarity, use (a) for revenue-generating activity, (b) in direct interactions with or that has impact on end users, or (c) to train, fine tune or distill other models for commercial use, in each case is not a Non-Commercial Purpose.
+- d. “Outputs” means any content generated by the operation of the FLUX.1 [dev] Models or the Derivatives from an input (such as an image input) or prompt (i.e., text instructions) provided by users. For the avoidance of doubt, Outputs do not include any components of the FLUX.1 [dev] Models, such as any fine-tuned versions of the FLUX.1 [dev] Models, the weights, or parameters.
+- e. “you” or “your” means the individual or entity entering into this License with Company.
+
+2. License Grant.
+- a. License. Subject to your compliance with this License, Company grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license to access, use, create Derivatives of, and Distribute the FLUX.1 [dev] Models and Derivatives solely for your Non-Commercial Purposes. The foregoing license is personal to you, and you may not assign or sublicense this License or any other rights or obligations under this License without Company’s prior written consent; any such assignment or sublicense will be void and will automatically and immediately terminate this License. Any restrictions set forth herein regarding the FLUX.1 [dev] Model also apply to any Derivative you create or that are created on your behalf.
+- b. Non-Commercial Use Only. You may only access, use, Distribute, or create Derivatives of the FLUX.1 [dev] Model or Derivatives for Non-Commercial Purposes. If you want to use a FLUX.1 [dev] Model or a Derivative for any purpose that is not expressly authorized under this License, such as for a commercial activity, you must request a license from Company, which Company may grant to you in Company’s sole discretion and which additional use may be subject to a fee, royalty or other revenue share. Please see www.bfl.ai if you would like a commercial license.
+- c. Reserved Rights. The grant of rights expressly set forth in this License are the complete grant of rights to you in the FLUX.1 [dev] Model, and no other licenses are granted, whether by waiver, estoppel, implication, equity or otherwise. Company and its licensors reserve all rights not expressly granted by this License.
+- d. Outputs. We claim no ownership rights in and to the Outputs. You are solely responsible for the Outputs you generate and their subsequent uses in accordance with this License. You may use Output for any purpose (including for commercial purposes), except as expressly prohibited herein. You may not use the Output to train, fine-tune or distill a model that is competitive with the FLUX.1 [dev] Model or the FLUX.1 Kontext [dev] Model.
+- e. You may access, use, Distribute, or create Output of the FLUX.1 [dev] Model or Derivatives if you: (i) (A) implement and maintain content filtering measures (“Content Filters”) for your use of the FLUX.1 [dev] Model or Derivatives to prevent the creation, display, transmission, generation, or dissemination of unlawful or infringing content, which may include Content Filters that we may make available for use with the FLUX.1 [dev] Model (“FLUX Content Filters”), or (B) ensure Output undergoes review for unlawful or infringing content before public or non-public distribution, display, transmission or dissemination; and (ii) ensure Output includes disclosure (or other indication) that the Output was generated or modified using artificial intelligence technologies to the extent required under applicable law.
+
+3. Distribution. Subject to this License, you may Distribute copies of the FLUX.1 [dev] Model and/or Derivatives made by you, under the following conditions:
+- a. you must make available a copy of this License to third-party recipients of the FLUX.1 [dev] Models and/or Derivatives you Distribute, and specify that any rights to use the FLUX.1 [dev] Models and/or Derivatives shall be directly granted by Company to said third-party recipients pursuant to this License;
+- b. you must prominently display the following notice alongside the Distribution of the FLUX.1 [dev] Model or Derivative (such as via a “Notice” text file distributed as part of such FLUX.1 [dev] Model or Derivative) (the “Attribution Notice”):
+
+“The FLUX.1 [dev] Model is licensed by Black Forest Labs Inc. under the FLUX.1 [dev] Non-Commercial License. Copyright Black Forest Labs Inc.
+IN NO EVENT SHALL BLACK FOREST LABS INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH USE OF THIS MODEL.”
+
+- c. in the case of Distribution of Derivatives made by you: (i) you must also include in the Attribution Notice a statement that you have modified the applicable FLUX.1 [dev] Model; (ii) any terms and conditions you impose on any third-party recipients relating to Derivatives made by or for you shall neither limit such third-party recipients’ use of the FLUX.1 [dev] Model or any Derivatives made by or for Company in accordance with this License nor conflict with any of its terms and conditions and must include disclaimer of warranties and limitation of liability provisions that are at least as protective of Company as those set forth herein; and (iii) you must not misrepresent or imply, through any means, that the Derivatives made by or for you and/or any modified version of the FLUX.1 [dev] Model you Distribute under your name and responsibility is an official product of the Company or has been endorsed, approved or validated by the Company, unless you are authorized by Company to do so in writing.
+
+4. Restrictions. You will not, and will not permit, assist or cause any third party to
+- a. use, modify, copy, reproduce, create Derivatives of, or Distribute the FLUX.1 [dev] Model (or any Derivative thereof, or any data produced by the FLUX.1 [dev] Model), in whole or in part, (i) for any commercial or production purposes, (ii) military purposes, (iii) purposes of surveillance, including any research or development relating to surveillance, (iv) biometric processing, (v) in any manner that infringes, misappropriates, or otherwise violates (or is likely to infringe, misappropriate, or otherwise violate) any third party’s legal rights, including rights of publicity or “digital replica” rights, (vi) in any unlawful, fraudulent, defamatory, or abusive activity, (vii) to generate unlawful content, including child sexual abuse material, or non-consensual intimate images; or (viii) in any manner that violates any applicable law and violating any privacy or security laws, rules, regulations, directives, or governmental requirements (including the General Data Privacy Regulation (Regulation (EU) 2016/679), the California Consumer Privacy Act, any and all laws governing the processing of biometric information, and the EU Artificial Intelligence Act (Regulation (EU) 2024/1689), as well as all amendments and successor laws to any of the foregoing;
+- b. alter or remove copyright and other proprietary notices which appear on or in any portion of the FLUX.1 [dev] Model;
+- c. utilize any equipment, device, software, or other means to circumvent or remove any security or protection used by Company in connection with the FLUX.1 [dev] Model, or to circumvent or remove any usage restrictions, or to enable functionality disabled by FLUX.1 [dev] Model;
+- d. offer or impose any terms on the FLUX.1 [dev] Model that alter, restrict, or are inconsistent with the terms of this License;
+- e. violate any applicable U.S. and non-U.S. export control and trade sanctions laws (“Export Laws”) in connection with your use or Distribution of any FLUX.1 [dev] Model;
+- f. directly or indirectly Distribute, export, or otherwise transfer FLUX.1 [dev] Model (i) to any individual, entity, or country prohibited by Export Laws; (ii) to anyone on U.S. or non-U.S. government restricted parties lists; (iii) for any purpose prohibited by Export Laws, including nuclear, chemical or biological weapons, or missile technology applications; (iv) use or download FLUX.1 [dev] Model if you or they are (a) located in a comprehensively sanctioned jurisdiction, (b) currently listed on any U.S. or non-U.S. restricted parties list, or (c) for any purpose prohibited by Export Laws; and (v) will not disguise your location through IP proxying or other methods.
+
+5. DISCLAIMERS. THE FLUX.1 [dev] MODEL AND FLUX CONTENT FILTERS ARE PROVIDED “AS IS” AND “WITH ALL FAULTS” WITH NO WARRANTY OF ANY KIND, EXPRESS OR IMPLIED. COMPANY EXPRESSLY DISCLAIMS ALL REPRESENTATIONS AND WARRANTIES, EXPRESS OR IMPLIED, WHETHER BY STATUTE, CUSTOM, USAGE OR OTHERWISE AS TO ANY MATTERS RELATED TO THE FLUX.1 [dev] MODEL AND FLUX CONTENT FILTERS, INCLUDING BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, SATISFACTORY QUALITY, OR NON-INFRINGEMENT. COMPANY MAKES NO WARRANTIES OR REPRESENTATIONS THAT THE FLUX.1 [dev] MODEL AND FLUX CONTENT FILTERS WILL BE ERROR FREE OR FREE OF VIRUSES OR OTHER HARMFUL COMPONENTS, OR PRODUCE ANY PARTICULAR RESULTS.
+
+6. LIMITATION OF LIABILITY. TO THE FULLEST EXTENT PERMITTED BY LAW, IN NO EVENT WILL COMPANY BE LIABLE TO YOU OR YOUR EMPLOYEES, AFFILIATES, USERS, OFFICERS OR DIRECTORS (A) UNDER ANY THEORY OF LIABILITY, WHETHER BASED IN CONTRACT, TORT, NEGLIGENCE, STRICT LIABILITY, WARRANTY, OR OTHERWISE UNDER THIS LICENSE, OR (B) FOR ANY INDIRECT, CONSEQUENTIAL, EXEMPLARY, INCIDENTAL, PUNITIVE OR SPECIAL DAMAGES OR LOST PROFITS, EVEN IF COMPANY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE FLUX.1 [dev] MODEL, ITS CONSTITUENT COMPONENTS, FLUX CONTENT FILTERS, AND ANY OUTPUT (COLLECTIVELY, “MODEL MATERIALS”) ARE NOT DESIGNED OR INTENDED FOR USE IN ANY APPLICATION OR SITUATION WHERE FAILURE OR FAULT OF THE MODEL MATERIALS COULD REASONABLY BE ANTICIPATED TO LEAD TO SERIOUS INJURY OF ANY PERSON, INCLUDING POTENTIAL DISCRIMINATION OR VIOLATION OF AN INDIVIDUAL’S PRIVACY RIGHTS, OR TO SEVERE PHYSICAL, PROPERTY, OR ENVIRONMENTAL DAMAGE (EACH, A “HIGH-RISK USE”). IF YOU ELECT TO USE ANY OF THE MODEL MATERIALS FOR A HIGH-RISK USE, YOU DO SO AT YOUR OWN RISK. YOU AGREE TO DESIGN AND IMPLEMENT APPROPRIATE DECISION-MAKING AND RISK-MITIGATION PROCEDURES AND POLICIES IN CONNECTION WITH A HIGH-RISK USE SUCH THAT EVEN IF THERE IS A FAILURE OR FAULT IN ANY OF THE MODEL MATERIALS, THE SAFETY OF PERSONS OR PROPERTY AFFECTED BY THE ACTIVITY STAYS AT A LEVEL THAT IS REASONABLE, APPROPRIATE, AND LAWFUL FOR THE FIELD OF THE HIGH-RISK USE.
+
+7. INDEMNIFICATION. You will indemnify, defend and hold harmless Company and our subsidiaries and affiliates, and each of our respective shareholders, directors, officers, employees, agents, successors, and assigns (collectively, the “Company Parties”) from and against any losses, liabilities, damages, fines, penalties, and expenses (including reasonable attorneys’ fees) incurred by any Company Party in connection with any claim, demand, allegation, lawsuit, proceeding, or investigation (collectively, “Claims”) arising out of or related to (a) your access to or use of the FLUX.1 [dev] Model (including in connection with any Output, results or data generated from such access or use, or from your access or use of any FLUX Content Filters), including any High-Risk Use; (b) your Content Filters, including your failure to implement any Content Filters where required by this License such as in Section 2(e); (c) your violation of this License; or (d) your violation, misappropriation or infringement of any rights of another (including intellectual property or other proprietary rights and privacy rights). You will promptly notify the Company Parties of any such Claims, and cooperate with Company Parties in defending such Claims. You will also grant the Company Parties sole control of the defense or settlement, at Company’s sole option, of any Claims. This indemnity is in addition to, and not in lieu of, any other indemnities or remedies set forth in a written agreement between you and Company or the other Company Parties.
+
+8. Termination; Survival.
+- a. This License will automatically terminate upon any breach by you of the terms of this License.
+- b. We may terminate this License, in whole or in part, at any time upon notice (including electronic) to you.
+- c. If you initiate any legal action or proceedings against Company or any other entity (including a cross-claim or counterclaim in a lawsuit), alleging that the FLUX.1 [dev] Model, any Derivative, or FLUX Content Filters, or any part thereof, infringe upon intellectual property or other rights owned or licensable by you, then any licenses granted to you under this License will immediately terminate as of the date such legal action or claim is filed or initiated.
+- d. Upon termination of this License, you must cease all use, access or Distribution of the FLUX.1 [dev] Model, any Derivatives, and any FLUX Content Filters. The following sections survive termination of this License 2(c), 2(d), 4-11.
+
+9. Third Party Materials. The FLUX.1 [dev] Model may contain third-party software or other components (including free and open source software) (all of the foregoing, “Third Party Materials”), which are subject to the license terms of the respective third-party licensors. Your dealings or correspondence with third parties and your use of or interaction with any Third Party Materials are solely between you and the third party. Company does not control or endorse, and makes no representations or warranties regarding, any Third Party Materials, and your access to and use of such Third Party Materials are at your own risk.
+
+10. Trademarks. You have not been granted any trademark license as part of this License and may not use any name, logo or trademark associated with Company without the prior written permission of Company, except to the extent necessary to make the reference required in the Attribution Notice as specified above or as is reasonably necessary in describing the FLUX.1 [dev] Model and its creators.
+
+11. General. This License will be governed and construed under the laws of the State of Delaware without regard to conflicts of law provisions. If any provision or part of a provision of this License is unlawful, void or unenforceable, that provision or part of the provision is deemed severed from this License, and will not affect the validity and enforceability of any remaining provisions. The failure of Company to exercise or enforce any right or provision of this License will not operate as a waiver of such right or provision. This License does not confer any third-party beneficiary rights upon any other person or entity. This License, together with the documentation, contains the entire understanding between you and Company regarding the subject matter of this License, and supersedes all other written or oral agreements and understandings between you and Company regarding such subject matter.
README.md
ADDED
@@ -0,0 +1,50 @@
+---
+license: other
+license_name: flux-1-dev-non-commercial-license
+license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+language:
+- en
+base_model:
+- black-forest-labs/FLUX.1-dev
+library_name: diffusers
+---
+
+## Model Details
+This is a diffusers version of [nyanko7/flux-dev-de-distill](https://huggingface.co/nyanko7/flux-dev-de-distill) that generates exactly the same results as the original. Please note, however, that in our tests it shows a certain degree of quality degradation compared with [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev).
+We are training fine-tuned models on it to check its effectiveness; feel free to let us know if you have any interesting findings.
+
+## Usage
+```python
+import torch
+from diffusers import FluxTransformer2DModel
+from pipeline_flux_de_distill import FluxPipeline
+
+model_path = "black-forest-labs/FLUX.1-dev"
+
+transformer = FluxTransformer2DModel.from_pretrained(
+    "InstantX/flux-dev-de-distill-diffusers",
+    torch_dtype=torch.bfloat16
+)
+
+pipeline = FluxPipeline.from_pretrained(model_path, transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
+
+prompt = "a tiny astronaut hatching from an egg on the moon"
+negative_prompt = "bad photo"
+
+image = pipeline(
+    prompt=prompt,
+    negative_prompt=negative_prompt,
+    guidance_scale=3.5,
+    num_inference_steps=24,
+).images[0]
+
+image.save("de-distill.jpg")
+```
+
+## Other Resources
+- [MinusZoneAI/flux-dev-de-distill-fp8](https://huggingface.co/MinusZoneAI/flux-dev-de-distill-fp8), [TheYuriLover/flux-dev-de-distill-GGUF](https://huggingface.co/TheYuriLover/flux-dev-de-distill-GGUF)
+- [ashen0209/Flux-Dev2Pro](https://huggingface.co/ashen0209/Flux-Dev2Pro), [ostris/OpenFLUX.1](https://huggingface.co/ostris/OpenFLUX.1)
+
+
+## Acknowledgements
+This project is co-sponsored by [HuggingFace](https://huggingface.co/) and [fal](https://huggingface.co/fal).
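For GPUs that cannot hold the whole bfloat16 pipeline at once, a hedged variant of the snippet above (reusing its `model_path` and `transformer`): the pipeline declares `model_cpu_offload_seq`, so the standard diffusers offload helper should apply, though we have not benchmarked it here.

```python
# Sketch: trade speed for VRAM by offloading idle modules to CPU
# instead of moving the whole pipeline to the GPU with .to("cuda").
pipeline = FluxPipeline.from_pretrained(
    model_path, transformer=transformer, torch_dtype=torch.bfloat16
)
pipeline.enable_model_cpu_offload()  # modules are moved to the GPU only while in use
```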
ae.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38
+size 335304388
dev_grid.jpg
ADDED
Git LFS Details
flux1-dev.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7
+size 23802932552
model_index.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_class_name": "FluxPipeline",
+  "_diffusers_version": "0.30.0.dev0",
+  "scheduler": [
+    "diffusers",
+    "FlowMatchEulerDiscreteScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "text_encoder_2": [
+    "transformers",
+    "T5EncoderModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "tokenizer_2": [
+    "transformers",
+    "T5TokenizerFast"
+  ],
+  "transformer": [
+    "diffusers",
+    "FluxTransformer2DModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
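Each entry in `model_index.json` is a `[library, class]` pair that diffusers resolves at load time to instantiate the corresponding subfolder component. A rough sketch of that lookup (our illustration, not the actual diffusers loading code):

```python
import importlib
import json

# Sketch: resolve one component entry from model_index.json by hand.
with open("model_index.json") as f:
    index = json.load(f)

library, class_name = index["scheduler"]  # ["diffusers", "FlowMatchEulerDiscreteScheduler"]
scheduler_cls = getattr(importlib.import_module(library), class_name)
print(scheduler_cls.__name__)  # FlowMatchEulerDiscreteScheduler
```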
pipeline_flux_de_distill.py
ADDED
@@ -0,0 +1,796 @@
+# Copyright 2024 Black Forest Labs, The HuggingFace Team and InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import numpy as np
+import torch
+from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+
+from diffusers.image_processor import VaeImageProcessor
+from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin
+from diffusers.models.autoencoders import AutoencoderKL
+from diffusers.models.transformers import FluxTransformer2DModel
+from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+from diffusers.utils import (
+    USE_PEFT_BACKEND,
+    is_torch_xla_available,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+    XLA_AVAILABLE = True
+else:
+    XLA_AVAILABLE = False
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import FluxPipeline
+
+        >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
+        >>> pipe.to("cuda")
+        >>> prompt = "A cat holding a sign that says hello world"
+        >>> # Depending on the variant being used, the pipeline call will slightly vary.
+        >>> # Refer to the pipeline documentation for more details.
+        >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0]
+        >>> image.save("flux.png")
+        ```
+"""
+
+
+def calculate_shift(
+    image_seq_len,
+    base_seq_len: int = 256,
+    max_seq_len: int = 4096,
+    base_shift: float = 0.5,
+    max_shift: float = 1.16,
+):
+    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+    b = base_shift - m * base_seq_len
+    mu = image_seq_len * m + b
+    return mu
+
+
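`calculate_shift` linearly interpolates the timestep shift `mu` between `base_shift` at 256 image tokens and `max_shift` at 4096 tokens. A worked example, assuming the function above is in scope (in this pipeline a 1024x1024 image packs to 4096 latent tokens and a 512x512 image to 1024):

```python
# mu hits max_shift exactly at the 4096-token anchor (1024x1024 images)
print(round(calculate_shift(image_seq_len=4096), 3))  # 1.16

# smaller images land on the interpolated line, e.g. 1024 tokens (512x512)
print(round(calculate_shift(image_seq_len=1024), 3))  # 0.632
```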
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    """
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
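A short usage sketch for `retrieve_timesteps`, assuming this module is importable: with a default-configured scheduler (no dynamic shifting, `shift=1.0`) a Flux-style linearly spaced sigma schedule passes straight through `set_timesteps`:

```python
import numpy as np
from diffusers import FlowMatchEulerDiscreteScheduler

from pipeline_flux_de_distill import retrieve_timesteps

scheduler = FlowMatchEulerDiscreteScheduler()
steps = 24
sigmas = np.linspace(1.0, 1.0 / steps, steps)  # the schedule Flux pipelines typically build
timesteps, n = retrieve_timesteps(scheduler, sigmas=sigmas, device="cpu")
print(n, timesteps[0].item())  # 24 1000.0 (sigma 1.0 maps to timestep 1000)
```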
+class FluxPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
+    r"""
+    The Flux pipeline for text-to-image generation.
+
+    Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
+
+    Args:
+        transformer ([`FluxTransformer2DModel`]):
+            Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+        text_encoder ([`CLIPTextModel`]):
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+        text_encoder_2 ([`T5EncoderModel`]):
+            [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
+            the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
+        tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
+        tokenizer_2 (`T5TokenizerFast`):
+            Second Tokenizer of class
+            [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
+    """
+
+    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
+    _optional_components = []
+    _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+    def __init__(
+        self,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        tokenizer: CLIPTokenizer,
+        text_encoder_2: T5EncoderModel,
+        tokenizer_2: T5TokenizerFast,
+        transformer: FluxTransformer2DModel,
+    ):
+        super().__init__()
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            text_encoder_2=text_encoder_2,
+            tokenizer=tokenizer,
+            tokenizer_2=tokenizer_2,
+            transformer=transformer,
+            scheduler=scheduler,
+        )
+        self.vae_scale_factor = (
+            2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
+        )
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.tokenizer_max_length = (
+            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+        )
+        self.default_sample_size = 64
+
+    def _get_t5_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]] = None,
+        num_images_per_prompt: int = 1,
+        max_sequence_length: int = 512,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+    ):
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        text_inputs = self.tokenizer_2(
+            prompt,
+            padding="max_length",
+            max_length=max_sequence_length,
+            truncation=True,
+            return_length=False,
+            return_overflowing_tokens=False,
+            return_tensors="pt",
+        )
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
+
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because `max_sequence_length` is set to "
+                f" {max_sequence_length} tokens: {removed_text}"
+            )
+
+        prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
+
+        dtype = self.text_encoder_2.dtype
+        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+        _, seq_len, _ = prompt_embeds.shape
+
+        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        return prompt_embeds
+
+    def _get_clip_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]],
+        num_images_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+    ):
+        device = device or self._execution_device
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        text_inputs = self.tokenizer(
+            prompt,
+            padding="max_length",
+            max_length=self.tokenizer_max_length,
+            truncation=True,
+            return_overflowing_tokens=False,
+            return_length=False,
+            return_tensors="pt",
+        )
+
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because CLIP can only handle sequences up to"
+                f" {self.tokenizer_max_length} tokens: {removed_text}"
+            )
+        prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
+
+        # Use pooled output of CLIPTextModel
+        prompt_embeds = prompt_embeds.pooler_output
+        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
+
+        return prompt_embeds
+
+    def encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+        prompt_2: Union[str, List[str]],
+        negative_prompt: Union[str, List[str]],
+        device: Optional[torch.device] = None,
+        num_images_per_prompt: int = 1,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        max_sequence_length: int = 512,
+        lora_scale: Optional[float] = None,
+    ):
+        r"""
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+                used in all text-encoders
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            lora_scale (`float`, *optional*):
+                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+        """
+        device = device or self._execution_device
+
+        # set lora scale so that monkey patched LoRA
+        # function of text encoder can correctly access it
+        if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
+            self._lora_scale = lora_scale
+
+            # dynamically adjust the LoRA scale
+            if self.text_encoder is not None and USE_PEFT_BACKEND:
+                scale_lora_layers(self.text_encoder, lora_scale)
+            if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
+                scale_lora_layers(self.text_encoder_2, lora_scale)
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+
+        if prompt_embeds is None:
+            prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+            # We only use the pooled prompt output from the CLIPTextModel
+            pooled_prompt_embeds = self._get_clip_prompt_embeds(
+                prompt=prompt,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+            )
+            prompt_embeds = self._get_t5_prompt_embeds(
+                prompt=prompt_2,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
+            )
+
+            # We only use the pooled prompt output from the CLIPTextModel
+            negative_pooled_prompt_embeds = self._get_clip_prompt_embeds(
+                prompt=negative_prompt,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+            )
+            negative_prompt_embeds = self._get_t5_prompt_embeds(
+                prompt=negative_prompt,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
+            )
+
+        if self.text_encoder is not None:
+            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
+
+        if self.text_encoder_2 is not None:
+            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+        dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
+        text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
+
+        return prompt_embeds, pooled_prompt_embeds, text_ids, negative_prompt_embeds, negative_pooled_prompt_embeds
+
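One difference from the stock diffusers `FluxPipeline.encode_prompt`: this variant also returns `negative_prompt_embeds` and `negative_pooled_prompt_embeds`, which the denoising loop needs for true classifier-free guidance. Note also that Flux text position ids are simply zeros; only image tokens carry non-zero positions (see `_prepare_latent_image_ids` below). A tiny, self-contained illustration of the `text_ids` construction:

```python
import torch

# Mirrors `text_ids = torch.zeros(prompt_embeds.shape[1], 3)` above:
# one all-zero id triple per text token.
text_ids = torch.zeros(512, 3)
print(text_ids.shape, text_ids.abs().sum().item())  # torch.Size([512, 3]) 0.0
```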
+    def check_inputs(
+        self,
+        prompt,
+        prompt_2,
+        height,
+        width,
+        prompt_embeds=None,
+        pooled_prompt_embeds=None,
+        callback_on_step_end_tensor_inputs=None,
+        max_sequence_length=None,
+    ):
+        if height % 8 != 0 or width % 8 != 0:
+            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt_2 is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+
+        if prompt_embeds is not None and pooled_prompt_embeds is None:
+            raise ValueError(
+                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+            )
+
+        if max_sequence_length is not None and max_sequence_length > 512:
+            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
+    @staticmethod
+    def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
+        latent_image_ids = torch.zeros(height // 2, width // 2, 3)
+        latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None]
+        latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :]
+
+        latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
+
+        latent_image_ids = latent_image_ids.reshape(
+            latent_image_id_height * latent_image_id_width, latent_image_id_channels
+        )
+
+        return latent_image_ids.to(device=device, dtype=dtype)
+
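The latent image ids form a flattened grid of (index, row, column) triples, one per 2x2 latent patch, later consumed as RoPE positions. The same recipe rerun on a toy 8x8 latent, as a self-contained sketch:

```python
import torch

h, w = 8, 8  # toy latent size (the method receives the already-doubled values)
ids = torch.zeros(h // 2, w // 2, 3)
ids[..., 1] += torch.arange(h // 2)[:, None]  # channel 1: patch row
ids[..., 2] += torch.arange(w // 2)[None, :]  # channel 2: patch column
ids = ids.reshape(-1, 3)
print(ids.shape)  # torch.Size([16, 3]) -> one id triple per 2x2 patch
```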
+    @staticmethod
+    def _pack_latents(latents, batch_size, num_channels_latents, height, width):
+        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
+        latents = latents.permute(0, 2, 4, 1, 3, 5)
+        latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
+
+        return latents
+
+    @staticmethod
+    def _unpack_latents(latents, height, width, vae_scale_factor):
+        batch_size, num_patches, channels = latents.shape
+
+        height = height // vae_scale_factor
+        width = width // vae_scale_factor
+
+        latents = latents.view(batch_size, height, width, channels // 4, 2, 2)
+        latents = latents.permute(0, 3, 1, 4, 2, 5)
+
+        latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2)
+
+        return latents
+
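`_pack_latents` folds every 2x2 spatial patch into the channel dimension (16 channels become 64-dim tokens, HxW becomes H/2 * W/2 tokens) and `_unpack_latents` inverts it. A quick round-trip check, assuming the module is importable:

```python
import torch

from pipeline_flux_de_distill import FluxPipeline

b, c, h, w = 1, 16, 8, 8
x = torch.randn(b, c, h, w)
packed = FluxPipeline._pack_latents(x, b, c, h, w)
print(packed.shape)  # torch.Size([1, 16, 64]) -> (h//2 * w//2) tokens of c*4 dims

# _unpack_latents takes pixel-space sizes: 64 // vae_scale_factor * 2 == 8
unpacked = FluxPipeline._unpack_latents(packed, height=64, width=64, vae_scale_factor=16)
print(torch.equal(unpacked, x))  # True -> exact inverse
```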
+    def enable_vae_slicing(self):
+        r"""
+        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+        """
+        self.vae.enable_slicing()
+
+    def disable_vae_slicing(self):
+        r"""
+        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_slicing()
+
+    def enable_vae_tiling(self):
+        r"""
+        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
+        processing larger images.
+        """
+        self.vae.enable_tiling()
+
+    def disable_vae_tiling(self):
+        r"""
+        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_tiling()
+
| 495 |
+
def prepare_latents(
|
| 496 |
+
self,
|
| 497 |
+
batch_size,
|
| 498 |
+
num_channels_latents,
|
| 499 |
+
height,
|
| 500 |
+
width,
|
| 501 |
+
dtype,
|
| 502 |
+
device,
|
| 503 |
+
generator,
|
| 504 |
+
latents=None,
|
| 505 |
+
):
|
| 506 |
+
height = 2 * (int(height) // self.vae_scale_factor)
|
| 507 |
+
width = 2 * (int(width) // self.vae_scale_factor)
|
| 508 |
+
|
| 509 |
+
shape = (batch_size, num_channels_latents, height, width)
|
| 510 |
+
|
| 511 |
+
if latents is not None:
|
| 512 |
+
latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
|
| 513 |
+
return latents.to(device=device, dtype=dtype), latent_image_ids
|
| 514 |
+
|
| 515 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 516 |
+
raise ValueError(
|
| 517 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 518 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 519 |
+
)
|
| 520 |
+
|
| 521 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 522 |
+
latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
|
| 523 |
+
|
| 524 |
+
latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
|
| 525 |
+
|
| 526 |
+
return latents, latent_image_ids
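For the shapes involved: at the default 1024x1024 with a VAE scale factor of 16 (the value implied by the 2 * (height // vae_scale_factor) doubling above), height and width become 2 * (1024 // 16) = 128, the unpacked latent is (batch, 16, 128, 128), and packing yields (128 / 2) * (128 / 2) = 4096 tokens of 16 * 4 = 64 channels — matching both transformer.config.in_channels // 4 used below and the scheduler's max_image_seq_len of 4096.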
|
| 527 |
+
|
| 528 |
+
@property
|
| 529 |
+
def guidance_scale(self):
|
| 530 |
+
return self._guidance_scale
|
| 531 |
+
|
| 532 |
+
@property
|
| 533 |
+
def do_classifier_free_guidance(self):
|
| 534 |
+
return self._guidance_scale > 1
|
| 535 |
+
|
| 536 |
+
@property
|
| 537 |
+
def joint_attention_kwargs(self):
|
| 538 |
+
return self._joint_attention_kwargs
|
| 539 |
+
|
| 540 |
+
@property
|
| 541 |
+
def num_timesteps(self):
|
| 542 |
+
return self._num_timesteps
|
| 543 |
+
|
| 544 |
+
@property
|
| 545 |
+
def interrupt(self):
|
| 546 |
+
return self._interrupt
|
| 547 |
+
|
| 548 |
+
@torch.no_grad()
|
| 549 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 550 |
+
def __call__(
|
| 551 |
+
self,
|
| 552 |
+
prompt: Union[str, List[str]] = None,
|
| 553 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 554 |
+
negative_prompt: Union[str, List[str]] = None,
|
| 555 |
+
height: Optional[int] = None,
|
| 556 |
+
width: Optional[int] = None,
|
| 557 |
+
num_inference_steps: int = 28,
|
| 558 |
+
timesteps: List[int] = None,
|
| 559 |
+
guidance_scale: float = 3.5,
|
| 560 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 561 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 562 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 563 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 564 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 565 |
+
output_type: Optional[str] = "pil",
|
| 566 |
+
return_dict: bool = True,
|
| 567 |
+
joint_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 568 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 569 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 570 |
+
max_sequence_length: int = 512,
|
| 571 |
+
):
|
| 572 |
+
r"""
|
| 573 |
+
Function invoked when calling the pipeline for generation.
|
| 574 |
+
|
| 575 |
+
Args:
|
| 576 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 577 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 578 |
+
instead.
|
| 579 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 580 |
+
The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
|
| 581 |
+
will be used instead.
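negative_prompt (`str` or `List[str]`, *optional*):
    The prompt or prompts not to guide the image generation. Only applied when `guidance_scale > 1`,
    i.e. when this de-distilled pipeline runs true classifier-free guidance.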
|
| 582 |
+
height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
|
| 583 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 584 |
+
width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
|
| 585 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 586 |
+
num_inference_steps (`int`, *optional*, defaults to 28):
|
| 587 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 588 |
+
expense of slower inference.
|
| 589 |
+
timesteps (`List[int]`, *optional*):
|
| 590 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 591 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 592 |
+
passed will be used. Must be in descending order.
|
| 593 |
+
guidance_scale (`float`, *optional*, defaults to 3.5):
|
| 594 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
| 595 |
+
`guidance_scale` is defined as `w` of equation 2 of the [Imagen
|
| 596 |
+
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
| 597 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 598 |
+
usually at the expense of lower image quality.
|
| 599 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 600 |
+
The number of images to generate per prompt.
|
| 601 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 602 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 603 |
+
to make generation deterministic.
|
| 604 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 605 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 606 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 607 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 608 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 609 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 610 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 611 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 612 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 613 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 614 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 615 |
+
The output format of the generated image. Choose between
|
| 616 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 617 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 618 |
+
Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
|
| 619 |
+
joint_attention_kwargs (`dict`, *optional*):
|
| 620 |
+
A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
|
| 621 |
+
`self.processor` in
|
| 622 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 623 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 624 |
+
A function that is called at the end of each denoising step during inference. The function is called
|
| 625 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 626 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 627 |
+
`callback_on_step_end_tensor_inputs`.
|
| 628 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 629 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 630 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 631 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 632 |
+
max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`.
|
| 633 |
+
|
| 634 |
+
Examples:
|
| 635 |
+
|
| 636 |
+
Returns:
|
| 637 |
+
[`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
|
| 638 |
+
is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
|
| 639 |
+
images.
|
| 640 |
+
"""
|
| 641 |
+
|
| 642 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 643 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 644 |
+
|
| 645 |
+
# 1. Check inputs. Raise error if not correct
|
| 646 |
+
self.check_inputs(
|
| 647 |
+
prompt,
|
| 648 |
+
prompt_2,
|
| 649 |
+
height,
|
| 650 |
+
width,
|
| 651 |
+
prompt_embeds=prompt_embeds,
|
| 652 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 653 |
+
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
|
| 654 |
+
max_sequence_length=max_sequence_length,
|
| 655 |
+
)
|
| 656 |
+
|
| 657 |
+
self._guidance_scale = guidance_scale
|
| 658 |
+
self._joint_attention_kwargs = joint_attention_kwargs
|
| 659 |
+
self._interrupt = False
|
| 660 |
+
|
| 661 |
+
# 2. Define call parameters
|
| 662 |
+
if prompt is not None and isinstance(prompt, str):
|
| 663 |
+
batch_size = 1
|
| 664 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 665 |
+
batch_size = len(prompt)
|
| 666 |
+
else:
|
| 667 |
+
batch_size = prompt_embeds.shape[0]
|
| 668 |
+
|
| 669 |
+
device = self._execution_device
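# 3. Encode the prompt (and, when guidance_scale > 1, the negative prompt)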
|
| 670 |
+
|
| 671 |
+
lora_scale = (
|
| 672 |
+
self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
|
| 673 |
+
)
|
| 674 |
+
(
|
| 675 |
+
prompt_embeds,
|
| 676 |
+
pooled_prompt_embeds,
|
| 677 |
+
text_ids,
|
| 678 |
+
negative_prompt_embeds,
|
| 679 |
+
negative_pooled_prompt_embeds
|
| 680 |
+
) = self.encode_prompt(
|
| 681 |
+
prompt=prompt,
|
| 682 |
+
prompt_2=prompt_2,
|
| 683 |
+
negative_prompt=negative_prompt,
|
| 684 |
+
prompt_embeds=prompt_embeds,
|
| 685 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 686 |
+
device=device,
|
| 687 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 688 |
+
max_sequence_length=max_sequence_length,
|
| 689 |
+
lora_scale=lora_scale,
|
| 690 |
+
)
|
| 691 |
+
|
| 692 |
+
if self.do_classifier_free_guidance:
|
| 693 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 694 |
+
pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
|
| 695 |
+
|
| 696 |
+
# 4. Prepare latent variables
|
| 697 |
+
num_channels_latents = self.transformer.config.in_channels // 4
|
| 698 |
+
latents, latent_image_ids = self.prepare_latents(
|
| 699 |
+
batch_size * num_images_per_prompt,
|
| 700 |
+
num_channels_latents,
|
| 701 |
+
height,
|
| 702 |
+
width,
|
| 703 |
+
prompt_embeds.dtype,
|
| 704 |
+
device,
|
| 705 |
+
generator,
|
| 706 |
+
latents,
|
| 707 |
+
)
|
| 708 |
+
|
| 709 |
+
# 5. Prepare timesteps
|
| 710 |
+
sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
|
| 711 |
+
image_seq_len = latents.shape[1]
|
| 712 |
+
mu = calculate_shift(
|
| 713 |
+
image_seq_len,
|
| 714 |
+
self.scheduler.config.base_image_seq_len,
|
| 715 |
+
self.scheduler.config.max_image_seq_len,
|
| 716 |
+
self.scheduler.config.base_shift,
|
| 717 |
+
self.scheduler.config.max_shift,
|
| 718 |
+
)
|
| 719 |
+
timesteps, num_inference_steps = retrieve_timesteps(
|
| 720 |
+
self.scheduler,
|
| 721 |
+
num_inference_steps,
|
| 722 |
+
device,
|
| 723 |
+
timesteps,
|
| 724 |
+
sigmas,
|
| 725 |
+
mu=mu,
|
| 726 |
+
)
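# With the scheduler config shipped below, calculate_shift (defined earlier in this file) is a
# linear fit through (base_image_seq_len=256, base_shift=0.5) and (max_image_seq_len=4096,
# max_shift=1.15), so a 1024px image with image_seq_len = 4096 lands exactly on mu = 1.15.
# Checking that interpolation by hand:
#
#   m = (1.15 - 0.5) / (4096 - 256)   # slope of the linear fit
#   b = 0.5 - m * 256                  # intercept
#   mu = 4096 * m + b
#   print(round(mu, 4))                # 1.15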
|
| 727 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 728 |
+
self._num_timesteps = len(timesteps)
|
| 729 |
+
|
| 730 |
+
# 6. Denoising loop
|
| 731 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 732 |
+
for i, t in enumerate(timesteps):
|
| 733 |
+
if self.interrupt:
|
| 734 |
+
continue
|
| 735 |
+
|
| 736 |
+
# expand the latents if we are doing classifier free guidance
|
| 737 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 738 |
+
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
| 739 |
+
timestep = t.expand(latent_model_input.shape[0])
|
| 740 |
+
|
| 741 |
+
noise_pred = self.transformer(
|
| 742 |
+
hidden_states=latent_model_input,
|
| 743 |
+
timestep=timestep / 1000,
|
| 744 |
+
pooled_projections=pooled_prompt_embeds,
|
| 745 |
+
encoder_hidden_states=prompt_embeds,
|
| 746 |
+
txt_ids=text_ids,
|
| 747 |
+
img_ids=latent_image_ids,
|
| 748 |
+
joint_attention_kwargs=self.joint_attention_kwargs,
|
| 749 |
+
return_dict=False,
|
| 750 |
+
)[0]
|
| 751 |
+
|
| 752 |
+
if self.do_classifier_free_guidance:
|
| 753 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 754 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
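# e.g. with guidance_scale = 4.0: uncond + 4.0 * (text - uncond) == text + 3.0 * (text - uncond),
# i.e. the prediction is extrapolated past the conditional branch, away from the unconditional one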
|
| 755 |
+
|
| 756 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 757 |
+
latents_dtype = latents.dtype
|
| 758 |
+
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
|
| 759 |
+
|
| 760 |
+
if latents.dtype != latents_dtype:
|
| 761 |
+
if torch.backends.mps.is_available():
|
| 762 |
+
# some platforms (e.g. Apple MPS) misbehave due to a PyTorch bug: https://github.com/pytorch/pytorch/pull/99272
|
| 763 |
+
latents = latents.to(latents_dtype)
|
| 764 |
+
|
| 765 |
+
if callback_on_step_end is not None:
|
| 766 |
+
callback_kwargs = {}
|
| 767 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 768 |
+
callback_kwargs[k] = locals()[k]
|
| 769 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 770 |
+
|
| 771 |
+
latents = callback_outputs.pop("latents", latents)
|
| 772 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 773 |
+
|
| 774 |
+
# update the progress bar
|
| 775 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 776 |
+
progress_bar.update()
|
| 777 |
+
|
| 778 |
+
if XLA_AVAILABLE:
|
| 779 |
+
xm.mark_step()
|
| 780 |
+
|
| 781 |
+
if output_type == "latent":
|
| 782 |
+
image = latents
|
| 783 |
+
|
| 784 |
+
else:
|
| 785 |
+
latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
|
| 786 |
+
latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
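# inverts the encoder-side normalization (encoding stores (x - shift_factor) * scaling_factor)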
|
| 787 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 788 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 789 |
+
|
| 790 |
+
# Offload all models
|
| 791 |
+
self.maybe_free_model_hooks()
|
| 792 |
+
|
| 793 |
+
if not return_dict:
|
| 794 |
+
return (image,)
|
| 795 |
+
|
| 796 |
+
return FluxPipelineOutput(images=image)
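End to end, the de-distilled checkpoint is driven like any Diffusers pipeline; a minimal sketch, assuming this file's pipeline class is importable locally and the repository is cloned to a placeholder path (class name and path are assumptions, adjust to the actual ones):

import torch
from pipeline_flux_de_distill import FluxPipeline   # assumption: the class name defined in this file

pipe = FluxPipeline.from_pretrained(
    "path/to/this/repo",                 # placeholder: local clone with model_index.json
    torch_dtype=torch.bfloat16,
).to("cuda")

image = pipe(
    "a photo of a corgi wearing a spacesuit",
    negative_prompt="blurry, low quality",   # true CFG: only applied when guidance_scale > 1
    guidance_scale=3.5,
    num_inference_steps=28,
).images[0]
image.save("corgi.png")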
|
scheduler/scheduler_config.json
ADDED
|
@@ -0,0 +1,11 @@
|
| 1 |
+
{
|
| 2 |
+
"_class_name": "FlowMatchEulerDiscreteScheduler",
|
| 3 |
+
"_diffusers_version": "0.30.0.dev0",
|
| 4 |
+
"base_image_seq_len": 256,
|
| 5 |
+
"base_shift": 0.5,
|
| 6 |
+
"max_image_seq_len": 4096,
|
| 7 |
+
"max_shift": 1.15,
|
| 8 |
+
"num_train_timesteps": 1000,
|
| 9 |
+
"shift": 3.0,
|
| 10 |
+
"use_dynamic_shifting": true
|
| 11 |
+
}
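These values feed the dynamic-shift path exercised above: with use_dynamic_shifting true, the scheduler takes the per-resolution mu from calculate_shift instead of the static shift of 3.0. Loading it standalone is a one-liner (a sketch against this repo's layout; the path is a placeholder):

from diffusers import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
    "path/to/this/repo", subfolder="scheduler"
)
print(scheduler.config.use_dynamic_shifting)   # True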
|
text_encoder/config.json
ADDED
|
@@ -0,0 +1,25 @@
|
| 1 |
+
{
|
| 2 |
+
"_name_or_path": "openai/clip-vit-large-patch14",
|
| 3 |
+
"architectures": [
|
| 4 |
+
"CLIPTextModel"
|
| 5 |
+
],
|
| 6 |
+
"attention_dropout": 0.0,
|
| 7 |
+
"bos_token_id": 0,
|
| 8 |
+
"dropout": 0.0,
|
| 9 |
+
"eos_token_id": 2,
|
| 10 |
+
"hidden_act": "quick_gelu",
|
| 11 |
+
"hidden_size": 768,
|
| 12 |
+
"initializer_factor": 1.0,
|
| 13 |
+
"initializer_range": 0.02,
|
| 14 |
+
"intermediate_size": 3072,
|
| 15 |
+
"layer_norm_eps": 1e-05,
|
| 16 |
+
"max_position_embeddings": 77,
|
| 17 |
+
"model_type": "clip_text_model",
|
| 18 |
+
"num_attention_heads": 12,
|
| 19 |
+
"num_hidden_layers": 12,
|
| 20 |
+
"pad_token_id": 1,
|
| 21 |
+
"projection_dim": 768,
|
| 22 |
+
"torch_dtype": "bfloat16",
|
| 23 |
+
"transformers_version": "4.43.3",
|
| 24 |
+
"vocab_size": 49408
|
| 25 |
+
}
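The pooled prompt embedding comes from this 12-layer CLIP-L text encoder (hidden_size 768, 77-token context), stored in bfloat16. A loading sketch with a placeholder path:

import torch
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained(
    "path/to/this/repo", subfolder="text_encoder", torch_dtype=torch.bfloat16
)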
|
text_encoder/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082
|
| 3 |
+
size 246144352
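Git stores only this three-line LFS pointer for each large file; the sha256 oid and byte size let the LFS client fetch and verify the actual blob. Parsing one is trivial (a sketch over the format shown above):

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082
size 246144352"""
fields = dict(line.split(" ", 1) for line in pointer.splitlines())
print(fields["oid"], int(fields["size"]))   # sha256:893d... 246144352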
|
text_encoder_2/config.json
ADDED
|
@@ -0,0 +1,32 @@
|
| 1 |
+
{
|
| 2 |
+
"_name_or_path": "google/t5-v1_1-xxl",
|
| 3 |
+
"architectures": [
|
| 4 |
+
"T5EncoderModel"
|
| 5 |
+
],
|
| 6 |
+
"classifier_dropout": 0.0,
|
| 7 |
+
"d_ff": 10240,
|
| 8 |
+
"d_kv": 64,
|
| 9 |
+
"d_model": 4096,
|
| 10 |
+
"decoder_start_token_id": 0,
|
| 11 |
+
"dense_act_fn": "gelu_new",
|
| 12 |
+
"dropout_rate": 0.1,
|
| 13 |
+
"eos_token_id": 1,
|
| 14 |
+
"feed_forward_proj": "gated-gelu",
|
| 15 |
+
"initializer_factor": 1.0,
|
| 16 |
+
"is_encoder_decoder": true,
|
| 17 |
+
"is_gated_act": true,
|
| 18 |
+
"layer_norm_epsilon": 1e-06,
|
| 19 |
+
"model_type": "t5",
|
| 20 |
+
"num_decoder_layers": 24,
|
| 21 |
+
"num_heads": 64,
|
| 22 |
+
"num_layers": 24,
|
| 23 |
+
"output_past": true,
|
| 24 |
+
"pad_token_id": 0,
|
| 25 |
+
"relative_attention_max_distance": 128,
|
| 26 |
+
"relative_attention_num_buckets": 32,
|
| 27 |
+
"tie_word_embeddings": false,
|
| 28 |
+
"torch_dtype": "bfloat16",
|
| 29 |
+
"transformers_version": "4.43.3",
|
| 30 |
+
"use_cache": true,
|
| 31 |
+
"vocab_size": 32128
|
| 32 |
+
}
|
text_encoder_2/model-00001-of-00002.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e
|
| 3 |
+
size 4994582224
|
text_encoder_2/model-00002-of-00002.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a5640855b301fcdbceddfa90ae8066cd9414aff020552a201a255ecf2059da00
|
| 3 |
+
size 4530066360
|
text_encoder_2/model.safetensors.index.json
ADDED
|
@@ -0,0 +1,226 @@
|
| 1 |
+
{
|
| 2 |
+
"metadata": {
|
| 3 |
+
"total_size": 9524621312
|
| 4 |
+
},
|
| 5 |
+
"weight_map": {
|
| 6 |
+
"encoder.block.0.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 7 |
+
"encoder.block.0.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 8 |
+
"encoder.block.0.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 9 |
+
"encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight": "model-00001-of-00002.safetensors",
|
| 10 |
+
"encoder.block.0.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 11 |
+
"encoder.block.0.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 12 |
+
"encoder.block.0.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 13 |
+
"encoder.block.0.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 14 |
+
"encoder.block.0.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 15 |
+
"encoder.block.0.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 16 |
+
"encoder.block.1.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 17 |
+
"encoder.block.1.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 18 |
+
"encoder.block.1.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 19 |
+
"encoder.block.1.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 20 |
+
"encoder.block.1.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 21 |
+
"encoder.block.1.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 22 |
+
"encoder.block.1.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 23 |
+
"encoder.block.1.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 24 |
+
"encoder.block.1.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 25 |
+
"encoder.block.10.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 26 |
+
"encoder.block.10.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 27 |
+
"encoder.block.10.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 28 |
+
"encoder.block.10.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 29 |
+
"encoder.block.10.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 30 |
+
"encoder.block.10.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 31 |
+
"encoder.block.10.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 32 |
+
"encoder.block.10.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 33 |
+
"encoder.block.10.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 34 |
+
"encoder.block.11.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 35 |
+
"encoder.block.11.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 36 |
+
"encoder.block.11.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 37 |
+
"encoder.block.11.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 38 |
+
"encoder.block.11.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 39 |
+
"encoder.block.11.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 40 |
+
"encoder.block.11.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 41 |
+
"encoder.block.11.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 42 |
+
"encoder.block.11.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 43 |
+
"encoder.block.12.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 44 |
+
"encoder.block.12.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 45 |
+
"encoder.block.12.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 46 |
+
"encoder.block.12.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 47 |
+
"encoder.block.12.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 48 |
+
"encoder.block.12.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 49 |
+
"encoder.block.12.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 50 |
+
"encoder.block.12.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 51 |
+
"encoder.block.12.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 52 |
+
"encoder.block.13.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 53 |
+
"encoder.block.13.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 54 |
+
"encoder.block.13.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 55 |
+
"encoder.block.13.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 56 |
+
"encoder.block.13.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 57 |
+
"encoder.block.13.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 58 |
+
"encoder.block.13.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 59 |
+
"encoder.block.13.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 60 |
+
"encoder.block.13.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 61 |
+
"encoder.block.14.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 62 |
+
"encoder.block.14.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 63 |
+
"encoder.block.14.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 64 |
+
"encoder.block.14.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 65 |
+
"encoder.block.14.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 66 |
+
"encoder.block.14.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 67 |
+
"encoder.block.14.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 68 |
+
"encoder.block.14.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 69 |
+
"encoder.block.14.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 70 |
+
"encoder.block.15.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 71 |
+
"encoder.block.15.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 72 |
+
"encoder.block.15.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 73 |
+
"encoder.block.15.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 74 |
+
"encoder.block.15.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 75 |
+
"encoder.block.15.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 76 |
+
"encoder.block.15.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 77 |
+
"encoder.block.15.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 78 |
+
"encoder.block.15.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 79 |
+
"encoder.block.16.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 80 |
+
"encoder.block.16.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 81 |
+
"encoder.block.16.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 82 |
+
"encoder.block.16.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 83 |
+
"encoder.block.16.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 84 |
+
"encoder.block.16.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 85 |
+
"encoder.block.16.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 86 |
+
"encoder.block.16.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 87 |
+
"encoder.block.16.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 88 |
+
"encoder.block.17.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 89 |
+
"encoder.block.17.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 90 |
+
"encoder.block.17.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 91 |
+
"encoder.block.17.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 92 |
+
"encoder.block.17.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 93 |
+
"encoder.block.17.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 94 |
+
"encoder.block.17.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 95 |
+
"encoder.block.17.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 96 |
+
"encoder.block.17.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 97 |
+
"encoder.block.18.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 98 |
+
"encoder.block.18.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 99 |
+
"encoder.block.18.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 100 |
+
"encoder.block.18.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 101 |
+
"encoder.block.18.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 102 |
+
"encoder.block.18.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 103 |
+
"encoder.block.18.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 104 |
+
"encoder.block.18.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 105 |
+
"encoder.block.18.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 106 |
+
"encoder.block.19.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 107 |
+
"encoder.block.19.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 108 |
+
"encoder.block.19.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 109 |
+
"encoder.block.19.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 110 |
+
"encoder.block.19.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 111 |
+
"encoder.block.19.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 112 |
+
"encoder.block.19.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 113 |
+
"encoder.block.19.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 114 |
+
"encoder.block.19.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 115 |
+
"encoder.block.2.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 116 |
+
"encoder.block.2.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 117 |
+
"encoder.block.2.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 118 |
+
"encoder.block.2.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 119 |
+
"encoder.block.2.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 120 |
+
"encoder.block.2.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 121 |
+
"encoder.block.2.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 122 |
+
"encoder.block.2.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 123 |
+
"encoder.block.2.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 124 |
+
"encoder.block.20.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 125 |
+
"encoder.block.20.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 126 |
+
"encoder.block.20.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 127 |
+
"encoder.block.20.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 128 |
+
"encoder.block.20.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 129 |
+
"encoder.block.20.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 130 |
+
"encoder.block.20.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 131 |
+
"encoder.block.20.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 132 |
+
"encoder.block.20.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 133 |
+
"encoder.block.21.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 134 |
+
"encoder.block.21.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 135 |
+
"encoder.block.21.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 136 |
+
"encoder.block.21.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 137 |
+
"encoder.block.21.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 138 |
+
"encoder.block.21.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 139 |
+
"encoder.block.21.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 140 |
+
"encoder.block.21.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 141 |
+
"encoder.block.21.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 142 |
+
"encoder.block.22.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 143 |
+
"encoder.block.22.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 144 |
+
"encoder.block.22.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 145 |
+
"encoder.block.22.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 146 |
+
"encoder.block.22.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 147 |
+
"encoder.block.22.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 148 |
+
"encoder.block.22.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 149 |
+
"encoder.block.22.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 150 |
+
"encoder.block.22.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 151 |
+
"encoder.block.23.layer.0.SelfAttention.k.weight": "model-00002-of-00002.safetensors",
|
| 152 |
+
"encoder.block.23.layer.0.SelfAttention.o.weight": "model-00002-of-00002.safetensors",
|
| 153 |
+
"encoder.block.23.layer.0.SelfAttention.q.weight": "model-00002-of-00002.safetensors",
|
| 154 |
+
"encoder.block.23.layer.0.SelfAttention.v.weight": "model-00002-of-00002.safetensors",
|
| 155 |
+
"encoder.block.23.layer.0.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 156 |
+
"encoder.block.23.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00002.safetensors",
|
| 157 |
+
"encoder.block.23.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00002.safetensors",
|
| 158 |
+
"encoder.block.23.layer.1.DenseReluDense.wo.weight": "model-00002-of-00002.safetensors",
|
| 159 |
+
"encoder.block.23.layer.1.layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 160 |
+
"encoder.block.3.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 161 |
+
"encoder.block.3.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 162 |
+
"encoder.block.3.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 163 |
+
"encoder.block.3.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 164 |
+
"encoder.block.3.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 165 |
+
"encoder.block.3.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 166 |
+
"encoder.block.3.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 167 |
+
"encoder.block.3.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 168 |
+
"encoder.block.3.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 169 |
+
"encoder.block.4.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 170 |
+
"encoder.block.4.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 171 |
+
"encoder.block.4.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 172 |
+
"encoder.block.4.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 173 |
+
"encoder.block.4.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 174 |
+
"encoder.block.4.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 175 |
+
"encoder.block.4.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 176 |
+
"encoder.block.4.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 177 |
+
"encoder.block.4.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 178 |
+
"encoder.block.5.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 179 |
+
"encoder.block.5.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 180 |
+
"encoder.block.5.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 181 |
+
"encoder.block.5.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 182 |
+
"encoder.block.5.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 183 |
+
"encoder.block.5.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 184 |
+
"encoder.block.5.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 185 |
+
"encoder.block.5.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 186 |
+
"encoder.block.5.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 187 |
+
"encoder.block.6.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 188 |
+
"encoder.block.6.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 189 |
+
"encoder.block.6.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 190 |
+
"encoder.block.6.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 191 |
+
"encoder.block.6.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 192 |
+
"encoder.block.6.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 193 |
+
"encoder.block.6.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 194 |
+
"encoder.block.6.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 195 |
+
"encoder.block.6.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 196 |
+
"encoder.block.7.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 197 |
+
"encoder.block.7.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 198 |
+
"encoder.block.7.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 199 |
+
"encoder.block.7.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 200 |
+
"encoder.block.7.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 201 |
+
"encoder.block.7.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 202 |
+
"encoder.block.7.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 203 |
+
"encoder.block.7.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 204 |
+
"encoder.block.7.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 205 |
+
"encoder.block.8.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 206 |
+
"encoder.block.8.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 207 |
+
"encoder.block.8.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 208 |
+
"encoder.block.8.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 209 |
+
"encoder.block.8.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 210 |
+
"encoder.block.8.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 211 |
+
"encoder.block.8.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 212 |
+
"encoder.block.8.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 213 |
+
"encoder.block.8.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 214 |
+
"encoder.block.9.layer.0.SelfAttention.k.weight": "model-00001-of-00002.safetensors",
|
| 215 |
+
"encoder.block.9.layer.0.SelfAttention.o.weight": "model-00001-of-00002.safetensors",
|
| 216 |
+
"encoder.block.9.layer.0.SelfAttention.q.weight": "model-00001-of-00002.safetensors",
|
| 217 |
+
"encoder.block.9.layer.0.SelfAttention.v.weight": "model-00001-of-00002.safetensors",
|
| 218 |
+
"encoder.block.9.layer.0.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 219 |
+
"encoder.block.9.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00002.safetensors",
|
| 220 |
+
"encoder.block.9.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00002.safetensors",
|
| 221 |
+
"encoder.block.9.layer.1.DenseReluDense.wo.weight": "model-00001-of-00002.safetensors",
|
| 222 |
+
"encoder.block.9.layer.1.layer_norm.weight": "model-00001-of-00002.safetensors",
|
| 223 |
+
"encoder.final_layer_norm.weight": "model-00002-of-00002.safetensors",
|
| 224 |
+
"shared.weight": "model-00001-of-00002.safetensors"
|
| 225 |
+
}
|
| 226 |
+
}
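from_pretrained resolves each tensor through this weight_map, so a shard boundary can fall mid-block (block 12's o-projection lands in shard 2 while its q/k/v stay in shard 1, as above). The index is plain JSON; tallying the shards by hand takes a few lines (a sketch, assuming the file path below):

import json
from collections import Counter

with open("text_encoder_2/model.safetensors.index.json") as f:
    index = json.load(f)
shards = Counter(index["weight_map"].values())
print(index["metadata"]["total_size"])   # 9524621312 bytes across both shards
print(shards)                            # tensor count per shard file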
|
tokenizer/merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
tokenizer/special_tokens_map.json
ADDED
|
@@ -0,0 +1,30 @@
|
| 1 |
+
{
|
| 2 |
+
"bos_token": {
|
| 3 |
+
"content": "<|startoftext|>",
|
| 4 |
+
"lstrip": false,
|
| 5 |
+
"normalized": true,
|
| 6 |
+
"rstrip": false,
|
| 7 |
+
"single_word": false
|
| 8 |
+
},
|
| 9 |
+
"eos_token": {
|
| 10 |
+
"content": "<|endoftext|>",
|
| 11 |
+
"lstrip": false,
|
| 12 |
+
"normalized": false,
|
| 13 |
+
"rstrip": false,
|
| 14 |
+
"single_word": false
|
| 15 |
+
},
|
| 16 |
+
"pad_token": {
|
| 17 |
+
"content": "<|endoftext|>",
|
| 18 |
+
"lstrip": false,
|
| 19 |
+
"normalized": false,
|
| 20 |
+
"rstrip": false,
|
| 21 |
+
"single_word": false
|
| 22 |
+
},
|
| 23 |
+
"unk_token": {
|
| 24 |
+
"content": "<|endoftext|>",
|
| 25 |
+
"lstrip": false,
|
| 26 |
+
"normalized": false,
|
| 27 |
+
"rstrip": false,
|
| 28 |
+
"single_word": false
|
| 29 |
+
}
|
| 30 |
+
}
|
tokenizer/tokenizer_config.json
ADDED
|
@@ -0,0 +1,30 @@
|
| 1 |
+
{
|
| 2 |
+
"add_prefix_space": false,
|
| 3 |
+
"added_tokens_decoder": {
|
| 4 |
+
"49406": {
|
| 5 |
+
"content": "<|startoftext|>",
|
| 6 |
+
"lstrip": false,
|
| 7 |
+
"normalized": true,
|
| 8 |
+
"rstrip": false,
|
| 9 |
+
"single_word": false,
|
| 10 |
+
"special": true
|
| 11 |
+
},
|
| 12 |
+
"49407": {
|
| 13 |
+
"content": "<|endoftext|>",
|
| 14 |
+
"lstrip": false,
|
| 15 |
+
"normalized": false,
|
| 16 |
+
"rstrip": false,
|
| 17 |
+
"single_word": false,
|
| 18 |
+
"special": true
|
| 19 |
+
}
|
| 20 |
+
},
|
| 21 |
+
"bos_token": "<|startoftext|>",
|
| 22 |
+
"clean_up_tokenization_spaces": true,
|
| 23 |
+
"do_lower_case": true,
|
| 24 |
+
"eos_token": "<|endoftext|>",
|
| 25 |
+
"errors": "replace",
|
| 26 |
+
"model_max_length": 77,
|
| 27 |
+
"pad_token": "<|endoftext|>",
|
| 28 |
+
"tokenizer_class": "CLIPTokenizer",
|
| 29 |
+
"unk_token": "<|endoftext|>"
|
| 30 |
+
}
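This is the stock CLIP BPE tokenizer (49408-token vocab, 77-token window, lowercasing enabled). A sketch of loading it and checking the special ids registered in added_tokens_decoder above (placeholder path):

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("path/to/this/repo", subfolder="tokenizer")
print(tok.model_max_length)                        # 77
print(tok.convert_tokens_to_ids("<|endoftext|>"))  # 49407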
|
tokenizer/vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
tokenizer_2/special_tokens_map.json
ADDED
|
@@ -0,0 +1,125 @@
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<extra_id_0>",
|
| 4 |
+
"<extra_id_1>",
|
| 5 |
+
"<extra_id_2>",
|
| 6 |
+
"<extra_id_3>",
|
| 7 |
+
"<extra_id_4>",
|
| 8 |
+
"<extra_id_5>",
|
| 9 |
+
"<extra_id_6>",
|
| 10 |
+
"<extra_id_7>",
|
| 11 |
+
"<extra_id_8>",
|
| 12 |
+
"<extra_id_9>",
|
| 13 |
+
"<extra_id_10>",
|
| 14 |
+
"<extra_id_11>",
|
| 15 |
+
"<extra_id_12>",
|
| 16 |
+
"<extra_id_13>",
|
| 17 |
+
"<extra_id_14>",
|
| 18 |
+
"<extra_id_15>",
|
| 19 |
+
"<extra_id_16>",
|
| 20 |
+
"<extra_id_17>",
|
| 21 |
+
"<extra_id_18>",
|
| 22 |
+
"<extra_id_19>",
|
| 23 |
+
"<extra_id_20>",
|
| 24 |
+
"<extra_id_21>",
|
| 25 |
+
"<extra_id_22>",
|
| 26 |
+
"<extra_id_23>",
|
| 27 |
+
"<extra_id_24>",
|
| 28 |
+
"<extra_id_25>",
|
| 29 |
+
"<extra_id_26>",
|
| 30 |
+
"<extra_id_27>",
|
| 31 |
+
"<extra_id_28>",
|
| 32 |
+
"<extra_id_29>",
|
| 33 |
+
"<extra_id_30>",
|
| 34 |
+
"<extra_id_31>",
|
| 35 |
+
"<extra_id_32>",
|
| 36 |
+
"<extra_id_33>",
|
| 37 |
+
"<extra_id_34>",
|
| 38 |
+
"<extra_id_35>",
|
| 39 |
+
"<extra_id_36>",
|
| 40 |
+
"<extra_id_37>",
|
| 41 |
+
"<extra_id_38>",
|
| 42 |
+
"<extra_id_39>",
|
| 43 |
+
"<extra_id_40>",
|
| 44 |
+
"<extra_id_41>",
|
| 45 |
+
"<extra_id_42>",
|
| 46 |
+
"<extra_id_43>",
|
| 47 |
+
"<extra_id_44>",
|
| 48 |
+
"<extra_id_45>",
|
| 49 |
+
"<extra_id_46>",
|
| 50 |
+
"<extra_id_47>",
|
| 51 |
+
"<extra_id_48>",
|
| 52 |
+
"<extra_id_49>",
|
| 53 |
+
"<extra_id_50>",
|
| 54 |
+
"<extra_id_51>",
|
| 55 |
+
"<extra_id_52>",
|
| 56 |
+
"<extra_id_53>",
|
| 57 |
+
"<extra_id_54>",
|
| 58 |
+
"<extra_id_55>",
|
| 59 |
+
"<extra_id_56>",
|
| 60 |
+
"<extra_id_57>",
|
| 61 |
+
"<extra_id_58>",
|
| 62 |
+
"<extra_id_59>",
|
| 63 |
+
"<extra_id_60>",
|
| 64 |
+
"<extra_id_61>",
|
| 65 |
+
"<extra_id_62>",
|
| 66 |
+
"<extra_id_63>",
|
| 67 |
+
"<extra_id_64>",
|
| 68 |
+
"<extra_id_65>",
|
| 69 |
+
"<extra_id_66>",
|
| 70 |
+
"<extra_id_67>",
|
| 71 |
+
"<extra_id_68>",
|
| 72 |
+
"<extra_id_69>",
|
| 73 |
+
"<extra_id_70>",
|
| 74 |
+
"<extra_id_71>",
|
| 75 |
+
"<extra_id_72>",
|
| 76 |
+
"<extra_id_73>",
|
| 77 |
+
"<extra_id_74>",
|
| 78 |
+
"<extra_id_75>",
|
| 79 |
+
"<extra_id_76>",
|
| 80 |
+
"<extra_id_77>",
|
| 81 |
+
"<extra_id_78>",
|
| 82 |
+
"<extra_id_79>",
|
| 83 |
+
"<extra_id_80>",
|
| 84 |
+
"<extra_id_81>",
|
| 85 |
+
"<extra_id_82>",
|
| 86 |
+
"<extra_id_83>",
|
| 87 |
+
"<extra_id_84>",
|
| 88 |
+
"<extra_id_85>",
|
| 89 |
+
"<extra_id_86>",
|
| 90 |
+
"<extra_id_87>",
|
| 91 |
+
"<extra_id_88>",
|
| 92 |
+
"<extra_id_89>",
|
| 93 |
+
"<extra_id_90>",
|
| 94 |
+
"<extra_id_91>",
|
| 95 |
+
"<extra_id_92>",
|
| 96 |
+
"<extra_id_93>",
|
| 97 |
+
"<extra_id_94>",
|
| 98 |
+
"<extra_id_95>",
|
| 99 |
+
"<extra_id_96>",
|
| 100 |
+
"<extra_id_97>",
|
| 101 |
+
"<extra_id_98>",
|
| 102 |
+
"<extra_id_99>"
|
| 103 |
+
],
|
| 104 |
+
"eos_token": {
|
| 105 |
+
"content": "</s>",
|
| 106 |
+
"lstrip": false,
|
| 107 |
+
"normalized": false,
|
| 108 |
+
"rstrip": false,
|
| 109 |
+
"single_word": false
|
| 110 |
+
},
|
| 111 |
+
"pad_token": {
|
| 112 |
+
"content": "<pad>",
|
| 113 |
+
"lstrip": false,
|
| 114 |
+
"normalized": false,
|
| 115 |
+
"rstrip": false,
|
| 116 |
+
"single_word": false
|
| 117 |
+
},
|
| 118 |
+
"unk_token": {
|
| 119 |
+
"content": "<unk>",
|
| 120 |
+
"lstrip": false,
|
| 121 |
+
"normalized": false,
|
| 122 |
+
"rstrip": false,
|
| 123 |
+
"single_word": false
|
| 124 |
+
}
|
| 125 |
+
}
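The 100 `<extra_id_*>` sentinels are T5's span-corruption placeholders; they ride along in the tokenizer but never appear in image prompts. A loading sketch with a placeholder path, noting that the pipeline caps prompts at max_sequence_length=512 rather than any tokenizer-side limit:

from transformers import T5TokenizerFast

tok2 = T5TokenizerFast.from_pretrained("path/to/this/repo", subfolder="tokenizer_2")
ids = tok2("a photo of a corgi", max_length=512, padding="max_length",
           truncation=True, return_tensors="pt").input_ids
print(ids.shape)   # torch.Size([1, 512])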
|
tokenizer_2/spiece.model
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
|
| 3 |
+
size 791656
|
tokenizer_2/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
tokenizer_2/tokenizer_config.json
ADDED
|
@@ -0,0 +1,940 @@
|
+{
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {"content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "1": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "2": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32000": {"content": "<extra_id_99>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32001": {"content": "<extra_id_98>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32002": {"content": "<extra_id_97>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32003": {"content": "<extra_id_96>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32004": {"content": "<extra_id_95>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32005": {"content": "<extra_id_94>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32006": {"content": "<extra_id_93>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32007": {"content": "<extra_id_92>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32008": {"content": "<extra_id_91>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32009": {"content": "<extra_id_90>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32010": {"content": "<extra_id_89>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32011": {"content": "<extra_id_88>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32012": {"content": "<extra_id_87>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32013": {"content": "<extra_id_86>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32014": {"content": "<extra_id_85>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32015": {"content": "<extra_id_84>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32016": {"content": "<extra_id_83>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32017": {"content": "<extra_id_82>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32018": {"content": "<extra_id_81>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32019": {"content": "<extra_id_80>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32020": {"content": "<extra_id_79>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32021": {"content": "<extra_id_78>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32022": {"content": "<extra_id_77>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32023": {"content": "<extra_id_76>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32024": {"content": "<extra_id_75>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32025": {"content": "<extra_id_74>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32026": {"content": "<extra_id_73>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32027": {"content": "<extra_id_72>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32028": {"content": "<extra_id_71>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32029": {"content": "<extra_id_70>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32030": {"content": "<extra_id_69>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32031": {"content": "<extra_id_68>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32032": {"content": "<extra_id_67>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32033": {"content": "<extra_id_66>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32034": {"content": "<extra_id_65>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32035": {"content": "<extra_id_64>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32036": {"content": "<extra_id_63>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32037": {"content": "<extra_id_62>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32038": {"content": "<extra_id_61>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32039": {"content": "<extra_id_60>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32040": {"content": "<extra_id_59>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32041": {"content": "<extra_id_58>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32042": {"content": "<extra_id_57>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32043": {"content": "<extra_id_56>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32044": {"content": "<extra_id_55>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32045": {"content": "<extra_id_54>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32046": {"content": "<extra_id_53>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32047": {"content": "<extra_id_52>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32048": {"content": "<extra_id_51>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32049": {"content": "<extra_id_50>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32050": {"content": "<extra_id_49>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32051": {"content": "<extra_id_48>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32052": {"content": "<extra_id_47>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32053": {"content": "<extra_id_46>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32054": {"content": "<extra_id_45>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32055": {"content": "<extra_id_44>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32056": {"content": "<extra_id_43>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32057": {"content": "<extra_id_42>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32058": {"content": "<extra_id_41>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32059": {"content": "<extra_id_40>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32060": {"content": "<extra_id_39>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32061": {"content": "<extra_id_38>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32062": {"content": "<extra_id_37>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32063": {"content": "<extra_id_36>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32064": {"content": "<extra_id_35>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32065": {"content": "<extra_id_34>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32066": {"content": "<extra_id_33>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32067": {"content": "<extra_id_32>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32068": {"content": "<extra_id_31>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32069": {"content": "<extra_id_30>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32070": {"content": "<extra_id_29>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32071": {"content": "<extra_id_28>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32072": {"content": "<extra_id_27>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32073": {"content": "<extra_id_26>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32074": {"content": "<extra_id_25>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32075": {"content": "<extra_id_24>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32076": {"content": "<extra_id_23>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32077": {"content": "<extra_id_22>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32078": {"content": "<extra_id_21>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32079": {"content": "<extra_id_20>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32080": {"content": "<extra_id_19>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32081": {"content": "<extra_id_18>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32082": {"content": "<extra_id_17>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32083": {"content": "<extra_id_16>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32084": {"content": "<extra_id_15>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32085": {"content": "<extra_id_14>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32086": {"content": "<extra_id_13>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32087": {"content": "<extra_id_12>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32088": {"content": "<extra_id_11>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32089": {"content": "<extra_id_10>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32090": {"content": "<extra_id_9>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32091": {"content": "<extra_id_8>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32092": {"content": "<extra_id_7>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32093": {"content": "<extra_id_6>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32094": {"content": "<extra_id_5>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32095": {"content": "<extra_id_4>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32096": {"content": "<extra_id_3>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32097": {"content": "<extra_id_2>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32098": {"content": "<extra_id_1>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+    "32099": {"content": "<extra_id_0>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
+  },
+  "additional_special_tokens": [
+    "<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>",
+    "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>",
+    "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>",
+    "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>",
+    "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>",
+    "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>",
+    "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>",
+    "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>",
+    "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>",
+    "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"
+  ],
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "extra_ids": 100,
+  "legacy": true,
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "T5Tokenizer",
+  "unk_token": "<unk>"
+}
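A minimal sketch of how this config is consumed (the repo id below is a placeholder, not part of this upload): transformers rebuilds the secondary T5 tokenizer from this file plus the SentencePiece model in the same folder. Note that the `<extra_id_N>` sentinel ids count downward, as in the `added_tokens_decoder` table above.

```python
# Sketch only; "your-namespace/flux1-dev-de-distill" is a placeholder repo id.
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained(
    "your-namespace/flux1-dev-de-distill", subfolder="tokenizer_2"
)

print(tok.model_max_length)                        # 512, from model_max_length above
print(tok.convert_tokens_to_ids("<extra_id_0>"))   # 32099 - sentinels are numbered downward
print(tok.convert_tokens_to_ids("<extra_id_99>"))  # 32000
```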
transformer/config.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "_class_name": "FluxTransformer2DModel",
+  "_diffusers_version": "0.31.0.dev0",
+  "attention_head_dim": 128,
+  "axes_dims_rope": [16, 56, 56],
+  "guidance_embeds": false,
+  "in_channels": 64,
+  "joint_attention_dim": 4096,
+  "num_attention_heads": 24,
+  "num_layers": 19,
+  "num_single_layers": 38,
+  "patch_size": 1,
+  "pooled_projection_dim": 768
+}
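The notable field here is `"guidance_embeds": false`: the distilled base transformer embeds a guidance scale, while this de-distilled one does not, so real classifier-free guidance (a second, unconditional forward pass) is expected at inference time. A loading sketch, with a placeholder repo id:

```python
# Sketch only; the repo id is an assumption - replace with this repo's actual id.
import torch
from diffusers import FluxTransformer2DModel

transformer = FluxTransformer2DModel.from_pretrained(
    "your-namespace/flux1-dev-de-distill",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)
print(transformer.config.guidance_embeds)  # False - no distilled guidance input
```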
transformer/diffusion_pytorch_model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a99f3d0262748309658a6fa5371e5a7557f888367a32a2dd4d5616475c85fd8
+size 9962580296
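The shards are stored as Git LFS pointer stubs rather than the weights themselves; `git lfs pull` downloads the real file and verifies it against the `oid`. A small sketch of what the three lines above encode:

```python
# Sketch: parse the LFS pointer text copied from the file above.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:8a99f3d0262748309658a6fa5371e5a7557f888367a32a2dd4d5616475c85fd8\n"
    "size 9962580296"
)
meta = dict(line.split(" ", 1) for line in pointer.splitlines())
print(meta["oid"])                          # sha256 digest the download must match
print(f"{int(meta['size']) / 1e9:.2f} GB")  # ~9.96 GB for this shard
```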
transformer/diffusion_pytorch_model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb9948ec50b4fed2fa4d651101bc024502d74dfa1028a75778eb07a6992ebcfe
+size 9949328904
transformer/diffusion_pytorch_model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d1e825e357857d6621ffcda32b56ee911d2bbfa2869af6b36afacd45eae69fa
+size 3870584832
transformer/diffusion_pytorch_model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
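Although the index diff is not rendered, this file follows the standard Hugging Face sharded-checkpoint layout: a `metadata.total_size` field plus a `weight_map` from every tensor name to one of the three shards above. A sketch, assuming a local clone with LFS files pulled:

```python
# Sketch: inspect the shard index of the transformer checkpoint.
import json
from collections import Counter

with open("transformer/diffusion_pytorch_model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total bytes across all shards
print(Counter(index["weight_map"].values()))  # how many tensors live in each shard
```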
vae/config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.30.0.dev0",
+  "_name_or_path": "../checkpoints/flux-dev",
+  "act_fn": "silu",
+  "block_out_channels": [128, 256, 512, 512],
+  "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 16,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "mid_block_add_attention": true,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 1024,
+  "scaling_factor": 0.3611,
+  "shift_factor": 0.1159,
+  "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
+  "use_post_quant_conv": false,
+  "use_quant_conv": false
+}
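A sketch of how `scaling_factor` and `shift_factor` above are used: Flux latents are un-scaled and un-shifted before decoding, mirroring the decode step of diffusers' Flux pipeline. The repo id is again a placeholder.

```python
# Sketch only; the repo id is an assumption - replace with this repo's actual id.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "your-namespace/flux1-dev-de-distill", subfolder="vae"
)

latents = torch.randn(1, 16, 128, 128)  # 16 latent_channels; 128 = 1024 / 8x downsampling
latents = latents / vae.config.scaling_factor + vae.config.shift_factor
image = vae.decode(latents).sample      # (1, 3, 1024, 1024) for sample_size 1024
```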
vae/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3
+size 167666902