Upload 159 files
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- Hugging Face.Quantization Fundamentals/0_Introduction_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/1_Handling Big Models_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/2_Data Types and Sizes_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/3_Loading Models by data type_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/4_Quantization Theory_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/5_Quantization of LLMs_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/6_Conclusion_HFQF.mp4 +3 -0
- Hugging Face.Quantization Fundamentals/Materials/2_Data Types and Sizes_L2_data_types.ipynb +393 -0
- Hugging Face.Quantization Fundamentals/Materials/2_Data Types and Sizes_requirements.txt +12 -0
- Hugging Face.Quantization Fundamentals/Materials/3_Loading Models by data type_L3_models_with_different_data_types.ipynb +1516 -0
- Hugging Face.Quantization Fundamentals/Materials/3_Loading Models by data type_helper.py +74 -0
- Hugging Face.Quantization Fundamentals/Materials/4_Quantization Theory_L4_quantization_theory.ipynb +371 -0
- Hugging Face.Quantization Fundamentals/Materials/4_Quantization Theory_helper.py +44 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/README.md +166 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/config.json +459 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/preprocessor_config.json +24 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/pytorch_model.bin +3 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/README.md +152 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/config.json +169 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/preprocessor_config.json +17 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/pytorch_model.bin +3 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/special_tokens_map.json +7 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/tokenizer.json +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/tokenizer_config.json +21 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/vocab.txt +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/README.md +125 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/config.json +169 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/preprocessor_config.json +25 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/pytorch_model.bin +3 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/special_tokens_map.json +7 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/tokenizer.json +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/tokenizer_config.json +17 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/vocab.txt +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/README.md +129 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/config.json +169 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/model.safetensors +3 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/preprocessor_config.json +25 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/special_tokens_map.json +7 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/tokenizer.json +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/tokenizer_config.json +25 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/vocab.txt +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/README.md +57 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/added_tokens.json +1 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/config.json +42 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/merges.txt +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/pytorch_model.bin +3 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/special_tokens_map.json +1 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/tokenizer.json +0 -0
- Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/tokenizer_config.json +1 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 58 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 59 |
HuggingFace.Quantization_in_Depth/Materials/models/facebook/nllb-200-distilled-600M/tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 57 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 58 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 59 |
HuggingFace.Quantization_in_Depth/Materials/models/facebook/nllb-200-distilled-600M/tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
Hugging[[:space:]]Face.Quantization[[:space:]]Fundamentals/Materials/Models/facebook/nllb-200-distilled-600M/tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
Hugging Face.Quantization Fundamentals/0_Introduction_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ee27f037e86b7f8867f61cca252f196d2b7f9e8a8d25c26851df6b249ebbc710
|
| 3 |
+
size 123644857
|
Hugging Face.Quantization Fundamentals/1_Handling Big Models_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ecc1d639334da335eef6ff2caf82bc0adaf3377187708f6335a9ff59d8413c9a
|
| 3 |
+
size 195092976
|
Hugging Face.Quantization Fundamentals/2_Data Types and Sizes_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f4ea87e5838379b95064a4f8a6d91fb480eb24b16bb3641f257e9f38f5684b72
|
| 3 |
+
size 670432870
|
Hugging Face.Quantization Fundamentals/3_Loading Models by data type_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8fbb34b87ea5418665cc694c078c71ad9f212c44c0719c26bc872213f8ca0d40
|
| 3 |
+
size 596180681
|
Hugging Face.Quantization Fundamentals/4_Quantization Theory_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aa233f42902804bc3bfafc583913bab561cd866736b0008e24697151f7962a0c
|
| 3 |
+
size 581333919
|
Hugging Face.Quantization Fundamentals/5_Quantization of LLMs_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:66b81a30a4166557691c2fc5eaf286529604722ff277a20329f9400b48c3cd8c
|
| 3 |
+
size 260391012
|
Hugging Face.Quantization Fundamentals/6_Conclusion_HFQF.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:306acfbcbcb5cce23696d87668b1a495da7732fb57bf0b69bec1a0576cc87069
|
| 3 |
+
size 29668595
|
Hugging Face.Quantization Fundamentals/Materials/2_Data Types and Sizes_L2_data_types.ipynb
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {},
|
| 6 |
+
"source": [
|
| 7 |
+
"You can download the `requirements.txt` for this course from the workspace of this lab. `File --> Open...`"
|
| 8 |
+
]
|
| 9 |
+
},
|
| 10 |
+
{
|
| 11 |
+
"cell_type": "markdown",
|
| 12 |
+
"metadata": {
|
| 13 |
+
"id": "r8teiBZ6dP5g"
|
| 14 |
+
},
|
| 15 |
+
"source": [
|
| 16 |
+
"# Lesson 2: Data Types and Sizes\n",
|
| 17 |
+
"\n",
|
| 18 |
+
"In this lab, you will learn about the common data types used to store the parameters of machine learning models.\n",
|
| 19 |
+
"\n",
|
| 20 |
+
"\n",
|
| 21 |
+
"The libraries are already installed in the classroom. If you're running this notebook on your own machine, you can install the following:\n",
|
| 22 |
+
"\n",
|
| 23 |
+
"```Python\n",
|
| 24 |
+
"!pip install torch==2.1.1\n",
|
| 25 |
+
"```"
|
| 26 |
+
]
|
| 27 |
+
},
|
| 28 |
+
{
|
| 29 |
+
"cell_type": "code",
|
| 30 |
+
"execution_count": null,
|
| 31 |
+
"metadata": {
|
| 32 |
+
"id": "x_9J8WavkQGl"
|
| 33 |
+
},
|
| 34 |
+
"outputs": [],
|
| 35 |
+
"source": []
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"cell_type": "code",
|
| 39 |
+
"execution_count": null,
|
| 40 |
+
"metadata": {
|
| 41 |
+
"id": "nFC9wgzxBZvz"
|
| 42 |
+
},
|
| 43 |
+
"outputs": [],
|
| 44 |
+
"source": [
|
| 45 |
+
"import torch"
|
| 46 |
+
]
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"cell_type": "markdown",
|
| 50 |
+
"metadata": {
|
| 51 |
+
"id": "hW7psx41rn0h"
|
| 52 |
+
},
|
| 53 |
+
"source": [
|
| 54 |
+
"### Integers"
|
| 55 |
+
]
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"cell_type": "code",
|
| 59 |
+
"execution_count": null,
|
| 60 |
+
"metadata": {},
|
| 61 |
+
"outputs": [],
|
| 62 |
+
"source": [
|
| 63 |
+
"# Information of `8-bit unsigned integer`\n",
|
| 64 |
+
"torch.iinfo(torch.uint8)"
|
| 65 |
+
]
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"cell_type": "code",
|
| 69 |
+
"execution_count": null,
|
| 70 |
+
"metadata": {},
|
| 71 |
+
"outputs": [],
|
| 72 |
+
"source": [
|
| 73 |
+
"# Information of `8-bit (signed) integer`\n",
|
| 74 |
+
"torch.iinfo(torch.int8)"
|
| 75 |
+
]
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"cell_type": "code",
|
| 79 |
+
"execution_count": null,
|
| 80 |
+
"metadata": {},
|
| 81 |
+
"outputs": [],
|
| 82 |
+
"source": [
|
| 83 |
+
"### Information of `64-bit (signed) integer`\n"
|
| 84 |
+
]
|
| 85 |
+
},
|
| 86 |
+
{
|
| 87 |
+
"cell_type": "code",
|
| 88 |
+
"execution_count": null,
|
| 89 |
+
"metadata": {},
|
| 90 |
+
"outputs": [],
|
| 91 |
+
"source": [
|
| 92 |
+
"### Information of `32-bit (signed) integer`\n"
|
| 93 |
+
]
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"cell_type": "code",
|
| 97 |
+
"execution_count": null,
|
| 98 |
+
"metadata": {},
|
| 99 |
+
"outputs": [],
|
| 100 |
+
"source": [
|
| 101 |
+
"### Information of `16-bit (signed) integer`\n"
|
| 102 |
+
]
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"cell_type": "markdown",
|
| 106 |
+
"metadata": {},
|
| 107 |
+
"source": [
|
| 108 |
+
"### Floating Points "
|
| 109 |
+
]
|
| 110 |
+
},
|
| 111 |
+
{
|
| 112 |
+
"cell_type": "code",
|
| 113 |
+
"execution_count": null,
|
| 114 |
+
"metadata": {},
|
| 115 |
+
"outputs": [],
|
| 116 |
+
"source": [
|
| 117 |
+
"# by default, python stores float data in fp64\n",
|
| 118 |
+
"value = 1/3"
|
| 119 |
+
]
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"cell_type": "code",
|
| 123 |
+
"execution_count": null,
|
| 124 |
+
"metadata": {},
|
| 125 |
+
"outputs": [],
|
| 126 |
+
"source": [
|
| 127 |
+
"format(value, '.60f')"
|
| 128 |
+
]
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"cell_type": "code",
|
| 132 |
+
"execution_count": null,
|
| 133 |
+
"metadata": {},
|
| 134 |
+
"outputs": [],
|
| 135 |
+
"source": [
|
| 136 |
+
"# 64-bit floating point\n",
|
| 137 |
+
"tensor_fp64 = torch.tensor(value, dtype = torch.float64)"
|
| 138 |
+
]
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"cell_type": "code",
|
| 142 |
+
"execution_count": null,
|
| 143 |
+
"metadata": {},
|
| 144 |
+
"outputs": [],
|
| 145 |
+
"source": [
|
| 146 |
+
"print(f\"fp64 tensor: {format(tensor_fp64.item(), '.60f')}\")"
|
| 147 |
+
]
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"cell_type": "code",
|
| 151 |
+
"execution_count": null,
|
| 152 |
+
"metadata": {},
|
| 153 |
+
"outputs": [],
|
| 154 |
+
"source": [
|
| 155 |
+
"tensor_fp32 = torch.tensor(value, dtype = torch.float32)\n",
|
| 156 |
+
"tensor_fp16 = torch.tensor(value, dtype = torch.float16)\n",
|
| 157 |
+
"tensor_bf16 = torch.tensor(value, dtype = torch.bfloat16)"
|
| 158 |
+
]
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"cell_type": "code",
|
| 162 |
+
"execution_count": null,
|
| 163 |
+
"metadata": {},
|
| 164 |
+
"outputs": [],
|
| 165 |
+
"source": [
|
| 166 |
+
"print(f\"fp64 tensor: {format(tensor_fp64.item(), '.60f')}\")\n",
|
| 167 |
+
"print(f\"fp32 tensor: {format(tensor_fp32.item(), '.60f')}\")\n",
|
| 168 |
+
"print(f\"fp16 tensor: {format(tensor_fp16.item(), '.60f')}\")\n",
|
| 169 |
+
"print(f\"bf16 tensor: {format(tensor_bf16.item(), '.60f')}\")"
|
| 170 |
+
]
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"cell_type": "code",
|
| 174 |
+
"execution_count": null,
|
| 175 |
+
"metadata": {
|
| 176 |
+
"colab": {
|
| 177 |
+
"base_uri": "https://localhost:8080/"
|
| 178 |
+
},
|
| 179 |
+
"executionInfo": {
|
| 180 |
+
"elapsed": 294,
|
| 181 |
+
"status": "ok",
|
| 182 |
+
"timestamp": 1699482682456,
|
| 183 |
+
"user": {
|
| 184 |
+
"displayName": "Marc Sun",
|
| 185 |
+
"userId": "00829270524676809963"
|
| 186 |
+
},
|
| 187 |
+
"user_tz": 300
|
| 188 |
+
},
|
| 189 |
+
"id": "hUukczHrBodt",
|
| 190 |
+
"outputId": "4e75bdf1-25b6-4e0c-9c08-18208e943b10"
|
| 191 |
+
},
|
| 192 |
+
"outputs": [],
|
| 193 |
+
"source": [
|
| 194 |
+
"# Information of `16-bit brain floating point`\n",
|
| 195 |
+
"torch.finfo(torch.bfloat16)"
|
| 196 |
+
]
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"cell_type": "code",
|
| 200 |
+
"execution_count": null,
|
| 201 |
+
"metadata": {},
|
| 202 |
+
"outputs": [],
|
| 203 |
+
"source": [
|
| 204 |
+
"# Information of `32-bit floating point`\n",
|
| 205 |
+
"torch.finfo(torch.float32)"
|
| 206 |
+
]
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"cell_type": "code",
|
| 210 |
+
"execution_count": null,
|
| 211 |
+
"metadata": {},
|
| 212 |
+
"outputs": [],
|
| 213 |
+
"source": [
|
| 214 |
+
"### Information of `16-bit floating point`\n"
|
| 215 |
+
]
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"cell_type": "code",
|
| 219 |
+
"execution_count": null,
|
| 220 |
+
"metadata": {},
|
| 221 |
+
"outputs": [],
|
| 222 |
+
"source": [
|
| 223 |
+
"### Information of `64-bit floating point`\n"
|
| 224 |
+
]
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"cell_type": "markdown",
|
| 228 |
+
"metadata": {
|
| 229 |
+
"id": "3DbtWVVc_jiW"
|
| 230 |
+
},
|
| 231 |
+
"source": [
|
| 232 |
+
"### Downcasting"
|
| 233 |
+
]
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"cell_type": "code",
|
| 237 |
+
"execution_count": null,
|
| 238 |
+
"metadata": {
|
| 239 |
+
"id": "JHDsqfauBID9"
|
| 240 |
+
},
|
| 241 |
+
"outputs": [],
|
| 242 |
+
"source": [
|
| 243 |
+
"# random pytorch tensor: float32, size=1000\n",
|
| 244 |
+
"tensor_fp32 = torch.rand(1000, dtype = torch.float32)"
|
| 245 |
+
]
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"cell_type": "markdown",
|
| 249 |
+
"metadata": {},
|
| 250 |
+
"source": [
|
| 251 |
+
"**Note:** As it is random, the values you get will be different from the video."
|
| 252 |
+
]
|
| 253 |
+
},
|
| 254 |
+
{
|
| 255 |
+
"cell_type": "code",
|
| 256 |
+
"execution_count": null,
|
| 257 |
+
"metadata": {},
|
| 258 |
+
"outputs": [],
|
| 259 |
+
"source": [
|
| 260 |
+
"# first 5 elements of the random tensor\n",
|
| 261 |
+
"tensor_fp32[:5]"
|
| 262 |
+
]
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"cell_type": "code",
|
| 266 |
+
"execution_count": null,
|
| 267 |
+
"metadata": {
|
| 268 |
+
"id": "xXLANbSx_nx4"
|
| 269 |
+
},
|
| 270 |
+
"outputs": [],
|
| 271 |
+
"source": [
|
| 272 |
+
"# downcast the tensor to bfloat16 using the \"to\" method\n",
|
| 273 |
+
"tensor_fp32_to_bf16 = tensor_fp32.to(dtype = torch.bfloat16)"
|
| 274 |
+
]
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"cell_type": "code",
|
| 278 |
+
"execution_count": null,
|
| 279 |
+
"metadata": {},
|
| 280 |
+
"outputs": [],
|
| 281 |
+
"source": [
|
| 282 |
+
"tensor_fp32_to_bf16[:5]"
|
| 283 |
+
]
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"cell_type": "code",
|
| 287 |
+
"execution_count": null,
|
| 288 |
+
"metadata": {},
|
| 289 |
+
"outputs": [],
|
| 290 |
+
"source": [
|
| 291 |
+
"# tensor_fp32 x tensor_fp32\n",
|
| 292 |
+
"m_float32 = torch.dot(tensor_fp32, tensor_fp32)"
|
| 293 |
+
]
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"cell_type": "code",
|
| 297 |
+
"execution_count": null,
|
| 298 |
+
"metadata": {},
|
| 299 |
+
"outputs": [],
|
| 300 |
+
"source": [
|
| 301 |
+
"m_float32"
|
| 302 |
+
]
|
| 303 |
+
},
|
| 304 |
+
{
|
| 305 |
+
"cell_type": "code",
|
| 306 |
+
"execution_count": null,
|
| 307 |
+
"metadata": {},
|
| 308 |
+
"outputs": [],
|
| 309 |
+
"source": [
|
| 310 |
+
"# tensor_fp32_to_bf16 x tensor_fp32_to_bf16\n",
|
| 311 |
+
"m_bfloat16 = torch.dot(tensor_fp32_to_bf16, tensor_fp32_to_bf16)"
|
| 312 |
+
]
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"cell_type": "code",
|
| 316 |
+
"execution_count": null,
|
| 317 |
+
"metadata": {},
|
| 318 |
+
"outputs": [],
|
| 319 |
+
"source": [
|
| 320 |
+
"m_bfloat16"
|
| 321 |
+
]
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"cell_type": "markdown",
|
| 325 |
+
"metadata": {},
|
| 326 |
+
"source": [
|
| 327 |
+
"#### Note\n",
|
| 328 |
+
"- You'll use \"downcasting\" as a simple form of quantization in the next lesson."
|
| 329 |
+
]
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"cell_type": "code",
|
| 333 |
+
"execution_count": null,
|
| 334 |
+
"metadata": {},
|
| 335 |
+
"outputs": [],
|
| 336 |
+
"source": []
|
| 337 |
+
}
|
| 338 |
+
],
|
| 339 |
+
"metadata": {
|
| 340 |
+
"accelerator": "GPU",
|
| 341 |
+
"colab": {
|
| 342 |
+
"authorship_tag": "ABX9TyNKuOYgD21Zty8Z1Smilnjc",
|
| 343 |
+
"collapsed_sections": [
|
| 344 |
+
"x_9J8WavkQGl",
|
| 345 |
+
"sulufN1wkK_L",
|
| 346 |
+
"hqPUM0f8oGgf",
|
| 347 |
+
"TgmPMm-ZvdXX",
|
| 348 |
+
"MJuh8aqo9LO-",
|
| 349 |
+
"3bOevU0Ez4KB",
|
| 350 |
+
"LgtYIhSf0Uu0",
|
| 351 |
+
"9DH7-tDDvK5N",
|
| 352 |
+
"TuZhoPK6weuR",
|
| 353 |
+
"WAOeSNraxZeF",
|
| 354 |
+
"UGXo-IljxmNG",
|
| 355 |
+
"_N3G-dX32awT",
|
| 356 |
+
"hW7psx41rn0h",
|
| 357 |
+
"OmcNPw6zlRp7",
|
| 358 |
+
"3_zvORGrnTR8",
|
| 359 |
+
"jh9IuiovrnoF",
|
| 360 |
+
"isDcbkXxxiTf",
|
| 361 |
+
"wM-xkASw1odi",
|
| 362 |
+
"3DbtWVVc_jiW",
|
| 363 |
+
"ViImpV5rAvyp",
|
| 364 |
+
"RdcyknnjBD99",
|
| 365 |
+
"-eYj4UUXCAlJ",
|
| 366 |
+
"MCLe1N4GCSQT",
|
| 367 |
+
"GIY7IrOv_3cD",
|
| 368 |
+
"8HlFKDBKGNG8"
|
| 369 |
+
],
|
| 370 |
+
"gpuType": "T4",
|
| 371 |
+
"provenance": []
|
| 372 |
+
},
|
| 373 |
+
"kernelspec": {
|
| 374 |
+
"display_name": "Python 3 (ipykernel)",
|
| 375 |
+
"language": "python",
|
| 376 |
+
"name": "python3"
|
| 377 |
+
},
|
| 378 |
+
"language_info": {
|
| 379 |
+
"codemirror_mode": {
|
| 380 |
+
"name": "ipython",
|
| 381 |
+
"version": 3
|
| 382 |
+
},
|
| 383 |
+
"file_extension": ".py",
|
| 384 |
+
"mimetype": "text/x-python",
|
| 385 |
+
"name": "python",
|
| 386 |
+
"nbconvert_exporter": "python",
|
| 387 |
+
"pygments_lexer": "ipython3",
|
| 388 |
+
"version": "3.10.9"
|
| 389 |
+
}
|
| 390 |
+
},
|
| 391 |
+
"nbformat": 4,
|
| 392 |
+
"nbformat_minor": 4
|
| 393 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/2_Data Types and Sizes_requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# requirements file
|
| 2 |
+
# For Python 3.9.18
|
| 3 |
+
|
| 4 |
+
accelerate==0.26.1
|
| 5 |
+
ipython==8.18.1
|
| 6 |
+
ipywidgets==8.1.2
|
| 7 |
+
numpy==1.23.5
|
| 8 |
+
Pillow==9.4.0
|
| 9 |
+
quanto==0.0.11
|
| 10 |
+
torch==2.1.1
|
| 11 |
+
transformers==4.35.0
|
| 12 |
+
sentencepiece==0.2.0
|
Hugging Face.Quantization Fundamentals/Materials/3_Loading Models by data type_L3_models_with_different_data_types.ipynb
ADDED
|
@@ -0,0 +1,1516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {
|
| 6 |
+
"id": "VPau94vm3IpB"
|
| 7 |
+
},
|
| 8 |
+
"source": [
|
| 9 |
+
"# Lesson 3: Loading ML Models with Different Data Types\n",
|
| 10 |
+
"\n",
|
| 11 |
+
"In this lab, you will load ML models in different data types."
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"cell_type": "markdown",
|
| 16 |
+
"metadata": {
|
| 17 |
+
"id": "XwU9odrv4uph"
|
| 18 |
+
},
|
| 19 |
+
"source": [
|
| 20 |
+
"- Load the Dummy Model from the helper file.\n",
|
| 21 |
+
"- To access the `helper.py` file, you can click `File --> Open...`, on the top left."
|
| 22 |
+
]
|
| 23 |
+
},
|
| 24 |
+
{
|
| 25 |
+
"cell_type": "code",
|
| 26 |
+
"execution_count": null,
|
| 27 |
+
"metadata": {},
|
| 28 |
+
"outputs": [],
|
| 29 |
+
"source": [
|
| 30 |
+
"#123456789#123456789#123456789#123456789#123456789#123456789#123456789"
|
| 31 |
+
]
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"cell_type": "code",
|
| 35 |
+
"execution_count": null,
|
| 36 |
+
"metadata": {
|
| 37 |
+
"id": "sNkJ_nJd4F7F"
|
| 38 |
+
},
|
| 39 |
+
"outputs": [],
|
| 40 |
+
"source": [
|
| 41 |
+
"from helper import DummyModel"
|
| 42 |
+
]
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"cell_type": "code",
|
| 46 |
+
"execution_count": null,
|
| 47 |
+
"metadata": {
|
| 48 |
+
"colab": {
|
| 49 |
+
"base_uri": "https://localhost:8080/"
|
| 50 |
+
},
|
| 51 |
+
"executionInfo": {
|
| 52 |
+
"elapsed": 433,
|
| 53 |
+
"status": "ok",
|
| 54 |
+
"timestamp": 1699380416965,
|
| 55 |
+
"user": {
|
| 56 |
+
"displayName": "Younes Belkada",
|
| 57 |
+
"userId": "15414910276690549281"
|
| 58 |
+
},
|
| 59 |
+
"user_tz": -60
|
| 60 |
+
},
|
| 61 |
+
"id": "xhMZuiiE4tR5",
|
| 62 |
+
"outputId": "2f36a01c-fb85-49f6-ee0d-36b5777167b4"
|
| 63 |
+
},
|
| 64 |
+
"outputs": [],
|
| 65 |
+
"source": [
|
| 66 |
+
"model = DummyModel()"
|
| 67 |
+
]
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"cell_type": "code",
|
| 71 |
+
"execution_count": null,
|
| 72 |
+
"metadata": {},
|
| 73 |
+
"outputs": [],
|
| 74 |
+
"source": [
|
| 75 |
+
"model"
|
| 76 |
+
]
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"cell_type": "markdown",
|
| 80 |
+
"metadata": {
|
| 81 |
+
"id": "dJoz_iQM5bgV"
|
| 82 |
+
},
|
| 83 |
+
"source": [
|
| 84 |
+
"- Create a function to inspect the data types of the parameters in a model."
|
| 85 |
+
]
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"cell_type": "code",
|
| 89 |
+
"execution_count": null,
|
| 90 |
+
"metadata": {
|
| 91 |
+
"colab": {
|
| 92 |
+
"base_uri": "https://localhost:8080/"
|
| 93 |
+
},
|
| 94 |
+
"executionInfo": {
|
| 95 |
+
"elapsed": 293,
|
| 96 |
+
"status": "ok",
|
| 97 |
+
"timestamp": 1699380006138,
|
| 98 |
+
"user": {
|
| 99 |
+
"displayName": "Younes Belkada",
|
| 100 |
+
"userId": "15414910276690549281"
|
| 101 |
+
},
|
| 102 |
+
"user_tz": -60
|
| 103 |
+
},
|
| 104 |
+
"id": "KVci0pER5ccu",
|
| 105 |
+
"outputId": "7f4a6fc1-b168-42a2-baf4-ff5f00fba731"
|
| 106 |
+
},
|
| 107 |
+
"outputs": [],
|
| 108 |
+
"source": [
|
| 109 |
+
"def print_param_dtype(model):\n",
|
| 110 |
+
" for name, param in model.named_parameters():\n",
|
| 111 |
+
" print(f\"{name} is loaded in {param.dtype}\")"
|
| 112 |
+
]
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"cell_type": "code",
|
| 116 |
+
"execution_count": null,
|
| 117 |
+
"metadata": {
|
| 118 |
+
"scrolled": true
|
| 119 |
+
},
|
| 120 |
+
"outputs": [],
|
| 121 |
+
"source": [
|
| 122 |
+
"print_param_dtype(model)"
|
| 123 |
+
]
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"cell_type": "markdown",
|
| 127 |
+
"metadata": {
|
| 128 |
+
"id": "nN792T445lG2"
|
| 129 |
+
},
|
| 130 |
+
"source": [
|
| 131 |
+
"## Model Casting: `float16`\n",
|
| 132 |
+
"\n",
|
| 133 |
+
"- Cast the model into a different precision."
|
| 134 |
+
]
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"cell_type": "code",
|
| 138 |
+
"execution_count": null,
|
| 139 |
+
"metadata": {
|
| 140 |
+
"id": "-_qUBPhy53L5"
|
| 141 |
+
},
|
| 142 |
+
"outputs": [],
|
| 143 |
+
"source": [
|
| 144 |
+
"# float 16\n",
|
| 145 |
+
"model_fp16 = DummyModel().half()"
|
| 146 |
+
]
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"cell_type": "markdown",
|
| 150 |
+
"metadata": {},
|
| 151 |
+
"source": [
|
| 152 |
+
"- Inspect the data types of the parameters."
|
| 153 |
+
]
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"cell_type": "code",
|
| 157 |
+
"execution_count": null,
|
| 158 |
+
"metadata": {},
|
| 159 |
+
"outputs": [],
|
| 160 |
+
"source": [
|
| 161 |
+
"print_param_dtype(model_fp16)"
|
| 162 |
+
]
|
| 163 |
+
},
|
| 164 |
+
{
|
| 165 |
+
"cell_type": "code",
|
| 166 |
+
"execution_count": null,
|
| 167 |
+
"metadata": {},
|
| 168 |
+
"outputs": [],
|
| 169 |
+
"source": [
|
| 170 |
+
"model_fp16"
|
| 171 |
+
]
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"cell_type": "markdown",
|
| 175 |
+
"metadata": {
|
| 176 |
+
"id": "zSlCqQL274P_"
|
| 177 |
+
},
|
| 178 |
+
"source": [
|
| 179 |
+
"- Run simple inference using model."
|
| 180 |
+
]
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"cell_type": "code",
|
| 184 |
+
"execution_count": null,
|
| 185 |
+
"metadata": {},
|
| 186 |
+
"outputs": [],
|
| 187 |
+
"source": [
|
| 188 |
+
"import torch"
|
| 189 |
+
]
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"cell_type": "code",
|
| 193 |
+
"execution_count": null,
|
| 194 |
+
"metadata": {},
|
| 195 |
+
"outputs": [],
|
| 196 |
+
"source": [
|
| 197 |
+
"dummy_input = torch.LongTensor([[1, 0], [0, 1]])"
|
| 198 |
+
]
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"cell_type": "code",
|
| 202 |
+
"execution_count": null,
|
| 203 |
+
"metadata": {
|
| 204 |
+
"colab": {
|
| 205 |
+
"base_uri": "https://localhost:8080/",
|
| 206 |
+
"height": 356
|
| 207 |
+
},
|
| 208 |
+
"executionInfo": {
|
| 209 |
+
"elapsed": 233,
|
| 210 |
+
"status": "error",
|
| 211 |
+
"timestamp": 1699380421192,
|
| 212 |
+
"user": {
|
| 213 |
+
"displayName": "Younes Belkada",
|
| 214 |
+
"userId": "15414910276690549281"
|
| 215 |
+
},
|
| 216 |
+
"user_tz": -60
|
| 217 |
+
},
|
| 218 |
+
"id": "5wcBVd_N70oH",
|
| 219 |
+
"outputId": "e5f36295-2086-4a67-94d1-49e438d1c17d"
|
| 220 |
+
},
|
| 221 |
+
"outputs": [],
|
| 222 |
+
"source": [
|
| 223 |
+
"# inference using float32 model\n",
|
| 224 |
+
"logits_fp32 = model(dummy_input)"
|
| 225 |
+
]
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"cell_type": "code",
|
| 229 |
+
"execution_count": null,
|
| 230 |
+
"metadata": {
|
| 231 |
+
"id": "B3I1G2zy8d1x"
|
| 232 |
+
},
|
| 233 |
+
"outputs": [],
|
| 234 |
+
"source": [
|
| 235 |
+
"logits_fp32"
|
| 236 |
+
]
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"cell_type": "code",
|
| 240 |
+
"execution_count": null,
|
| 241 |
+
"metadata": {
|
| 242 |
+
"id": "siPMcJRX84WZ"
|
| 243 |
+
},
|
| 244 |
+
"outputs": [],
|
| 245 |
+
"source": [
|
| 246 |
+
"# inference using float16 model\n",
|
| 247 |
+
"try:\n",
|
| 248 |
+
" logits_fp16 = model_fp16(dummy_input)\n",
|
| 249 |
+
"except Exception as error:\n",
|
| 250 |
+
" print(\"\\033[91m\", type(error).__name__, \": \", error, \"\\033[0m\")"
|
| 251 |
+
]
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"cell_type": "markdown",
|
| 255 |
+
"metadata": {
|
| 256 |
+
"id": "D8U4TCxb9NR0"
|
| 257 |
+
},
|
| 258 |
+
"source": [
|
| 259 |
+
"## Model Casting: `bfloat16`\n",
|
| 260 |
+
"\n",
|
| 261 |
+
"#### Note about deepcopy\n",
|
| 262 |
+
"- `copy.deepcopy` makes a copy of the model that is independent of the original. Modifications you make to the copy will not affect the original, because you're making a \"deep copy\". For more details, see the Python docs on the [copy](https://docs.python.org/3/library/copy.html) library."
|
| 263 |
+
]
|
| 264 |
+
},
|
| 265 |
+
{
|
| 266 |
+
"cell_type": "code",
|
| 267 |
+
"execution_count": null,
|
| 268 |
+
"metadata": {},
|
| 269 |
+
"outputs": [],
|
| 270 |
+
"source": [
|
| 271 |
+
"from copy import deepcopy"
|
| 272 |
+
]
|
| 273 |
+
},
|
| 274 |
+
{
|
| 275 |
+
"cell_type": "code",
|
| 276 |
+
"execution_count": null,
|
| 277 |
+
"metadata": {},
|
| 278 |
+
"outputs": [],
|
| 279 |
+
"source": [
|
| 280 |
+
"model_bf16 = deepcopy(model)"
|
| 281 |
+
]
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"cell_type": "code",
|
| 285 |
+
"execution_count": null,
|
| 286 |
+
"metadata": {},
|
| 287 |
+
"outputs": [],
|
| 288 |
+
"source": [
|
| 289 |
+
"model_bf16 = model_bf16.to(torch.bfloat16)"
|
| 290 |
+
]
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"cell_type": "code",
|
| 294 |
+
"execution_count": null,
|
| 295 |
+
"metadata": {},
|
| 296 |
+
"outputs": [],
|
| 297 |
+
"source": [
|
| 298 |
+
"print_param_dtype(model_bf16)"
|
| 299 |
+
]
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"cell_type": "code",
|
| 303 |
+
"execution_count": null,
|
| 304 |
+
"metadata": {},
|
| 305 |
+
"outputs": [],
|
| 306 |
+
"source": [
|
| 307 |
+
"logits_bf16 = model_bf16(dummy_input)"
|
| 308 |
+
]
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"cell_type": "markdown",
|
| 312 |
+
"metadata": {
|
| 313 |
+
"id": "krxuhBKx9tcl"
|
| 314 |
+
},
|
| 315 |
+
"source": [
|
| 316 |
+
"- Now, compare the difference between `logits_fp32` and `logits_bf16`."
|
| 317 |
+
]
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"cell_type": "code",
|
| 321 |
+
"execution_count": null,
|
| 322 |
+
"metadata": {
|
| 323 |
+
"colab": {
|
| 324 |
+
"base_uri": "https://localhost:8080/"
|
| 325 |
+
},
|
| 326 |
+
"executionInfo": {
|
| 327 |
+
"elapsed": 2,
|
| 328 |
+
"status": "ok",
|
| 329 |
+
"timestamp": 1699380547898,
|
| 330 |
+
"user": {
|
| 331 |
+
"displayName": "Younes Belkada",
|
| 332 |
+
"userId": "15414910276690549281"
|
| 333 |
+
},
|
| 334 |
+
"user_tz": -60
|
| 335 |
+
},
|
| 336 |
+
"id": "SkasCWBH-Dm5",
|
| 337 |
+
"outputId": "193b71af-d66a-4030-ff62-e86ebd062ead"
|
| 338 |
+
},
|
| 339 |
+
"outputs": [],
|
| 340 |
+
"source": [
|
| 341 |
+
"mean_diff = torch.abs(logits_bf16 - logits_fp32).mean().item()\n",
|
| 342 |
+
"max_diff = torch.abs(logits_bf16 - logits_fp32).max().item()\n",
|
| 343 |
+
"\n",
|
| 344 |
+
"print(f\"Mean diff: {mean_diff} | Max diff: {max_diff}\")"
|
| 345 |
+
]
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"cell_type": "markdown",
|
| 349 |
+
"metadata": {
|
| 350 |
+
"id": "sdN9QpMN-mUw"
|
| 351 |
+
},
|
| 352 |
+
"source": [
|
| 353 |
+
"## Using Popular Generative Models in Different Data Types"
|
| 354 |
+
]
|
| 355 |
+
},
|
| 356 |
+
{
|
| 357 |
+
"cell_type": "markdown",
|
| 358 |
+
"metadata": {
|
| 359 |
+
"id": "GmFTCvcNANQX"
|
| 360 |
+
},
|
| 361 |
+
"source": [
|
| 362 |
+
"- Load [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base) to perform image captioning.\n",
|
| 363 |
+
"\n",
|
| 364 |
+
"#### To get the sample code that Younes showed:\n",
|
| 365 |
+
"- Click on the \"Model Card\" tab.\n",
|
| 366 |
+
"- On the right, click on the button \"<> Use in Transformers\", you'll see a popup with sample code for loading this model.\n",
|
| 367 |
+
"\n",
|
| 368 |
+
"```Python\n",
|
| 369 |
+
"# Load model directly\n",
|
| 370 |
+
"from transformers import AutoProcessor, AutoModelForSeq2SeqLM\n",
|
| 371 |
+
"\n",
|
| 372 |
+
"processor = AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n",
|
| 373 |
+
"model = AutoModelForSeq2SeqLM.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n",
|
| 374 |
+
"```\n",
|
| 375 |
+
"\n",
|
| 376 |
+
"- To see the sample code with an example, click on \"Read model documentation\" at the bottom of the popup. It opens a new tab.\n",
|
| 377 |
+
" https://huggingface.co/docs/transformers/main/en/model_doc/blip#transformers.BlipForConditionalGeneration\n",
|
| 378 |
+
"- On this page, scroll down a bit, past the \"parameters\", section, and you'll see \"Examples:\"\n",
|
| 379 |
+
"\n",
|
| 380 |
+
"```Python\n",
|
| 381 |
+
"from PIL import Image\n",
|
| 382 |
+
"import requests\n",
|
| 383 |
+
"from transformers import AutoProcessor, BlipForConditionalGeneration\n",
|
| 384 |
+
"\n",
|
| 385 |
+
"processor = AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n",
|
| 386 |
+
"model = BlipForConditionalGeneration.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n",
|
| 387 |
+
"\n",
|
| 388 |
+
"url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n",
|
| 389 |
+
"image = Image.open(requests.get(url, stream=True).raw)\n",
|
| 390 |
+
"text = \"A picture of\"\n",
|
| 391 |
+
"\n",
|
| 392 |
+
"inputs = processor(images=image, text=text, return_tensors=\"pt\")\n",
|
| 393 |
+
"\n",
|
| 394 |
+
"outputs = model(**inputs)\n",
|
| 395 |
+
"```"
|
| 396 |
+
]
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"cell_type": "code",
|
| 400 |
+
"execution_count": null,
|
| 401 |
+
"metadata": {
|
| 402 |
+
"colab": {
|
| 403 |
+
"base_uri": "https://localhost:8080/",
|
| 404 |
+
"height": 81,
|
| 405 |
+
"referenced_widgets": [
|
| 406 |
+
"426d8aec5ed44782963707817195db5d",
|
| 407 |
+
"51a84bbc69874d5bb065701aba6c5a22",
|
| 408 |
+
"738c5fefbd4842ebacf8d0248a4e5bdd",
|
| 409 |
+
"65b5176702244e338c1120e702f3ca96",
|
| 410 |
+
"cebfa21c01524fa89ea124485867d8cc",
|
| 411 |
+
"bc254bc2525f43f7bea70b2f6f515288",
|
| 412 |
+
"f3b1dc11f2a44da3a5db8499c1401288",
|
| 413 |
+
"b3140b792b3c47c68907d7a86259e389",
|
| 414 |
+
"5070aad508574d0da4f667885e9f8e6d",
|
| 415 |
+
"ae278dae0c484e85831aafd7a4ad6484",
|
| 416 |
+
"1b3d91e17c0a4435860477f0593fc014",
|
| 417 |
+
"b41130569642470ca5efb531e7652e17",
|
| 418 |
+
"afac2f5a68c946b387f30e914a62729d",
|
| 419 |
+
"4fecdb69b5f942449767d63c2bbb2dc9",
|
| 420 |
+
"a3333106642d40d9a3066e9e2d79a650",
|
| 421 |
+
"0a1f3685b7264cc595355cd906cb0e4d",
|
| 422 |
+
"9dda2b50e90446438835f902b2b9d4b1",
|
| 423 |
+
"21c6e937405d42048aa5607dbe779465",
|
| 424 |
+
"47d756b3efba43a8a45b48cac04cf929",
|
| 425 |
+
"d0d8b31e3c6b42318d804cc67a5aacea",
|
| 426 |
+
"f3ac895d29454e58bffbde53fc00a05e",
|
| 427 |
+
"b076ffc6b36a46c684557086a8c16582"
|
| 428 |
+
]
|
| 429 |
+
},
|
| 430 |
+
"executionInfo": {
|
| 431 |
+
"elapsed": 14495,
|
| 432 |
+
"status": "ok",
|
| 433 |
+
"timestamp": 1699381117014,
|
| 434 |
+
"user": {
|
| 435 |
+
"displayName": "Younes Belkada",
|
| 436 |
+
"userId": "15414910276690549281"
|
| 437 |
+
},
|
| 438 |
+
"user_tz": -60
|
| 439 |
+
},
|
| 440 |
+
"id": "sJ8__4_sAMKT",
|
| 441 |
+
"outputId": "e18cad23-6bd5-4bf8-d059-586f8b772c9f"
|
| 442 |
+
},
|
| 443 |
+
"outputs": [],
|
| 444 |
+
"source": [
|
| 445 |
+
"from transformers import BlipForConditionalGeneration"
|
| 446 |
+
]
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"cell_type": "code",
|
| 450 |
+
"execution_count": null,
|
| 451 |
+
"metadata": {},
|
| 452 |
+
"outputs": [],
|
| 453 |
+
"source": [
|
| 454 |
+
"model_name = \"Salesforce/blip-image-captioning-base\""
|
| 455 |
+
]
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"cell_type": "code",
|
| 459 |
+
"execution_count": null,
|
| 460 |
+
"metadata": {},
|
| 461 |
+
"outputs": [],
|
| 462 |
+
"source": [
|
| 463 |
+
"model = BlipForConditionalGeneration.from_pretrained(model_name)"
|
| 464 |
+
]
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"cell_type": "code",
|
| 468 |
+
"execution_count": null,
|
| 469 |
+
"metadata": {
|
| 470 |
+
"id": "Ptm1kGMTA8dB"
|
| 471 |
+
},
|
| 472 |
+
"outputs": [],
|
| 473 |
+
"source": [
|
| 474 |
+
"# inspect the default data types of the model\n",
|
| 475 |
+
"\n",
|
| 476 |
+
"# print_param_dtype(model)"
|
| 477 |
+
]
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"cell_type": "markdown",
|
| 481 |
+
"metadata": {
|
| 482 |
+
"id": "_YYQNKfnAgsb"
|
| 483 |
+
},
|
| 484 |
+
"source": [
|
| 485 |
+
"- Check the memory footprint of the model. "
|
| 486 |
+
]
|
| 487 |
+
},
|
| 488 |
+
{
|
| 489 |
+
"cell_type": "code",
|
| 490 |
+
"execution_count": null,
|
| 491 |
+
"metadata": {},
|
| 492 |
+
"outputs": [],
|
| 493 |
+
"source": [
|
| 494 |
+
"fp32_mem_footprint = model.get_memory_footprint()"
|
| 495 |
+
]
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"cell_type": "code",
|
| 499 |
+
"execution_count": null,
|
| 500 |
+
"metadata": {},
|
| 501 |
+
"outputs": [],
|
| 502 |
+
"source": [
|
| 503 |
+
"print(\"Footprint of the fp32 model in bytes: \",\n",
|
| 504 |
+
" fp32_mem_footprint)\n",
|
| 505 |
+
"print(\"Footprint of the fp32 model in MBs: \", \n",
|
| 506 |
+
" fp32_mem_footprint/1e+6)"
|
| 507 |
+
]
|
| 508 |
+
},
|
| 509 |
+
{
|
| 510 |
+
"cell_type": "markdown",
|
| 511 |
+
"metadata": {
|
| 512 |
+
"id": "N7GgA7mnAuXV"
|
| 513 |
+
},
|
| 514 |
+
"source": [
|
| 515 |
+
"- Load the same model in `bfloat16`."
|
| 516 |
+
]
|
| 517 |
+
},
|
| 518 |
+
{
|
| 519 |
+
"cell_type": "code",
|
| 520 |
+
"execution_count": null,
|
| 521 |
+
"metadata": {
|
| 522 |
+
"colab": {
|
| 523 |
+
"base_uri": "https://localhost:8080/"
|
| 524 |
+
},
|
| 525 |
+
"executionInfo": {
|
| 526 |
+
"elapsed": 9,
|
| 527 |
+
"status": "ok",
|
| 528 |
+
"timestamp": 1699381195895,
|
| 529 |
+
"user": {
|
| 530 |
+
"displayName": "Younes Belkada",
|
| 531 |
+
"userId": "15414910276690549281"
|
| 532 |
+
},
|
| 533 |
+
"user_tz": -60
|
| 534 |
+
},
|
| 535 |
+
"id": "9v4XJcvLAt5o",
|
| 536 |
+
"outputId": "abbc3fd9-f8a9-4c5e-dd3d-61ddcf316d27"
|
| 537 |
+
},
|
| 538 |
+
"outputs": [],
|
| 539 |
+
"source": [
|
| 540 |
+
"model_bf16 = BlipForConditionalGeneration.from_pretrained(\n",
|
| 541 |
+
" model_name,\n",
|
| 542 |
+
" torch_dtype=torch.bfloat16\n",
|
| 543 |
+
")"
|
| 544 |
+
]
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"cell_type": "code",
|
| 548 |
+
"execution_count": null,
|
| 549 |
+
"metadata": {
|
| 550 |
+
"id": "pVnUmBBABJxB"
|
| 551 |
+
},
|
| 552 |
+
"outputs": [],
|
| 553 |
+
"source": [
|
| 554 |
+
"bf16_mem_footprint = model_bf16.get_memory_footprint()"
|
| 555 |
+
]
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"cell_type": "code",
|
| 559 |
+
"execution_count": null,
|
| 560 |
+
"metadata": {
|
| 561 |
+
"colab": {
|
| 562 |
+
"base_uri": "https://localhost:8080/"
|
| 563 |
+
},
|
| 564 |
+
"executionInfo": {
|
| 565 |
+
"elapsed": 6,
|
| 566 |
+
"status": "ok",
|
| 567 |
+
"timestamp": 1699381416456,
|
| 568 |
+
"user": {
|
| 569 |
+
"displayName": "Younes Belkada",
|
| 570 |
+
"userId": "15414910276690549281"
|
| 571 |
+
},
|
| 572 |
+
"user_tz": -60
|
| 573 |
+
},
|
| 574 |
+
"id": "6Co7vpcYBl60",
|
| 575 |
+
"outputId": "c282be1e-95b9-4591-f56f-945a2587dd1e"
|
| 576 |
+
},
|
| 577 |
+
"outputs": [],
|
| 578 |
+
"source": [
|
| 579 |
+
"# Get the relative difference\n",
|
| 580 |
+
"relative_diff = bf16_mem_footprint / fp32_mem_footprint\n",
|
| 581 |
+
"\n",
|
| 582 |
+
"print(\"Footprint of the bf16 model in MBs: \", \n",
|
| 583 |
+
" bf16_mem_footprint/1e+6)\n",
|
| 584 |
+
"print(f\"Relative diff: {relative_diff}\")"
|
| 585 |
+
]
|
| 586 |
+
},
|
| 587 |
+
{
|
| 588 |
+
"cell_type": "markdown",
|
| 589 |
+
"metadata": {
|
| 590 |
+
"id": "GeGHdNROBqPa"
|
| 591 |
+
},
|
| 592 |
+
"source": [
|
| 593 |
+
"### Model Performance: `float32` vs `bfloat16`"
|
| 594 |
+
]
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"cell_type": "markdown",
|
| 598 |
+
"metadata": {
|
| 599 |
+
"id": "eT7n0ybhCNOk"
|
| 600 |
+
},
|
| 601 |
+
"source": [
|
| 602 |
+
"- Now, compare the generation results of the two models."
|
| 603 |
+
]
|
| 604 |
+
},
|
| 605 |
+
{
|
| 606 |
+
"cell_type": "code",
|
| 607 |
+
"execution_count": null,
|
| 608 |
+
"metadata": {
|
| 609 |
+
"colab": {
|
| 610 |
+
"base_uri": "https://localhost:8080/"
|
| 611 |
+
},
|
| 612 |
+
"executionInfo": {
|
| 613 |
+
"elapsed": 8317,
|
| 614 |
+
"status": "ok",
|
| 615 |
+
"timestamp": 1699381678824,
|
| 616 |
+
"user": {
|
| 617 |
+
"displayName": "Younes Belkada",
|
| 618 |
+
"userId": "15414910276690549281"
|
| 619 |
+
},
|
| 620 |
+
"user_tz": -60
|
| 621 |
+
},
|
| 622 |
+
"id": "m3z3N6k3CN_B",
|
| 623 |
+
"outputId": "b255e213-ccb8-43f9-9fb9-d31558e885b2"
|
| 624 |
+
},
|
| 625 |
+
"outputs": [],
|
| 626 |
+
"source": [
|
| 627 |
+
"from transformers import BlipProcessor"
|
| 628 |
+
]
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"cell_type": "code",
|
| 632 |
+
"execution_count": null,
|
| 633 |
+
"metadata": {},
|
| 634 |
+
"outputs": [],
|
| 635 |
+
"source": [
|
| 636 |
+
"processor = BlipProcessor.from_pretrained(model_name)"
|
| 637 |
+
]
|
| 638 |
+
},
|
| 639 |
+
{
|
| 640 |
+
"cell_type": "markdown",
|
| 641 |
+
"metadata": {},
|
| 642 |
+
"source": [
|
| 643 |
+
"- Load the image."
|
| 644 |
+
]
|
| 645 |
+
},
|
| 646 |
+
{
|
| 647 |
+
"cell_type": "code",
|
| 648 |
+
"execution_count": null,
|
| 649 |
+
"metadata": {},
|
| 650 |
+
"outputs": [],
|
| 651 |
+
"source": [
|
| 652 |
+
"from helper import load_image, get_generation\n",
|
| 653 |
+
"from IPython.display import display\n",
|
| 654 |
+
"\n",
|
| 655 |
+
"img_url = 'https://storage.googleapis.com/\\\n",
|
| 656 |
+
"sfr-vision-language-research/BLIP/demo.jpg'\n",
|
| 657 |
+
"\n",
|
| 658 |
+
"image = load_image(img_url)\n",
|
| 659 |
+
"display(image.resize((500, 350)))"
|
| 660 |
+
]
|
| 661 |
+
},
|
| 662 |
+
{
|
| 663 |
+
"cell_type": "code",
|
| 664 |
+
"execution_count": null,
|
| 665 |
+
"metadata": {},
|
| 666 |
+
"outputs": [],
|
| 667 |
+
"source": [
|
| 668 |
+
"results_fp32 = get_generation(model, \n",
|
| 669 |
+
" processor, \n",
|
| 670 |
+
" image, \n",
|
| 671 |
+
" torch.float32)"
|
| 672 |
+
]
|
| 673 |
+
},
|
| 674 |
+
{
|
| 675 |
+
"cell_type": "code",
|
| 676 |
+
"execution_count": null,
|
| 677 |
+
"metadata": {},
|
| 678 |
+
"outputs": [],
|
| 679 |
+
"source": [
|
| 680 |
+
"print(\"fp32 Model Results:\\n\", results_fp32)"
|
| 681 |
+
]
|
| 682 |
+
},
|
| 683 |
+
{
|
| 684 |
+
"cell_type": "code",
|
| 685 |
+
"execution_count": null,
|
| 686 |
+
"metadata": {},
|
| 687 |
+
"outputs": [],
|
| 688 |
+
"source": [
|
| 689 |
+
"results_bf16 = get_generation(model_bf16, \n",
|
| 690 |
+
" processor, \n",
|
| 691 |
+
" image, \n",
|
| 692 |
+
" torch.bfloat16)"
|
| 693 |
+
]
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"cell_type": "code",
|
| 697 |
+
"execution_count": null,
|
| 698 |
+
"metadata": {},
|
| 699 |
+
"outputs": [],
|
| 700 |
+
"source": [
|
| 701 |
+
"print(\"bf16 Model Results:\\n\", results_bf16)"
|
| 702 |
+
]
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"cell_type": "markdown",
|
| 706 |
+
"metadata": {},
|
| 707 |
+
"source": [
|
| 708 |
+
"### Default Data Type\n",
|
| 709 |
+
"\n",
|
| 710 |
+
"- For the Hugging Face Transformers library, the default data type to load the models in is `float32`\n",
|
| 711 |
+
"- You can set the \"default data type\" as what you want."
|
| 712 |
+
]
|
| 713 |
+
},
|
| 714 |
+
{
|
| 715 |
+
"cell_type": "code",
|
| 716 |
+
"execution_count": null,
|
| 717 |
+
"metadata": {
|
| 718 |
+
"colab": {
|
| 719 |
+
"base_uri": "https://localhost:8080/"
|
| 720 |
+
},
|
| 721 |
+
"executionInfo": {
|
| 722 |
+
"elapsed": 212,
|
| 723 |
+
"status": "ok",
|
| 724 |
+
"timestamp": 1699382665501,
|
| 725 |
+
"user": {
|
| 726 |
+
"displayName": "Younes Belkada",
|
| 727 |
+
"userId": "15414910276690549281"
|
| 728 |
+
},
|
| 729 |
+
"user_tz": -60
|
| 730 |
+
},
|
| 731 |
+
"id": "7FlJlxH2GJzQ",
|
| 732 |
+
"outputId": "95cf9a07-c366-42e4-8e62-1858a0dd3703"
|
| 733 |
+
},
|
| 734 |
+
"outputs": [],
|
| 735 |
+
"source": [
|
| 736 |
+
"desired_dtype = torch.bfloat16\n",
|
| 737 |
+
"torch.set_default_dtype(desired_dtype)"
|
| 738 |
+
]
|
| 739 |
+
},
|
| 740 |
+
{
|
| 741 |
+
"cell_type": "code",
|
| 742 |
+
"execution_count": null,
|
| 743 |
+
"metadata": {},
|
| 744 |
+
"outputs": [],
|
| 745 |
+
"source": [
|
| 746 |
+
"dummy_model_bf16 = DummyModel()"
|
| 747 |
+
]
|
| 748 |
+
},
|
| 749 |
+
{
|
| 750 |
+
"cell_type": "code",
|
| 751 |
+
"execution_count": null,
|
| 752 |
+
"metadata": {},
|
| 753 |
+
"outputs": [],
|
| 754 |
+
"source": [
|
| 755 |
+
"print_param_dtype(dummy_model_bf16)"
|
| 756 |
+
]
|
| 757 |
+
},
|
| 758 |
+
{
|
| 759 |
+
"cell_type": "markdown",
|
| 760 |
+
"metadata": {},
|
| 761 |
+
"source": [
|
| 762 |
+
"- Similarly, you can reset the default data type to float32."
|
| 763 |
+
]
|
| 764 |
+
},
|
| 765 |
+
{
|
| 766 |
+
"cell_type": "code",
|
| 767 |
+
"execution_count": null,
|
| 768 |
+
"metadata": {},
|
| 769 |
+
"outputs": [],
|
| 770 |
+
"source": [
|
| 771 |
+
"torch.set_default_dtype(torch.float32)"
|
| 772 |
+
]
|
| 773 |
+
},
|
| 774 |
+
{
|
| 775 |
+
"cell_type": "code",
|
| 776 |
+
"execution_count": null,
|
| 777 |
+
"metadata": {},
|
| 778 |
+
"outputs": [],
|
| 779 |
+
"source": [
|
| 780 |
+
"print_param_dtype(dummy_model_bf16)"
|
| 781 |
+
]
|
| 782 |
+
},
|
| 783 |
+
{
|
| 784 |
+
"cell_type": "markdown",
|
| 785 |
+
"metadata": {},
|
| 786 |
+
"source": [
|
| 787 |
+
"### Note\n",
|
| 788 |
+
"- You just used a simple form of quantization, in which the model's parameters are saved in a more compact data type (bfloat16). During inference, the model performs its calculations in this data type, and its activations are in this data type.\n",
|
| 789 |
+
"- In the next lesson, you will use another quantization method, \"linear quantization\", which enables the quantized model to maintain performance much closer to the original model by converting from the compressed data type back to the original FP32 data type during inference."
|
| 790 |
+
]
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"cell_type": "code",
|
| 794 |
+
"execution_count": null,
|
| 795 |
+
"metadata": {},
|
| 796 |
+
"outputs": [],
|
| 797 |
+
"source": []
|
| 798 |
+
}
|
| 799 |
+
],
|
| 800 |
+
"metadata": {
|
| 801 |
+
"colab": {
|
| 802 |
+
"collapsed_sections": [
|
| 803 |
+
"-_qUBPhy53L5",
|
| 804 |
+
"N7GgA7mnAuXV"
|
| 805 |
+
],
|
| 806 |
+
"provenance": []
|
| 807 |
+
},
|
| 808 |
+
"kernelspec": {
|
| 809 |
+
"display_name": "Python 3 (ipykernel)",
|
| 810 |
+
"language": "python",
|
| 811 |
+
"name": "python3"
|
| 812 |
+
},
|
| 813 |
+
"language_info": {
|
| 814 |
+
"codemirror_mode": {
|
| 815 |
+
"name": "ipython",
|
| 816 |
+
"version": 3
|
| 817 |
+
},
|
| 818 |
+
"file_extension": ".py",
|
| 819 |
+
"mimetype": "text/x-python",
|
| 820 |
+
"name": "python",
|
| 821 |
+
"nbconvert_exporter": "python",
|
| 822 |
+
"pygments_lexer": "ipython3",
|
| 823 |
+
"version": "3.10.9"
|
| 824 |
+
},
|
| 825 |
+
"widgets": {
|
| 826 |
+
"application/vnd.jupyter.widget-state+json": {
|
| 827 |
+
"0a1f3685b7264cc595355cd906cb0e4d": {
|
| 828 |
+
"model_module": "@jupyter-widgets/base",
|
| 829 |
+
"model_module_version": "1.2.0",
|
| 830 |
+
"model_name": "LayoutModel",
|
| 831 |
+
"state": {
|
| 832 |
+
"_model_module": "@jupyter-widgets/base",
|
| 833 |
+
"_model_module_version": "1.2.0",
|
| 834 |
+
"_model_name": "LayoutModel",
|
| 835 |
+
"_view_count": null,
|
| 836 |
+
"_view_module": "@jupyter-widgets/base",
|
| 837 |
+
"_view_module_version": "1.2.0",
|
| 838 |
+
"_view_name": "LayoutView",
|
| 839 |
+
"align_content": null,
|
| 840 |
+
"align_items": null,
|
| 841 |
+
"align_self": null,
|
| 842 |
+
"border": null,
|
| 843 |
+
"bottom": null,
|
| 844 |
+
"display": null,
|
| 845 |
+
"flex": null,
|
| 846 |
+
"flex_flow": null,
|
| 847 |
+
"grid_area": null,
|
| 848 |
+
"grid_auto_columns": null,
|
| 849 |
+
"grid_auto_flow": null,
|
| 850 |
+
"grid_auto_rows": null,
|
| 851 |
+
"grid_column": null,
|
| 852 |
+
"grid_gap": null,
|
| 853 |
+
"grid_row": null,
|
| 854 |
+
"grid_template_areas": null,
|
| 855 |
+
"grid_template_columns": null,
|
| 856 |
+
"grid_template_rows": null,
|
| 857 |
+
"height": null,
|
| 858 |
+
"justify_content": null,
|
| 859 |
+
"justify_items": null,
|
| 860 |
+
"left": null,
|
| 861 |
+
"margin": null,
|
| 862 |
+
"max_height": null,
|
| 863 |
+
"max_width": null,
|
| 864 |
+
"min_height": null,
|
| 865 |
+
"min_width": null,
|
| 866 |
+
"object_fit": null,
|
| 867 |
+
"object_position": null,
|
| 868 |
+
"order": null,
|
| 869 |
+
"overflow": null,
|
| 870 |
+
"overflow_x": null,
|
| 871 |
+
"overflow_y": null,
|
| 872 |
+
"padding": null,
|
| 873 |
+
"right": null,
|
| 874 |
+
"top": null,
|
| 875 |
+
"visibility": null,
|
| 876 |
+
"width": null
|
| 877 |
+
}
|
| 878 |
+
},
|
| 879 |
+
"1b3d91e17c0a4435860477f0593fc014": {
|
| 880 |
+
"model_module": "@jupyter-widgets/controls",
|
| 881 |
+
"model_module_version": "1.5.0",
|
| 882 |
+
"model_name": "DescriptionStyleModel",
|
| 883 |
+
"state": {
|
| 884 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 885 |
+
"_model_module_version": "1.5.0",
|
| 886 |
+
"_model_name": "DescriptionStyleModel",
|
| 887 |
+
"_view_count": null,
|
| 888 |
+
"_view_module": "@jupyter-widgets/base",
|
| 889 |
+
"_view_module_version": "1.2.0",
|
| 890 |
+
"_view_name": "StyleView",
|
| 891 |
+
"description_width": ""
|
| 892 |
+
}
|
| 893 |
+
},
|
| 894 |
+
"21c6e937405d42048aa5607dbe779465": {
|
| 895 |
+
"model_module": "@jupyter-widgets/controls",
|
| 896 |
+
"model_module_version": "1.5.0",
|
| 897 |
+
"model_name": "DescriptionStyleModel",
|
| 898 |
+
"state": {
|
| 899 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 900 |
+
"_model_module_version": "1.5.0",
|
| 901 |
+
"_model_name": "DescriptionStyleModel",
|
| 902 |
+
"_view_count": null,
|
| 903 |
+
"_view_module": "@jupyter-widgets/base",
|
| 904 |
+
"_view_module_version": "1.2.0",
|
| 905 |
+
"_view_name": "StyleView",
|
| 906 |
+
"description_width": ""
|
| 907 |
+
}
|
| 908 |
+
},
|
| 909 |
+
"426d8aec5ed44782963707817195db5d": {
|
| 910 |
+
"model_module": "@jupyter-widgets/controls",
|
| 911 |
+
"model_module_version": "1.5.0",
|
| 912 |
+
"model_name": "HBoxModel",
|
| 913 |
+
"state": {
|
| 914 |
+
"_dom_classes": [],
|
| 915 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 916 |
+
"_model_module_version": "1.5.0",
|
| 917 |
+
"_model_name": "HBoxModel",
|
| 918 |
+
"_view_count": null,
|
| 919 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 920 |
+
"_view_module_version": "1.5.0",
|
| 921 |
+
"_view_name": "HBoxView",
|
| 922 |
+
"box_style": "",
|
| 923 |
+
"children": [
|
| 924 |
+
"IPY_MODEL_51a84bbc69874d5bb065701aba6c5a22",
|
| 925 |
+
"IPY_MODEL_738c5fefbd4842ebacf8d0248a4e5bdd",
|
| 926 |
+
"IPY_MODEL_65b5176702244e338c1120e702f3ca96"
|
| 927 |
+
],
|
| 928 |
+
"layout": "IPY_MODEL_cebfa21c01524fa89ea124485867d8cc"
|
| 929 |
+
}
|
| 930 |
+
},
|
| 931 |
+
"47d756b3efba43a8a45b48cac04cf929": {
|
| 932 |
+
"model_module": "@jupyter-widgets/base",
|
| 933 |
+
"model_module_version": "1.2.0",
|
| 934 |
+
"model_name": "LayoutModel",
|
| 935 |
+
"state": {
|
| 936 |
+
"_model_module": "@jupyter-widgets/base",
|
| 937 |
+
"_model_module_version": "1.2.0",
|
| 938 |
+
"_model_name": "LayoutModel",
|
| 939 |
+
"_view_count": null,
|
| 940 |
+
"_view_module": "@jupyter-widgets/base",
|
| 941 |
+
"_view_module_version": "1.2.0",
|
| 942 |
+
"_view_name": "LayoutView",
|
| 943 |
+
"align_content": null,
|
| 944 |
+
"align_items": null,
|
| 945 |
+
"align_self": null,
|
| 946 |
+
"border": null,
|
| 947 |
+
"bottom": null,
|
| 948 |
+
"display": null,
|
| 949 |
+
"flex": null,
|
| 950 |
+
"flex_flow": null,
|
| 951 |
+
"grid_area": null,
|
| 952 |
+
"grid_auto_columns": null,
|
| 953 |
+
"grid_auto_flow": null,
|
| 954 |
+
"grid_auto_rows": null,
|
| 955 |
+
"grid_column": null,
|
| 956 |
+
"grid_gap": null,
|
| 957 |
+
"grid_row": null,
|
| 958 |
+
"grid_template_areas": null,
|
| 959 |
+
"grid_template_columns": null,
|
| 960 |
+
"grid_template_rows": null,
|
| 961 |
+
"height": null,
|
| 962 |
+
"justify_content": null,
|
| 963 |
+
"justify_items": null,
|
| 964 |
+
"left": null,
|
| 965 |
+
"margin": null,
|
| 966 |
+
"max_height": null,
|
| 967 |
+
"max_width": null,
|
| 968 |
+
"min_height": null,
|
| 969 |
+
"min_width": null,
|
| 970 |
+
"object_fit": null,
|
| 971 |
+
"object_position": null,
|
| 972 |
+
"order": null,
|
| 973 |
+
"overflow": null,
|
| 974 |
+
"overflow_x": null,
|
| 975 |
+
"overflow_y": null,
|
| 976 |
+
"padding": null,
|
| 977 |
+
"right": null,
|
| 978 |
+
"top": null,
|
| 979 |
+
"visibility": null,
|
| 980 |
+
"width": null
|
| 981 |
+
}
|
| 982 |
+
},
|
| 983 |
+
"4fecdb69b5f942449767d63c2bbb2dc9": {
|
| 984 |
+
"model_module": "@jupyter-widgets/controls",
|
| 985 |
+
"model_module_version": "1.5.0",
|
| 986 |
+
"model_name": "FloatProgressModel",
|
| 987 |
+
"state": {
|
| 988 |
+
"_dom_classes": [],
|
| 989 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 990 |
+
"_model_module_version": "1.5.0",
|
| 991 |
+
"_model_name": "FloatProgressModel",
|
| 992 |
+
"_view_count": null,
|
| 993 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 994 |
+
"_view_module_version": "1.5.0",
|
| 995 |
+
"_view_name": "ProgressView",
|
| 996 |
+
"bar_style": "success",
|
| 997 |
+
"description": "",
|
| 998 |
+
"description_tooltip": null,
|
| 999 |
+
"layout": "IPY_MODEL_47d756b3efba43a8a45b48cac04cf929",
|
| 1000 |
+
"max": 989820849,
|
| 1001 |
+
"min": 0,
|
| 1002 |
+
"orientation": "horizontal",
|
| 1003 |
+
"style": "IPY_MODEL_d0d8b31e3c6b42318d804cc67a5aacea",
|
| 1004 |
+
"value": 989820849
|
| 1005 |
+
}
|
| 1006 |
+
},
|
| 1007 |
+
"5070aad508574d0da4f667885e9f8e6d": {
|
| 1008 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1009 |
+
"model_module_version": "1.5.0",
|
| 1010 |
+
"model_name": "ProgressStyleModel",
|
| 1011 |
+
"state": {
|
| 1012 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1013 |
+
"_model_module_version": "1.5.0",
|
| 1014 |
+
"_model_name": "ProgressStyleModel",
|
| 1015 |
+
"_view_count": null,
|
| 1016 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1017 |
+
"_view_module_version": "1.2.0",
|
| 1018 |
+
"_view_name": "StyleView",
|
| 1019 |
+
"bar_color": null,
|
| 1020 |
+
"description_width": ""
|
| 1021 |
+
}
|
| 1022 |
+
},
|
| 1023 |
+
"51a84bbc69874d5bb065701aba6c5a22": {
|
| 1024 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1025 |
+
"model_module_version": "1.5.0",
|
| 1026 |
+
"model_name": "HTMLModel",
|
| 1027 |
+
"state": {
|
| 1028 |
+
"_dom_classes": [],
|
| 1029 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1030 |
+
"_model_module_version": "1.5.0",
|
| 1031 |
+
"_model_name": "HTMLModel",
|
| 1032 |
+
"_view_count": null,
|
| 1033 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 1034 |
+
"_view_module_version": "1.5.0",
|
| 1035 |
+
"_view_name": "HTMLView",
|
| 1036 |
+
"description": "",
|
| 1037 |
+
"description_tooltip": null,
|
| 1038 |
+
"layout": "IPY_MODEL_bc254bc2525f43f7bea70b2f6f515288",
|
| 1039 |
+
"placeholder": "",
|
| 1040 |
+
"style": "IPY_MODEL_f3b1dc11f2a44da3a5db8499c1401288",
|
| 1041 |
+
"value": "Downloading (…)lve/main/config.json: 100%"
|
| 1042 |
+
}
|
| 1043 |
+
},
|
| 1044 |
+
"65b5176702244e338c1120e702f3ca96": {
|
| 1045 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1046 |
+
"model_module_version": "1.5.0",
|
| 1047 |
+
"model_name": "HTMLModel",
|
| 1048 |
+
"state": {
|
| 1049 |
+
"_dom_classes": [],
|
| 1050 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1051 |
+
"_model_module_version": "1.5.0",
|
| 1052 |
+
"_model_name": "HTMLModel",
|
| 1053 |
+
"_view_count": null,
|
| 1054 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 1055 |
+
"_view_module_version": "1.5.0",
|
| 1056 |
+
"_view_name": "HTMLView",
|
| 1057 |
+
"description": "",
|
| 1058 |
+
"description_tooltip": null,
|
| 1059 |
+
"layout": "IPY_MODEL_ae278dae0c484e85831aafd7a4ad6484",
|
| 1060 |
+
"placeholder": "",
|
| 1061 |
+
"style": "IPY_MODEL_1b3d91e17c0a4435860477f0593fc014",
|
| 1062 |
+
"value": " 4.56k/4.56k [00:00<00:00, 59.3kB/s]"
|
| 1063 |
+
}
|
| 1064 |
+
},
|
| 1065 |
+
"738c5fefbd4842ebacf8d0248a4e5bdd": {
|
| 1066 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1067 |
+
"model_module_version": "1.5.0",
|
| 1068 |
+
"model_name": "FloatProgressModel",
|
| 1069 |
+
"state": {
|
| 1070 |
+
"_dom_classes": [],
|
| 1071 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1072 |
+
"_model_module_version": "1.5.0",
|
| 1073 |
+
"_model_name": "FloatProgressModel",
|
| 1074 |
+
"_view_count": null,
|
| 1075 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 1076 |
+
"_view_module_version": "1.5.0",
|
| 1077 |
+
"_view_name": "ProgressView",
|
| 1078 |
+
"bar_style": "success",
|
| 1079 |
+
"description": "",
|
| 1080 |
+
"description_tooltip": null,
|
| 1081 |
+
"layout": "IPY_MODEL_b3140b792b3c47c68907d7a86259e389",
|
| 1082 |
+
"max": 4563,
|
| 1083 |
+
"min": 0,
|
| 1084 |
+
"orientation": "horizontal",
|
| 1085 |
+
"style": "IPY_MODEL_5070aad508574d0da4f667885e9f8e6d",
|
| 1086 |
+
"value": 4563
|
| 1087 |
+
}
|
| 1088 |
+
},
|
| 1089 |
+
"9dda2b50e90446438835f902b2b9d4b1": {
|
| 1090 |
+
"model_module": "@jupyter-widgets/base",
|
| 1091 |
+
"model_module_version": "1.2.0",
|
| 1092 |
+
"model_name": "LayoutModel",
|
| 1093 |
+
"state": {
|
| 1094 |
+
"_model_module": "@jupyter-widgets/base",
|
| 1095 |
+
"_model_module_version": "1.2.0",
|
| 1096 |
+
"_model_name": "LayoutModel",
|
| 1097 |
+
"_view_count": null,
|
| 1098 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1099 |
+
"_view_module_version": "1.2.0",
|
| 1100 |
+
"_view_name": "LayoutView",
|
| 1101 |
+
"align_content": null,
|
| 1102 |
+
"align_items": null,
|
| 1103 |
+
"align_self": null,
|
| 1104 |
+
"border": null,
|
| 1105 |
+
"bottom": null,
|
| 1106 |
+
"display": null,
|
| 1107 |
+
"flex": null,
|
| 1108 |
+
"flex_flow": null,
|
| 1109 |
+
"grid_area": null,
|
| 1110 |
+
"grid_auto_columns": null,
|
| 1111 |
+
"grid_auto_flow": null,
|
| 1112 |
+
"grid_auto_rows": null,
|
| 1113 |
+
"grid_column": null,
|
| 1114 |
+
"grid_gap": null,
|
| 1115 |
+
"grid_row": null,
|
| 1116 |
+
"grid_template_areas": null,
|
| 1117 |
+
"grid_template_columns": null,
|
| 1118 |
+
"grid_template_rows": null,
|
| 1119 |
+
"height": null,
|
| 1120 |
+
"justify_content": null,
|
| 1121 |
+
"justify_items": null,
|
| 1122 |
+
"left": null,
|
| 1123 |
+
"margin": null,
|
| 1124 |
+
"max_height": null,
|
| 1125 |
+
"max_width": null,
|
| 1126 |
+
"min_height": null,
|
| 1127 |
+
"min_width": null,
|
| 1128 |
+
"object_fit": null,
|
| 1129 |
+
"object_position": null,
|
| 1130 |
+
"order": null,
|
| 1131 |
+
"overflow": null,
|
| 1132 |
+
"overflow_x": null,
|
| 1133 |
+
"overflow_y": null,
|
| 1134 |
+
"padding": null,
|
| 1135 |
+
"right": null,
|
| 1136 |
+
"top": null,
|
| 1137 |
+
"visibility": null,
|
| 1138 |
+
"width": null
|
| 1139 |
+
}
|
| 1140 |
+
},
|
| 1141 |
+
"a3333106642d40d9a3066e9e2d79a650": {
|
| 1142 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1143 |
+
"model_module_version": "1.5.0",
|
| 1144 |
+
"model_name": "HTMLModel",
|
| 1145 |
+
"state": {
|
| 1146 |
+
"_dom_classes": [],
|
| 1147 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1148 |
+
"_model_module_version": "1.5.0",
|
| 1149 |
+
"_model_name": "HTMLModel",
|
| 1150 |
+
"_view_count": null,
|
| 1151 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 1152 |
+
"_view_module_version": "1.5.0",
|
| 1153 |
+
"_view_name": "HTMLView",
|
| 1154 |
+
"description": "",
|
| 1155 |
+
"description_tooltip": null,
|
| 1156 |
+
"layout": "IPY_MODEL_f3ac895d29454e58bffbde53fc00a05e",
|
| 1157 |
+
"placeholder": "",
|
| 1158 |
+
"style": "IPY_MODEL_b076ffc6b36a46c684557086a8c16582",
|
| 1159 |
+
"value": " 990M/990M [00:04<00:00, 219MB/s]"
|
| 1160 |
+
}
|
| 1161 |
+
},
|
| 1162 |
+
"ae278dae0c484e85831aafd7a4ad6484": {
|
| 1163 |
+
"model_module": "@jupyter-widgets/base",
|
| 1164 |
+
"model_module_version": "1.2.0",
|
| 1165 |
+
"model_name": "LayoutModel",
|
| 1166 |
+
"state": {
|
| 1167 |
+
"_model_module": "@jupyter-widgets/base",
|
| 1168 |
+
"_model_module_version": "1.2.0",
|
| 1169 |
+
"_model_name": "LayoutModel",
|
| 1170 |
+
"_view_count": null,
|
| 1171 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1172 |
+
"_view_module_version": "1.2.0",
|
| 1173 |
+
"_view_name": "LayoutView",
|
| 1174 |
+
"align_content": null,
|
| 1175 |
+
"align_items": null,
|
| 1176 |
+
"align_self": null,
|
| 1177 |
+
"border": null,
|
| 1178 |
+
"bottom": null,
|
| 1179 |
+
"display": null,
|
| 1180 |
+
"flex": null,
|
| 1181 |
+
"flex_flow": null,
|
| 1182 |
+
"grid_area": null,
|
| 1183 |
+
"grid_auto_columns": null,
|
| 1184 |
+
"grid_auto_flow": null,
|
| 1185 |
+
"grid_auto_rows": null,
|
| 1186 |
+
"grid_column": null,
|
| 1187 |
+
"grid_gap": null,
|
| 1188 |
+
"grid_row": null,
|
| 1189 |
+
"grid_template_areas": null,
|
| 1190 |
+
"grid_template_columns": null,
|
| 1191 |
+
"grid_template_rows": null,
|
| 1192 |
+
"height": null,
|
| 1193 |
+
"justify_content": null,
|
| 1194 |
+
"justify_items": null,
|
| 1195 |
+
"left": null,
|
| 1196 |
+
"margin": null,
|
| 1197 |
+
"max_height": null,
|
| 1198 |
+
"max_width": null,
|
| 1199 |
+
"min_height": null,
|
| 1200 |
+
"min_width": null,
|
| 1201 |
+
"object_fit": null,
|
| 1202 |
+
"object_position": null,
|
| 1203 |
+
"order": null,
|
| 1204 |
+
"overflow": null,
|
| 1205 |
+
"overflow_x": null,
|
| 1206 |
+
"overflow_y": null,
|
| 1207 |
+
"padding": null,
|
| 1208 |
+
"right": null,
|
| 1209 |
+
"top": null,
|
| 1210 |
+
"visibility": null,
|
| 1211 |
+
"width": null
|
| 1212 |
+
}
|
| 1213 |
+
},
|
| 1214 |
+
"afac2f5a68c946b387f30e914a62729d": {
|
| 1215 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1216 |
+
"model_module_version": "1.5.0",
|
| 1217 |
+
"model_name": "HTMLModel",
|
| 1218 |
+
"state": {
|
| 1219 |
+
"_dom_classes": [],
|
| 1220 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1221 |
+
"_model_module_version": "1.5.0",
|
| 1222 |
+
"_model_name": "HTMLModel",
|
| 1223 |
+
"_view_count": null,
|
| 1224 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 1225 |
+
"_view_module_version": "1.5.0",
|
| 1226 |
+
"_view_name": "HTMLView",
|
| 1227 |
+
"description": "",
|
| 1228 |
+
"description_tooltip": null,
|
| 1229 |
+
"layout": "IPY_MODEL_9dda2b50e90446438835f902b2b9d4b1",
|
| 1230 |
+
"placeholder": "",
|
| 1231 |
+
"style": "IPY_MODEL_21c6e937405d42048aa5607dbe779465",
|
| 1232 |
+
"value": "Downloading pytorch_model.bin: 100%"
|
| 1233 |
+
}
|
| 1234 |
+
},
|
| 1235 |
+
"b076ffc6b36a46c684557086a8c16582": {
|
| 1236 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1237 |
+
"model_module_version": "1.5.0",
|
| 1238 |
+
"model_name": "DescriptionStyleModel",
|
| 1239 |
+
"state": {
|
| 1240 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1241 |
+
"_model_module_version": "1.5.0",
|
| 1242 |
+
"_model_name": "DescriptionStyleModel",
|
| 1243 |
+
"_view_count": null,
|
| 1244 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1245 |
+
"_view_module_version": "1.2.0",
|
| 1246 |
+
"_view_name": "StyleView",
|
| 1247 |
+
"description_width": ""
|
| 1248 |
+
}
|
| 1249 |
+
},
|
| 1250 |
+
"b3140b792b3c47c68907d7a86259e389": {
|
| 1251 |
+
"model_module": "@jupyter-widgets/base",
|
| 1252 |
+
"model_module_version": "1.2.0",
|
| 1253 |
+
"model_name": "LayoutModel",
|
| 1254 |
+
"state": {
|
| 1255 |
+
"_model_module": "@jupyter-widgets/base",
|
| 1256 |
+
"_model_module_version": "1.2.0",
|
| 1257 |
+
"_model_name": "LayoutModel",
|
| 1258 |
+
"_view_count": null,
|
| 1259 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1260 |
+
"_view_module_version": "1.2.0",
|
| 1261 |
+
"_view_name": "LayoutView",
|
| 1262 |
+
"align_content": null,
|
| 1263 |
+
"align_items": null,
|
| 1264 |
+
"align_self": null,
|
| 1265 |
+
"border": null,
|
| 1266 |
+
"bottom": null,
|
| 1267 |
+
"display": null,
|
| 1268 |
+
"flex": null,
|
| 1269 |
+
"flex_flow": null,
|
| 1270 |
+
"grid_area": null,
|
| 1271 |
+
"grid_auto_columns": null,
|
| 1272 |
+
"grid_auto_flow": null,
|
| 1273 |
+
"grid_auto_rows": null,
|
| 1274 |
+
"grid_column": null,
|
| 1275 |
+
"grid_gap": null,
|
| 1276 |
+
"grid_row": null,
|
| 1277 |
+
"grid_template_areas": null,
|
| 1278 |
+
"grid_template_columns": null,
|
| 1279 |
+
"grid_template_rows": null,
|
| 1280 |
+
"height": null,
|
| 1281 |
+
"justify_content": null,
|
| 1282 |
+
"justify_items": null,
|
| 1283 |
+
"left": null,
|
| 1284 |
+
"margin": null,
|
| 1285 |
+
"max_height": null,
|
| 1286 |
+
"max_width": null,
|
| 1287 |
+
"min_height": null,
|
| 1288 |
+
"min_width": null,
|
| 1289 |
+
"object_fit": null,
|
| 1290 |
+
"object_position": null,
|
| 1291 |
+
"order": null,
|
| 1292 |
+
"overflow": null,
|
| 1293 |
+
"overflow_x": null,
|
| 1294 |
+
"overflow_y": null,
|
| 1295 |
+
"padding": null,
|
| 1296 |
+
"right": null,
|
| 1297 |
+
"top": null,
|
| 1298 |
+
"visibility": null,
|
| 1299 |
+
"width": null
|
| 1300 |
+
}
|
| 1301 |
+
},
|
| 1302 |
+
"b41130569642470ca5efb531e7652e17": {
|
| 1303 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1304 |
+
"model_module_version": "1.5.0",
|
| 1305 |
+
"model_name": "HBoxModel",
|
| 1306 |
+
"state": {
|
| 1307 |
+
"_dom_classes": [],
|
| 1308 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1309 |
+
"_model_module_version": "1.5.0",
|
| 1310 |
+
"_model_name": "HBoxModel",
|
| 1311 |
+
"_view_count": null,
|
| 1312 |
+
"_view_module": "@jupyter-widgets/controls",
|
| 1313 |
+
"_view_module_version": "1.5.0",
|
| 1314 |
+
"_view_name": "HBoxView",
|
| 1315 |
+
"box_style": "",
|
| 1316 |
+
"children": [
|
| 1317 |
+
"IPY_MODEL_afac2f5a68c946b387f30e914a62729d",
|
| 1318 |
+
"IPY_MODEL_4fecdb69b5f942449767d63c2bbb2dc9",
|
| 1319 |
+
"IPY_MODEL_a3333106642d40d9a3066e9e2d79a650"
|
| 1320 |
+
],
|
| 1321 |
+
"layout": "IPY_MODEL_0a1f3685b7264cc595355cd906cb0e4d"
|
| 1322 |
+
}
|
| 1323 |
+
},
|
| 1324 |
+
"bc254bc2525f43f7bea70b2f6f515288": {
|
| 1325 |
+
"model_module": "@jupyter-widgets/base",
|
| 1326 |
+
"model_module_version": "1.2.0",
|
| 1327 |
+
"model_name": "LayoutModel",
|
| 1328 |
+
"state": {
|
| 1329 |
+
"_model_module": "@jupyter-widgets/base",
|
| 1330 |
+
"_model_module_version": "1.2.0",
|
| 1331 |
+
"_model_name": "LayoutModel",
|
| 1332 |
+
"_view_count": null,
|
| 1333 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1334 |
+
"_view_module_version": "1.2.0",
|
| 1335 |
+
"_view_name": "LayoutView",
|
| 1336 |
+
"align_content": null,
|
| 1337 |
+
"align_items": null,
|
| 1338 |
+
"align_self": null,
|
| 1339 |
+
"border": null,
|
| 1340 |
+
"bottom": null,
|
| 1341 |
+
"display": null,
|
| 1342 |
+
"flex": null,
|
| 1343 |
+
"flex_flow": null,
|
| 1344 |
+
"grid_area": null,
|
| 1345 |
+
"grid_auto_columns": null,
|
| 1346 |
+
"grid_auto_flow": null,
|
| 1347 |
+
"grid_auto_rows": null,
|
| 1348 |
+
"grid_column": null,
|
| 1349 |
+
"grid_gap": null,
|
| 1350 |
+
"grid_row": null,
|
| 1351 |
+
"grid_template_areas": null,
|
| 1352 |
+
"grid_template_columns": null,
|
| 1353 |
+
"grid_template_rows": null,
|
| 1354 |
+
"height": null,
|
| 1355 |
+
"justify_content": null,
|
| 1356 |
+
"justify_items": null,
|
| 1357 |
+
"left": null,
|
| 1358 |
+
"margin": null,
|
| 1359 |
+
"max_height": null,
|
| 1360 |
+
"max_width": null,
|
| 1361 |
+
"min_height": null,
|
| 1362 |
+
"min_width": null,
|
| 1363 |
+
"object_fit": null,
|
| 1364 |
+
"object_position": null,
|
| 1365 |
+
"order": null,
|
| 1366 |
+
"overflow": null,
|
| 1367 |
+
"overflow_x": null,
|
| 1368 |
+
"overflow_y": null,
|
| 1369 |
+
"padding": null,
|
| 1370 |
+
"right": null,
|
| 1371 |
+
"top": null,
|
| 1372 |
+
"visibility": null,
|
| 1373 |
+
"width": null
|
| 1374 |
+
}
|
| 1375 |
+
},
|
| 1376 |
+
"cebfa21c01524fa89ea124485867d8cc": {
|
| 1377 |
+
"model_module": "@jupyter-widgets/base",
|
| 1378 |
+
"model_module_version": "1.2.0",
|
| 1379 |
+
"model_name": "LayoutModel",
|
| 1380 |
+
"state": {
|
| 1381 |
+
"_model_module": "@jupyter-widgets/base",
|
| 1382 |
+
"_model_module_version": "1.2.0",
|
| 1383 |
+
"_model_name": "LayoutModel",
|
| 1384 |
+
"_view_count": null,
|
| 1385 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1386 |
+
"_view_module_version": "1.2.0",
|
| 1387 |
+
"_view_name": "LayoutView",
|
| 1388 |
+
"align_content": null,
|
| 1389 |
+
"align_items": null,
|
| 1390 |
+
"align_self": null,
|
| 1391 |
+
"border": null,
|
| 1392 |
+
"bottom": null,
|
| 1393 |
+
"display": null,
|
| 1394 |
+
"flex": null,
|
| 1395 |
+
"flex_flow": null,
|
| 1396 |
+
"grid_area": null,
|
| 1397 |
+
"grid_auto_columns": null,
|
| 1398 |
+
"grid_auto_flow": null,
|
| 1399 |
+
"grid_auto_rows": null,
|
| 1400 |
+
"grid_column": null,
|
| 1401 |
+
"grid_gap": null,
|
| 1402 |
+
"grid_row": null,
|
| 1403 |
+
"grid_template_areas": null,
|
| 1404 |
+
"grid_template_columns": null,
|
| 1405 |
+
"grid_template_rows": null,
|
| 1406 |
+
"height": null,
|
| 1407 |
+
"justify_content": null,
|
| 1408 |
+
"justify_items": null,
|
| 1409 |
+
"left": null,
|
| 1410 |
+
"margin": null,
|
| 1411 |
+
"max_height": null,
|
| 1412 |
+
"max_width": null,
|
| 1413 |
+
"min_height": null,
|
| 1414 |
+
"min_width": null,
|
| 1415 |
+
"object_fit": null,
|
| 1416 |
+
"object_position": null,
|
| 1417 |
+
"order": null,
|
| 1418 |
+
"overflow": null,
|
| 1419 |
+
"overflow_x": null,
|
| 1420 |
+
"overflow_y": null,
|
| 1421 |
+
"padding": null,
|
| 1422 |
+
"right": null,
|
| 1423 |
+
"top": null,
|
| 1424 |
+
"visibility": null,
|
| 1425 |
+
"width": null
|
| 1426 |
+
}
|
| 1427 |
+
},
|
| 1428 |
+
"d0d8b31e3c6b42318d804cc67a5aacea": {
|
| 1429 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1430 |
+
"model_module_version": "1.5.0",
|
| 1431 |
+
"model_name": "ProgressStyleModel",
|
| 1432 |
+
"state": {
|
| 1433 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1434 |
+
"_model_module_version": "1.5.0",
|
| 1435 |
+
"_model_name": "ProgressStyleModel",
|
| 1436 |
+
"_view_count": null,
|
| 1437 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1438 |
+
"_view_module_version": "1.2.0",
|
| 1439 |
+
"_view_name": "StyleView",
|
| 1440 |
+
"bar_color": null,
|
| 1441 |
+
"description_width": ""
|
| 1442 |
+
}
|
| 1443 |
+
},
|
| 1444 |
+
"f3ac895d29454e58bffbde53fc00a05e": {
|
| 1445 |
+
"model_module": "@jupyter-widgets/base",
|
| 1446 |
+
"model_module_version": "1.2.0",
|
| 1447 |
+
"model_name": "LayoutModel",
|
| 1448 |
+
"state": {
|
| 1449 |
+
"_model_module": "@jupyter-widgets/base",
|
| 1450 |
+
"_model_module_version": "1.2.0",
|
| 1451 |
+
"_model_name": "LayoutModel",
|
| 1452 |
+
"_view_count": null,
|
| 1453 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1454 |
+
"_view_module_version": "1.2.0",
|
| 1455 |
+
"_view_name": "LayoutView",
|
| 1456 |
+
"align_content": null,
|
| 1457 |
+
"align_items": null,
|
| 1458 |
+
"align_self": null,
|
| 1459 |
+
"border": null,
|
| 1460 |
+
"bottom": null,
|
| 1461 |
+
"display": null,
|
| 1462 |
+
"flex": null,
|
| 1463 |
+
"flex_flow": null,
|
| 1464 |
+
"grid_area": null,
|
| 1465 |
+
"grid_auto_columns": null,
|
| 1466 |
+
"grid_auto_flow": null,
|
| 1467 |
+
"grid_auto_rows": null,
|
| 1468 |
+
"grid_column": null,
|
| 1469 |
+
"grid_gap": null,
|
| 1470 |
+
"grid_row": null,
|
| 1471 |
+
"grid_template_areas": null,
|
| 1472 |
+
"grid_template_columns": null,
|
| 1473 |
+
"grid_template_rows": null,
|
| 1474 |
+
"height": null,
|
| 1475 |
+
"justify_content": null,
|
| 1476 |
+
"justify_items": null,
|
| 1477 |
+
"left": null,
|
| 1478 |
+
"margin": null,
|
| 1479 |
+
"max_height": null,
|
| 1480 |
+
"max_width": null,
|
| 1481 |
+
"min_height": null,
|
| 1482 |
+
"min_width": null,
|
| 1483 |
+
"object_fit": null,
|
| 1484 |
+
"object_position": null,
|
| 1485 |
+
"order": null,
|
| 1486 |
+
"overflow": null,
|
| 1487 |
+
"overflow_x": null,
|
| 1488 |
+
"overflow_y": null,
|
| 1489 |
+
"padding": null,
|
| 1490 |
+
"right": null,
|
| 1491 |
+
"top": null,
|
| 1492 |
+
"visibility": null,
|
| 1493 |
+
"width": null
|
| 1494 |
+
}
|
| 1495 |
+
},
|
| 1496 |
+
"f3b1dc11f2a44da3a5db8499c1401288": {
|
| 1497 |
+
"model_module": "@jupyter-widgets/controls",
|
| 1498 |
+
"model_module_version": "1.5.0",
|
| 1499 |
+
"model_name": "DescriptionStyleModel",
|
| 1500 |
+
"state": {
|
| 1501 |
+
"_model_module": "@jupyter-widgets/controls",
|
| 1502 |
+
"_model_module_version": "1.5.0",
|
| 1503 |
+
"_model_name": "DescriptionStyleModel",
|
| 1504 |
+
"_view_count": null,
|
| 1505 |
+
"_view_module": "@jupyter-widgets/base",
|
| 1506 |
+
"_view_module_version": "1.2.0",
|
| 1507 |
+
"_view_name": "StyleView",
|
| 1508 |
+
"description_width": ""
|
| 1509 |
+
}
|
| 1510 |
+
}
|
| 1511 |
+
}
|
| 1512 |
+
}
|
| 1513 |
+
},
|
| 1514 |
+
"nbformat": 4,
|
| 1515 |
+
"nbformat_minor": 4
|
| 1516 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/3_Loading Models by data type_helper.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import requests
|
| 4 |
+
from PIL import Image
|
| 5 |
+
|
| 6 |
+
import warnings
|
| 7 |
+
# Ignore specific UserWarnings related to max_length in transformers
|
| 8 |
+
warnings.filterwarnings("ignore",
|
| 9 |
+
message=".*Using the model-agnostic default `max_length`.*")
|
| 10 |
+
|
| 11 |
+
class DummyModel(nn.Module):
|
| 12 |
+
"""
|
| 13 |
+
A dummy model that consists of an embedding layer
|
| 14 |
+
with two blocks of a linear layer followed by a layer
|
| 15 |
+
norm layer.
|
| 16 |
+
"""
|
| 17 |
+
def __init__(self):
|
| 18 |
+
super().__init__()
|
| 19 |
+
|
| 20 |
+
torch.manual_seed(123)
|
| 21 |
+
|
| 22 |
+
self.token_embedding = nn.Embedding(2, 2)
|
| 23 |
+
|
| 24 |
+
# Block 1
|
| 25 |
+
self.linear_1 = nn.Linear(2, 2)
|
| 26 |
+
self.layernorm_1 = nn.LayerNorm(2)
|
| 27 |
+
|
| 28 |
+
# Block 2
|
| 29 |
+
self.linear_2 = nn.Linear(2, 2)
|
| 30 |
+
self.layernorm_2 = nn.LayerNorm(2)
|
| 31 |
+
|
| 32 |
+
self.head = nn.Linear(2, 2)
|
| 33 |
+
|
| 34 |
+
def forward(self, x):
|
| 35 |
+
hidden_states = self.token_embedding(x)
|
| 36 |
+
|
| 37 |
+
# Block 1
|
| 38 |
+
hidden_states = self.linear_1(hidden_states)
|
| 39 |
+
hidden_states = self.layernorm_1(hidden_states)
|
| 40 |
+
|
| 41 |
+
# Block 2
|
| 42 |
+
hidden_states = self.linear_2(hidden_states)
|
| 43 |
+
hidden_states = self.layernorm_2(hidden_states)
|
| 44 |
+
|
| 45 |
+
logits = self.head(hidden_states)
|
| 46 |
+
return logits
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def get_generation(model, processor, image, dtype):
|
| 50 |
+
inputs = processor(image, return_tensors="pt").to(dtype)
|
| 51 |
+
out = model.generate(**inputs)
|
| 52 |
+
return processor.decode(out[0], skip_special_tokens=True)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def load_image(img_url):
|
| 56 |
+
image = Image.open(requests.get(
|
| 57 |
+
img_url, stream=True).raw).convert('RGB')
|
| 58 |
+
|
| 59 |
+
return image
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
|
Hugging Face.Quantization Fundamentals/Materials/4_Quantization Theory_L4_quantization_theory.ipynb
ADDED
|
@@ -0,0 +1,371 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {
|
| 6 |
+
"id": "W6tCv-A_bJVb"
|
| 7 |
+
},
|
| 8 |
+
"source": [
|
| 9 |
+
"# Lesson 4: Quantization Theory\n",
|
| 10 |
+
"\n",
|
| 11 |
+
"In this lab, you will perform Linear Quantization.\n",
|
| 12 |
+
"\n",
|
| 13 |
+
"#### Libraries to install\n",
|
| 14 |
+
"- If you are running this notebook on your local machine, you can install the following:\n",
|
| 15 |
+
"\n",
|
| 16 |
+
"```Python\n",
|
| 17 |
+
"!pip install transformers==4.35.0\n",
|
| 18 |
+
"!pip install quanto==0.0.11\n",
|
| 19 |
+
"!pip install torch==2.1.1\n",
|
| 20 |
+
"```"
|
| 21 |
+
]
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"cell_type": "markdown",
|
| 25 |
+
"metadata": {},
|
| 26 |
+
"source": [
|
| 27 |
+
"## T5-FLAN\n",
|
| 28 |
+
"- Please note that due to hardware memory constraints, and in order to offer this course for free to everyone, the code you'll run here is for the T5-FLAN model instead of the EleutherAI AI Pythia model. \n",
|
| 29 |
+
"- Thank you for your understanding! 🤗\n",
|
| 30 |
+
"\n",
|
| 31 |
+
"For the T5-FLAN model, here is one more library to install if you are running locally:\n",
|
| 32 |
+
"```Python\n",
|
| 33 |
+
"!pip install sentencepiece==0.2.0\n",
|
| 34 |
+
"```\n"
|
| 35 |
+
]
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"cell_type": "markdown",
|
| 39 |
+
"metadata": {},
|
| 40 |
+
"source": [
|
| 41 |
+
"### Without Quantization"
|
| 42 |
+
]
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"cell_type": "code",
|
| 46 |
+
"execution_count": null,
|
| 47 |
+
"metadata": {},
|
| 48 |
+
"outputs": [],
|
| 49 |
+
"source": [
|
| 50 |
+
"model_name = \"google/flan-t5-small\""
|
| 51 |
+
]
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"cell_type": "code",
|
| 55 |
+
"execution_count": null,
|
| 56 |
+
"metadata": {},
|
| 57 |
+
"outputs": [],
|
| 58 |
+
"source": [
|
| 59 |
+
"import sentencepiece as spm\n",
|
| 60 |
+
"from transformers import T5Tokenizer, T5ForConditionalGeneration\n",
|
| 61 |
+
"\n",
|
| 62 |
+
"tokenizer = T5Tokenizer.from_pretrained(\"google/flan-t5-small\")"
|
| 63 |
+
]
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"cell_type": "code",
|
| 67 |
+
"execution_count": null,
|
| 68 |
+
"metadata": {},
|
| 69 |
+
"outputs": [],
|
| 70 |
+
"source": [
|
| 71 |
+
"model = T5ForConditionalGeneration.from_pretrained(\"google/flan-t5-small\")"
|
| 72 |
+
]
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"cell_type": "code",
|
| 76 |
+
"execution_count": null,
|
| 77 |
+
"metadata": {},
|
| 78 |
+
"outputs": [],
|
| 79 |
+
"source": [
|
| 80 |
+
"input_text = \"Hello, my name is \"\n",
|
| 81 |
+
"input_ids = tokenizer(input_text, return_tensors=\"pt\").input_ids\n",
|
| 82 |
+
"\n",
|
| 83 |
+
"outputs = model.generate(input_ids)\n",
|
| 84 |
+
"print(tokenizer.decode(outputs[0]))"
|
| 85 |
+
]
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"cell_type": "code",
|
| 89 |
+
"execution_count": null,
|
| 90 |
+
"metadata": {},
|
| 91 |
+
"outputs": [],
|
| 92 |
+
"source": [
|
| 93 |
+
"from helper import compute_module_sizes\n",
|
| 94 |
+
"module_sizes = compute_module_sizes(model)\n",
|
| 95 |
+
"print(f\"The model size is {module_sizes[''] * 1e-9} GB\")"
|
| 96 |
+
]
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"cell_type": "markdown",
|
| 100 |
+
"metadata": {},
|
| 101 |
+
"source": [
|
| 102 |
+
"## Quantize the model (8-bit precision)"
|
| 103 |
+
]
|
| 104 |
+
},
|
| 105 |
+
{
|
| 106 |
+
"cell_type": "code",
|
| 107 |
+
"execution_count": null,
|
| 108 |
+
"metadata": {},
|
| 109 |
+
"outputs": [],
|
| 110 |
+
"source": [
|
| 111 |
+
"from quanto import quantize, freeze\n",
|
| 112 |
+
"import torch"
|
| 113 |
+
]
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"cell_type": "code",
|
| 117 |
+
"execution_count": null,
|
| 118 |
+
"metadata": {},
|
| 119 |
+
"outputs": [],
|
| 120 |
+
"source": [
|
| 121 |
+
"quantize(model, weights=torch.int8, activations=None)"
|
| 122 |
+
]
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"cell_type": "code",
|
| 126 |
+
"execution_count": null,
|
| 127 |
+
"metadata": {},
|
| 128 |
+
"outputs": [],
|
| 129 |
+
"source": [
|
| 130 |
+
"print(model)"
|
| 131 |
+
]
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"cell_type": "markdown",
|
| 135 |
+
"metadata": {},
|
| 136 |
+
"source": [
|
| 137 |
+
"### Freeze the model\n",
|
| 138 |
+
"- This step takes a bit of memory, and so for the Pythia model that is shown in the lecture video, it will not run in the classroom.\n",
|
| 139 |
+
"- This will work fine with the smaller T5-Flan model."
|
| 140 |
+
]
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"cell_type": "code",
|
| 144 |
+
"execution_count": null,
|
| 145 |
+
"metadata": {},
|
| 146 |
+
"outputs": [],
|
| 147 |
+
"source": [
|
| 148 |
+
"freeze(model)"
|
| 149 |
+
]
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"cell_type": "code",
|
| 153 |
+
"execution_count": null,
|
| 154 |
+
"metadata": {},
|
| 155 |
+
"outputs": [],
|
| 156 |
+
"source": [
|
| 157 |
+
"module_sizes = compute_module_sizes(model)\n",
|
| 158 |
+
"print(f\"The model size is {module_sizes[''] * 1e-9} GB\")"
|
| 159 |
+
]
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"cell_type": "markdown",
|
| 163 |
+
"metadata": {},
|
| 164 |
+
"source": [
|
| 165 |
+
"### Try running inference on the quantized model"
|
| 166 |
+
]
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"cell_type": "code",
|
| 170 |
+
"execution_count": null,
|
| 171 |
+
"metadata": {},
|
| 172 |
+
"outputs": [],
|
| 173 |
+
"source": [
|
| 174 |
+
"input_text = \"Hello, my name is \"\n",
|
| 175 |
+
"input_ids = tokenizer(input_text, return_tensors=\"pt\").input_ids\n",
|
| 176 |
+
"\n",
|
| 177 |
+
"outputs = model.generate(input_ids)\n",
|
| 178 |
+
"print(tokenizer.decode(outputs[0]))"
|
| 179 |
+
]
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"cell_type": "markdown",
|
| 183 |
+
"metadata": {
|
| 184 |
+
"id": "qqmyZYGbbO8A"
|
| 185 |
+
},
|
| 186 |
+
"source": [
|
| 187 |
+
"## Note: Quantizing the model used in the lecture video will not work due to classroom hardware limitations.\n",
|
| 188 |
+
"- Here is the code that Marc, the instructor is walking through. \n",
|
| 189 |
+
"- It will likely run on your local computer if you have 8GB of memory, which is usually the minimum for personal computers.\n",
|
| 190 |
+
" - To run locally, you can download the notebook and the helper.py file by clicking on the \"Jupyter icon\" at the top of the notebook and navigating the file directory of this classroom. Also download the requirements.txt to install all the required libraries.\n",
|
| 191 |
+
"\n",
|
| 192 |
+
"### Without Quantization\n",
|
| 193 |
+
"\n"
|
| 194 |
+
]
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"cell_type": "markdown",
|
| 198 |
+
"metadata": {},
|
| 199 |
+
"source": [
|
| 200 |
+
"- Load [EleutherAI/pythia-410m](https://huggingface.co/EleutherAI/pythia-410m) model and tokenizer.\n",
|
| 201 |
+
"\n",
|
| 202 |
+
"```Python\n",
|
| 203 |
+
"from transformers import AutoModelForCausalLM\n",
|
| 204 |
+
"model_name = \"EleutherAI/pythia-410m\"\n",
|
| 205 |
+
"\n",
|
| 206 |
+
"model = AutoModelForCausalLM.from_pretrained(model_name,\n",
|
| 207 |
+
" low_cpu_mem_usage=True)\n",
|
| 208 |
+
"print(model.gpt_neox)\n",
|
| 209 |
+
"\n",
|
| 210 |
+
"\n",
|
| 211 |
+
"from transformers import AutoTokenizer\n",
|
| 212 |
+
"tokenizer = AutoTokenizer.from_pretrained(model_name)"
|
| 213 |
+
]
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"cell_type": "markdown",
|
| 217 |
+
"metadata": {},
|
| 218 |
+
"source": [
|
| 219 |
+
"- Write a start of a (`text`) sentence which you'd like the model to complete.\n",
|
| 220 |
+
"```Python\n",
|
| 221 |
+
"text = \"Hello my name is\"\n",
|
| 222 |
+
"inputs = tokenizer(text, return_tensors=\"pt\")\n",
|
| 223 |
+
"outputs = model.generate(**inputs, max_new_tokens=10)\n",
|
| 224 |
+
"outputs\n",
|
| 225 |
+
"print(tokenizer.decode(outputs[0], skip_special_tokens=True))\n",
|
| 226 |
+
"```"
|
| 227 |
+
]
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"cell_type": "markdown",
|
| 231 |
+
"metadata": {},
|
| 232 |
+
"source": [
|
| 233 |
+
"- Compute the model's size using the helper function, `compute_module_sizes`.\n",
|
| 234 |
+
"```Python\n",
|
| 235 |
+
"from helper import compute_module_sizes\n",
|
| 236 |
+
"module_sizes = compute_module_sizes(model)\n",
|
| 237 |
+
"print(f\"The model size is {module_sizes[''] * 1e-9} GB\")\n",
|
| 238 |
+
"print(model.gpt_neox.layers[0].attention.dense.weight)\n",
|
| 239 |
+
"```\n",
|
| 240 |
+
"**Note:** The weights are in `fp32`."
|
| 241 |
+
]
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"cell_type": "markdown",
|
| 245 |
+
"metadata": {},
|
| 246 |
+
"source": []
|
| 247 |
+
},
|
| 248 |
+
{
|
| 249 |
+
"cell_type": "markdown",
|
| 250 |
+
"metadata": {
|
| 251 |
+
"id": "2BC809CYugOp"
|
| 252 |
+
},
|
| 253 |
+
"source": [
|
| 254 |
+
"### 8-bit Quantization"
|
| 255 |
+
]
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"cell_type": "markdown",
|
| 259 |
+
"metadata": {},
|
| 260 |
+
"source": [
|
| 261 |
+
"```Python\n",
|
| 262 |
+
"from quanto import quantize, freeze\n",
|
| 263 |
+
"import torch\n",
|
| 264 |
+
"\n",
|
| 265 |
+
"quantize(model, weights=torch.int8, activations=None)\n",
|
| 266 |
+
"# after performing quantization\n",
|
| 267 |
+
"print(model.gpt_neox)\n",
|
| 268 |
+
"print(model.gpt_neox.layers[0].attention.dense.weight)\n",
|
| 269 |
+
"```"
|
| 270 |
+
]
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"cell_type": "markdown",
|
| 274 |
+
"metadata": {},
|
| 275 |
+
"source": [
|
| 276 |
+
"- The \"freeze\" function requires more memory than is available in this classroom.\n",
|
| 277 |
+
"- This code will run on a machine that has 8GB of memory, and so it will likely work if you run this code on your local machine.\n",
|
| 278 |
+
"\n",
|
| 279 |
+
"```Python\n",
|
| 280 |
+
"# freeze the model\n",
|
| 281 |
+
"freeze(model)\n",
|
| 282 |
+
"print(model.gpt_neox.layers[0].attention.dense.weight)\n",
|
| 283 |
+
"\n",
|
| 284 |
+
"# get model size after quantization\n",
|
| 285 |
+
"module_sizes = compute_module_sizes(model)\n",
|
| 286 |
+
"print(f\"The model size is {module_sizes[''] * 1e-9} GB\")\n",
|
| 287 |
+
"\n",
|
| 288 |
+
"# run inference after quantizing the model\n",
|
| 289 |
+
"outputs = model.generate(**inputs, max_new_tokens=10)\n",
|
| 290 |
+
"print(tokenizer.decode(outputs[0], skip_special_tokens=True))\n",
|
| 291 |
+
"```"
|
| 292 |
+
]
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"cell_type": "markdown",
|
| 296 |
+
"metadata": {},
|
| 297 |
+
"source": [
|
| 298 |
+
"#### Comparing \"linear quantization\" to \"downcasting\"\n",
|
| 299 |
+
"\n",
|
| 300 |
+
"To recap the difference between the \"linear quantization\" method in this lesson with the \"downcasting\" method in the previous lesson:\n",
|
| 301 |
+
"\n",
|
| 302 |
+
"- When downcasting a model, you convert the model's parameters to a more compact data type (bfloat16). During inference, the model performs its calculations in this data type, and its activations are in this data type. Downcasting may work with the bfloat16 data type, but the model performance will likely degrade with any smaller data type, and won't work if you convert to an integer data type (like the int8 in this lesson).\n",
|
| 303 |
+
"\n",
|
| 304 |
+
"\n",
|
| 305 |
+
"- In this lesson, you used another quantization method, \"linear quantization\", which enables the quantized model to maintain performance much closer to the original model by converting from the compressed data type back to the original FP32 data type during inference. So when the model makes a prediction, it is performing the matrix multiplications in FP32, and the activations are in FP32. This enables you to quantize the model in data types smaller than bfloat16, such as int8, in this example."
|
| 306 |
+
]
|
| 307 |
+
},
|
| 308 |
+
{
|
| 309 |
+
"cell_type": "markdown",
|
| 310 |
+
"metadata": {},
|
| 311 |
+
"source": [
|
| 312 |
+
"#### This is just the beginning...\n",
|
| 313 |
+
"- This course is intended to be a beginner-friendly introduction to the field of quantization. 🐣\n",
|
| 314 |
+
"- If you'd like to learn more about quantization, please stay tuned for another Hugging Face short course that goes into more depth on this topic (launching in a few weeks!) 🤗"
|
| 315 |
+
]
|
| 316 |
+
},
|
| 317 |
+
{
|
| 318 |
+
"cell_type": "markdown",
|
| 319 |
+
"metadata": {},
|
| 320 |
+
"source": [
|
| 321 |
+
"## Did you like this course?\n",
|
| 322 |
+
"\n",
|
| 323 |
+
"- If you liked this course, could you consider giving a rating and share what you liked? 💕\n",
|
| 324 |
+
"- If you did not like this course, could you also please share what you think could have made it better? 🙏\n",
|
| 325 |
+
"\n",
|
| 326 |
+
"#### A note about the \"Course Review\" page.\n",
|
| 327 |
+
"The rating options are from 0 to 10.\n",
|
| 328 |
+
"- A score of 9 or 10 means you like the course.🤗\n",
|
| 329 |
+
"- A score of 7 or 8 means you feel neutral about the course (neither like nor dislike).🙄\n",
|
| 330 |
+
"- A score of 0,1,2,3,4,5 or 6 all mean that you do not like the course. 😭\n",
|
| 331 |
+
" - Whether you give a 0 or a 6, these are all defined as \"detractors\" according to the standard measurement called \"Net Promoter Score\". 🧐"
|
| 332 |
+
]
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"cell_type": "code",
|
| 336 |
+
"execution_count": null,
|
| 337 |
+
"metadata": {},
|
| 338 |
+
"outputs": [],
|
| 339 |
+
"source": []
|
| 340 |
+
}
|
| 341 |
+
],
|
| 342 |
+
"metadata": {
|
| 343 |
+
"colab": {
|
| 344 |
+
"authorship_tag": "ABX9TyOpTGuZzDRr5A8ocSoYIFkP",
|
| 345 |
+
"collapsed_sections": [
|
| 346 |
+
"4V_amrl-xG9D",
|
| 347 |
+
"dODA6rR0z297"
|
| 348 |
+
],
|
| 349 |
+
"provenance": []
|
| 350 |
+
},
|
| 351 |
+
"kernelspec": {
|
| 352 |
+
"display_name": "Python 3 (ipykernel)",
|
| 353 |
+
"language": "python",
|
| 354 |
+
"name": "python3"
|
| 355 |
+
},
|
| 356 |
+
"language_info": {
|
| 357 |
+
"codemirror_mode": {
|
| 358 |
+
"name": "ipython",
|
| 359 |
+
"version": 3
|
| 360 |
+
},
|
| 361 |
+
"file_extension": ".py",
|
| 362 |
+
"mimetype": "text/x-python",
|
| 363 |
+
"name": "python",
|
| 364 |
+
"nbconvert_exporter": "python",
|
| 365 |
+
"pygments_lexer": "ipython3",
|
| 366 |
+
"version": "3.10.9"
|
| 367 |
+
}
|
| 368 |
+
},
|
| 369 |
+
"nbformat": 4,
|
| 370 |
+
"nbformat_minor": 4
|
| 371 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/4_Quantization Theory_helper.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
# ################ monkey patch for quanto
|
| 4 |
+
def named_module_tensors(module, recurse=False):
    """Yield (name, tensor) pairs for a module's parameters and buffers.

    Monkey patch for quanto: quantized parameters carry their payload in
    `_data` and `_scale` attributes; for those, yield each inner tensor
    under a suffixed name so that size accounting measures the actual
    stored tensors rather than the wrapper object.

    Args:
        module: torch.nn.Module to inspect.
        recurse: if True, also include tensors of all submodules
            (names are then dotted paths, as with `named_parameters`).

    Yields:
        (name, torch.Tensor) tuples.
    """
    for name, val in module.named_parameters(recurse=recurse):
        if hasattr(val, "_data") or hasattr(val, "_scale"):
            # Quantized parameter: report its inner tensors individually.
            if hasattr(val, "_data"):
                yield name + "._data", val._data
            if hasattr(val, "_scale"):
                yield name + "._scale", val._scale
        else:
            yield name, val

    # Buffers (e.g. running statistics) are plain tensors; yield as-is.
    for named_buffer in module.named_buffers(recurse=recurse):
        yield named_buffer
|
| 18 |
+
|
| 19 |
+
def dtype_byte_size(dtype):
    """Return the storage size, in bytes, of one element of type `dtype`.

    `torch.bool` is reported as 1/8 of a byte (one bit). For every other
    dtype the bit width is parsed from the trailing digits of its string
    name (e.g. "torch.float16" -> 16 bits -> 2 bytes).
    """
    import re

    # Booleans are bit-sized, hence the fractional byte count.
    if dtype == torch.bool:
        return 1 / 8

    match = re.search(r"[^\d](\d+)$", str(dtype))
    if match is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")

    # Integer division: bit widths here are assumed to be multiples of 8.
    return int(match.group(1)) // 8
|
| 31 |
+
|
| 32 |
+
def compute_module_sizes(model):
    """Map each (sub)module's dotted name to its total tensor size in bytes.

    Walks every parameter/buffer tensor of `model` and charges its byte
    count to the tensor's own entry and to every dotted-name prefix of it,
    including the empty string "" for the model as a whole.
    """
    from collections import defaultdict

    sizes = defaultdict(int)
    for name, tensor in named_module_tensors(model, recurse=True):
        nbytes = tensor.numel() * dtype_byte_size(tensor.dtype)
        parts = name.split(".")
        # Accumulate into "", "a", "a.b", ... so parents include children.
        for depth in range(len(parts) + 1):
            sizes[".".join(parts[:depth])] += nbytes

    return sizes
|
Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/README.md
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: apache-2.0
|
| 3 |
+
tags:
|
| 4 |
+
- vision
|
| 5 |
+
- depth-estimation
|
| 6 |
+
widget:
|
| 7 |
+
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
|
| 8 |
+
example_title: Tiger
|
| 9 |
+
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
|
| 10 |
+
example_title: Teapot
|
| 11 |
+
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
|
| 12 |
+
example_title: Palace
|
| 13 |
+
model-index:
|
| 14 |
+
- name: dpt-hybrid-midas
|
| 15 |
+
results:
|
| 16 |
+
- task:
|
| 17 |
+
type: monocular-depth-estimation
|
| 18 |
+
name: Monocular Depth Estimation
|
| 19 |
+
dataset:
|
| 20 |
+
type: MIX-6
|
| 21 |
+
name: MIX-6
|
| 22 |
+
metrics:
|
| 23 |
+
- type: Zero-shot transfer
|
| 24 |
+
value: 11.06
|
| 25 |
+
name: Zero-shot transfer
|
| 26 |
+
config: Zero-shot transfer
|
| 27 |
+
verified: false
|
| 28 |
+
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
## Model Details: DPT-Hybrid (also known as MiDaS 3.0)
|
| 32 |
+
|
| 33 |
+
Dense Prediction Transformer (DPT) model trained on 1.4 million images for monocular depth estimation.
|
| 34 |
+
It was introduced in the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by Ranftl et al. (2021) and first released in [this repository](https://github.com/isl-org/DPT).
|
| 35 |
+
DPT uses the Vision Transformer (ViT) as backbone and adds a neck + head on top for monocular depth estimation.
|
| 36 |
+

|
| 37 |
+
|
| 38 |
+
This repository hosts the "hybrid" version of the model as stated in the paper. DPT-Hybrid diverges from DPT by using [ViT-hybrid](https://huggingface.co/google/vit-hybrid-base-bit-384) as a backbone and taking some activations from the backbone.
|
| 39 |
+
|
| 40 |
+
The model card has been written in combination by the Hugging Face team and Intel.
|
| 41 |
+
|
| 42 |
+
| Model Detail | Description |
|
| 43 |
+
| ----------- | ----------- |
|
| 44 |
+
| Model Authors - Company | Intel |
|
| 45 |
+
| Date | December 22, 2022 |
|
| 46 |
+
| Version | 1 |
|
| 47 |
+
| Type | Computer Vision - Monocular Depth Estimation |
|
| 48 |
+
| Paper or Other Resources | [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) and [GitHub Repo](https://github.com/isl-org/DPT) |
|
| 49 |
+
| License | Apache 2.0 |
|
| 50 |
+
| Questions or Comments | [Community Tab](https://huggingface.co/Intel/dpt-hybrid-midas/discussions) and [Intel Developers Discord](https://discord.gg/rv2Gp55UJQ)|
|
| 51 |
+
|
| 52 |
+
| Intended Use | Description |
|
| 53 |
+
| ----------- | ----------- |
|
| 54 |
+
| Primary intended uses | You can use the raw model for zero-shot monocular depth estimation. See the [model hub](https://huggingface.co/models?search=dpt) to look for fine-tuned versions on a task that interests you. |
|
| 55 |
+
| Primary intended users | Anyone doing monocular depth estimation |
|
| 56 |
+
| Out-of-scope uses | This model in most cases will need to be fine-tuned for your particular task. The model should not be used to intentionally create hostile or alienating environments for people.|
|
| 57 |
+
|
| 58 |
+
### How to use
|
| 59 |
+
|
| 60 |
+
Here is how to use this model for zero-shot depth estimation on an image:
|
| 61 |
+
|
| 62 |
+
```python
|
| 63 |
+
from PIL import Image
|
| 64 |
+
import numpy as np
|
| 65 |
+
import requests
|
| 66 |
+
import torch
|
| 67 |
+
|
| 68 |
+
from transformers import DPTImageProcessor, DPTForDepthEstimation
|
| 69 |
+
|
| 70 |
+
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
|
| 71 |
+
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas", low_cpu_mem_usage=True)
|
| 72 |
+
|
| 73 |
+
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
| 74 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
| 75 |
+
|
| 76 |
+
# prepare image for the model
|
| 77 |
+
inputs = image_processor(images=image, return_tensors="pt")
|
| 78 |
+
|
| 79 |
+
with torch.no_grad():
|
| 80 |
+
outputs = model(**inputs)
|
| 81 |
+
predicted_depth = outputs.predicted_depth
|
| 82 |
+
|
| 83 |
+
# interpolate to original size
|
| 84 |
+
prediction = torch.nn.functional.interpolate(
|
| 85 |
+
predicted_depth.unsqueeze(1),
|
| 86 |
+
size=image.size[::-1],
|
| 87 |
+
mode="bicubic",
|
| 88 |
+
align_corners=False,
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
# visualize the prediction
|
| 92 |
+
output = prediction.squeeze().cpu().numpy()
|
| 93 |
+
formatted = (output * 255 / np.max(output)).astype("uint8")
|
| 94 |
+
depth = Image.fromarray(formatted)
|
| 95 |
+
depth.show()
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/dpt).
|
| 99 |
+
|
| 100 |
+
| Factors | Description |
|
| 101 |
+
| ----------- | ----------- |
|
| 102 |
+
| Groups | Multiple datasets compiled together |
|
| 103 |
+
| Instrumentation | - |
|
| 104 |
+
| Environment | Inference completed on Intel Xeon Platinum 8280 CPU @ 2.70GHz with 8 physical cores and an NVIDIA RTX 2080 GPU. |
|
| 105 |
+
| Card Prompts | Model deployment on alternate hardware and software will change model performance |
|
| 106 |
+
|
| 107 |
+
| Metrics | Description |
|
| 108 |
+
| ----------- | ----------- |
|
| 109 |
+
| Model performance measures | Zero-shot Transfer |
|
| 110 |
+
| Decision thresholds | - |
|
| 111 |
+
| Approaches to uncertainty and variability | - |
|
| 112 |
+
|
| 113 |
+
| Training and Evaluation Data | Description |
|
| 114 |
+
| ----------- | ----------- |
|
| 115 |
+
| Datasets | The dataset is called MIX 6, and contains around 1.4M images. The model was initialized with ImageNet-pretrained weights.|
|
| 116 |
+
| Motivation | To build a robust monocular depth prediction network |
|
| 117 |
+
| Preprocessing | "We resize the image such that the longer side is 384 pixels and train on random square crops of size 384. ... We perform random horizontal flips for data augmentation." See [Ranftl et al. (2021)](https://arxiv.org/abs/2103.13413) for more details. |
|
| 118 |
+
|
| 119 |
+
## Quantitative Analyses
|
| 120 |
+
| Model | Training set | DIW WHDR | ETH3D AbsRel | Sintel AbsRel | KITTI δ>1.25 | NYU δ>1.25 | TUM δ>1.25 |
|
| 121 |
+
| --- | --- | --- | --- | --- | --- | --- | --- |
|
| 122 |
+
| DPT - Large | MIX 6 | 10.82 (-13.2%) | 0.089 (-31.2%) | 0.270 (-17.5%) | 8.46 (-64.6%) | 8.32 (-12.9%) | 9.97 (-30.3%) |
|
| 123 |
+
| DPT - Hybrid | MIX 6 | 11.06 (-11.2%) | 0.093 (-27.6%) | 0.274 (-16.2%) | 11.56 (-51.6%) | 8.69 (-9.0%) | 10.89 (-23.2%) |
|
| 124 |
+
| MiDaS | MIX 6 | 12.95 (+3.9%) | 0.116 (-10.5%) | 0.329 (+0.5%) | 16.08 (-32.7%) | 8.71 (-8.8%) | 12.51 (-12.5%) |
|
| 125 |
+
| MiDaS [30] | MIX 5 | 12.46 | 0.129 | 0.327 | 23.90 | 9.55 | 14.29 |
|
| 126 |
+
| Li [22] | MD [22] | 23.15 | 0.181 | 0.385 | 36.29 | 27.52 | 29.54 |
|
| 127 |
+
| Li [21] | MC [21] | 26.52 | 0.183 | 0.405 | 47.94 | 18.57 | 17.71 |
|
| 128 |
+
| Wang [40] | WS [40] | 19.09 | 0.205 | 0.390 | 31.92 | 29.57 | 20.18 |
|
| 129 |
+
| Xian [45] | RW [45] | 14.59 | 0.186 | 0.422 | 34.08 | 27.00 | 25.02 |
|
| 130 |
+
| Casser [5] | CS [8] | 32.80 | 0.235 | 0.422 | 21.15 | 39.58 | 37.18 |
|
| 131 |
+
|
| 132 |
+
Table 1. Comparison to the state of the art on monocular depth estimation. We evaluate zero-shot cross-dataset transfer according to the
|
| 133 |
+
protocol defined in [30]. Relative performance is computed with respect to the original MiDaS model [30]. Lower is better for all metrics. ([Ranftl et al., 2021](https://arxiv.org/abs/2103.13413))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
| Ethical Considerations | Description |
|
| 137 |
+
| ----------- | ----------- |
|
| 138 |
+
| Data | The training data come from multiple image datasets compiled together. |
|
| 139 |
+
| Human life | The model is not intended to inform decisions central to human life or flourishing. It is an aggregated set of monocular depth image datasets. |
|
| 140 |
+
| Mitigations | No additional risk mitigation strategies were considered during model development. |
|
| 141 |
+
| Risks and harms | The extent of the risks involved by using the model remain unknown. |
|
| 142 |
+
| Use cases | - |
|
| 143 |
+
|
| 144 |
+
| Caveats and Recommendations |
|
| 145 |
+
| ----------- |
|
| 146 |
+
| Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. There are no additional caveats or recommendations for this model. |
|
| 147 |
+
|
| 148 |
+
### BibTeX entry and citation info
|
| 149 |
+
|
| 150 |
+
```bibtex
|
| 151 |
+
@article{DBLP:journals/corr/abs-2103-13413,
|
| 152 |
+
author = {Ren{\'{e}} Ranftl and
|
| 153 |
+
Alexey Bochkovskiy and
|
| 154 |
+
Vladlen Koltun},
|
| 155 |
+
title = {Vision Transformers for Dense Prediction},
|
| 156 |
+
journal = {CoRR},
|
| 157 |
+
volume = {abs/2103.13413},
|
| 158 |
+
year = {2021},
|
| 159 |
+
url = {https://arxiv.org/abs/2103.13413},
|
| 160 |
+
eprinttype = {arXiv},
|
| 161 |
+
eprint = {2103.13413},
|
| 162 |
+
timestamp = {Wed, 07 Apr 2021 15:31:46 +0200},
|
| 163 |
+
biburl = {https://dblp.org/rec/journals/corr/abs-2103-13413.bib},
|
| 164 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 165 |
+
}
|
| 166 |
+
```
|
Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/config.json
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_commit_hash": null,
|
| 3 |
+
"architectures": [
|
| 4 |
+
"DPTForDepthEstimation"
|
| 5 |
+
],
|
| 6 |
+
"attention_probs_dropout_prob": 0.0,
|
| 7 |
+
"auxiliary_loss_weight": 0.4,
|
| 8 |
+
"backbone_config": {
|
| 9 |
+
"_name_or_path": "",
|
| 10 |
+
"add_cross_attention": false,
|
| 11 |
+
"architectures": null,
|
| 12 |
+
"bad_words_ids": null,
|
| 13 |
+
"begin_suppress_tokens": null,
|
| 14 |
+
"bos_token_id": null,
|
| 15 |
+
"chunk_size_feed_forward": 0,
|
| 16 |
+
"cross_attention_hidden_size": null,
|
| 17 |
+
"decoder_start_token_id": null,
|
| 18 |
+
"depths": [
|
| 19 |
+
3,
|
| 20 |
+
4,
|
| 21 |
+
9
|
| 22 |
+
],
|
| 23 |
+
"diversity_penalty": 0.0,
|
| 24 |
+
"do_sample": false,
|
| 25 |
+
"drop_path_rate": 0.0,
|
| 26 |
+
"early_stopping": false,
|
| 27 |
+
"embedding_dynamic_padding": true,
|
| 28 |
+
"embedding_size": 64,
|
| 29 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 30 |
+
"eos_token_id": null,
|
| 31 |
+
"exponential_decay_length_penalty": null,
|
| 32 |
+
"finetuning_task": null,
|
| 33 |
+
"forced_bos_token_id": null,
|
| 34 |
+
"forced_eos_token_id": null,
|
| 35 |
+
"global_padding": "SAME",
|
| 36 |
+
"hidden_act": "relu",
|
| 37 |
+
"hidden_sizes": [
|
| 38 |
+
256,
|
| 39 |
+
512,
|
| 40 |
+
1024,
|
| 41 |
+
2048
|
| 42 |
+
],
|
| 43 |
+
"id2label": {
|
| 44 |
+
"0": "LABEL_0",
|
| 45 |
+
"1": "LABEL_1"
|
| 46 |
+
},
|
| 47 |
+
"is_decoder": false,
|
| 48 |
+
"is_encoder_decoder": false,
|
| 49 |
+
"label2id": {
|
| 50 |
+
"LABEL_0": 0,
|
| 51 |
+
"LABEL_1": 1
|
| 52 |
+
},
|
| 53 |
+
"layer_type": "bottleneck",
|
| 54 |
+
"length_penalty": 1.0,
|
| 55 |
+
"max_length": 20,
|
| 56 |
+
"min_length": 0,
|
| 57 |
+
"model_type": "bit",
|
| 58 |
+
"no_repeat_ngram_size": 0,
|
| 59 |
+
"num_beam_groups": 1,
|
| 60 |
+
"num_beams": 1,
|
| 61 |
+
"num_channels": 3,
|
| 62 |
+
"num_groups": 32,
|
| 63 |
+
"num_return_sequences": 1,
|
| 64 |
+
"out_features": [
|
| 65 |
+
"stage1",
|
| 66 |
+
"stage2",
|
| 67 |
+
"stage3"
|
| 68 |
+
],
|
| 69 |
+
"output_attentions": false,
|
| 70 |
+
"output_hidden_states": false,
|
| 71 |
+
"output_scores": false,
|
| 72 |
+
"output_stride": 32,
|
| 73 |
+
"pad_token_id": null,
|
| 74 |
+
"prefix": null,
|
| 75 |
+
"problem_type": null,
|
| 76 |
+
"pruned_heads": {},
|
| 77 |
+
"remove_invalid_values": false,
|
| 78 |
+
"repetition_penalty": 1.0,
|
| 79 |
+
"return_dict": true,
|
| 80 |
+
"return_dict_in_generate": false,
|
| 81 |
+
"sep_token_id": null,
|
| 82 |
+
"stage_names": [
|
| 83 |
+
"stem",
|
| 84 |
+
"stage1",
|
| 85 |
+
"stage2",
|
| 86 |
+
"stage3"
|
| 87 |
+
],
|
| 88 |
+
"suppress_tokens": null,
|
| 89 |
+
"task_specific_params": null,
|
| 90 |
+
"temperature": 1.0,
|
| 91 |
+
"tf_legacy_loss": false,
|
| 92 |
+
"tie_encoder_decoder": false,
|
| 93 |
+
"tie_word_embeddings": true,
|
| 94 |
+
"tokenizer_class": null,
|
| 95 |
+
"top_k": 50,
|
| 96 |
+
"top_p": 1.0,
|
| 97 |
+
"torch_dtype": null,
|
| 98 |
+
"torchscript": false,
|
| 99 |
+
"transformers_version": "4.26.0.dev0",
|
| 100 |
+
"typical_p": 1.0,
|
| 101 |
+
"use_bfloat16": false,
|
| 102 |
+
"width_factor": 1
|
| 103 |
+
},
|
| 104 |
+
"backbone_featmap_shape": [
|
| 105 |
+
1,
|
| 106 |
+
1024,
|
| 107 |
+
24,
|
| 108 |
+
24
|
| 109 |
+
],
|
| 110 |
+
"backbone_out_indices": [
|
| 111 |
+
2,
|
| 112 |
+
5,
|
| 113 |
+
8,
|
| 114 |
+
11
|
| 115 |
+
],
|
| 116 |
+
"fusion_hidden_size": 256,
|
| 117 |
+
"head_in_index": -1,
|
| 118 |
+
"hidden_act": "gelu",
|
| 119 |
+
"hidden_dropout_prob": 0.0,
|
| 120 |
+
"hidden_size": 768,
|
| 121 |
+
"id2label": {
|
| 122 |
+
"0": "LABEL_0",
|
| 123 |
+
"1": "LABEL_1",
|
| 124 |
+
"2": "LABEL_2",
|
| 125 |
+
"3": "LABEL_3",
|
| 126 |
+
"4": "LABEL_4",
|
| 127 |
+
"5": "LABEL_5",
|
| 128 |
+
"6": "LABEL_6",
|
| 129 |
+
"7": "LABEL_7",
|
| 130 |
+
"8": "LABEL_8",
|
| 131 |
+
"9": "LABEL_9",
|
| 132 |
+
"10": "LABEL_10",
|
| 133 |
+
"11": "LABEL_11",
|
| 134 |
+
"12": "LABEL_12",
|
| 135 |
+
"13": "LABEL_13",
|
| 136 |
+
"14": "LABEL_14",
|
| 137 |
+
"15": "LABEL_15",
|
| 138 |
+
"16": "LABEL_16",
|
| 139 |
+
"17": "LABEL_17",
|
| 140 |
+
"18": "LABEL_18",
|
| 141 |
+
"19": "LABEL_19",
|
| 142 |
+
"20": "LABEL_20",
|
| 143 |
+
"21": "LABEL_21",
|
| 144 |
+
"22": "LABEL_22",
|
| 145 |
+
"23": "LABEL_23",
|
| 146 |
+
"24": "LABEL_24",
|
| 147 |
+
"25": "LABEL_25",
|
| 148 |
+
"26": "LABEL_26",
|
| 149 |
+
"27": "LABEL_27",
|
| 150 |
+
"28": "LABEL_28",
|
| 151 |
+
"29": "LABEL_29",
|
| 152 |
+
"30": "LABEL_30",
|
| 153 |
+
"31": "LABEL_31",
|
| 154 |
+
"32": "LABEL_32",
|
| 155 |
+
"33": "LABEL_33",
|
| 156 |
+
"34": "LABEL_34",
|
| 157 |
+
"35": "LABEL_35",
|
| 158 |
+
"36": "LABEL_36",
|
| 159 |
+
"37": "LABEL_37",
|
| 160 |
+
"38": "LABEL_38",
|
| 161 |
+
"39": "LABEL_39",
|
| 162 |
+
"40": "LABEL_40",
|
| 163 |
+
"41": "LABEL_41",
|
| 164 |
+
"42": "LABEL_42",
|
| 165 |
+
"43": "LABEL_43",
|
| 166 |
+
"44": "LABEL_44",
|
| 167 |
+
"45": "LABEL_45",
|
| 168 |
+
"46": "LABEL_46",
|
| 169 |
+
"47": "LABEL_47",
|
| 170 |
+
"48": "LABEL_48",
|
| 171 |
+
"49": "LABEL_49",
|
| 172 |
+
"50": "LABEL_50",
|
| 173 |
+
"51": "LABEL_51",
|
| 174 |
+
"52": "LABEL_52",
|
| 175 |
+
"53": "LABEL_53",
|
| 176 |
+
"54": "LABEL_54",
|
| 177 |
+
"55": "LABEL_55",
|
| 178 |
+
"56": "LABEL_56",
|
| 179 |
+
"57": "LABEL_57",
|
| 180 |
+
"58": "LABEL_58",
|
| 181 |
+
"59": "LABEL_59",
|
| 182 |
+
"60": "LABEL_60",
|
| 183 |
+
"61": "LABEL_61",
|
| 184 |
+
"62": "LABEL_62",
|
| 185 |
+
"63": "LABEL_63",
|
| 186 |
+
"64": "LABEL_64",
|
| 187 |
+
"65": "LABEL_65",
|
| 188 |
+
"66": "LABEL_66",
|
| 189 |
+
"67": "LABEL_67",
|
| 190 |
+
"68": "LABEL_68",
|
| 191 |
+
"69": "LABEL_69",
|
| 192 |
+
"70": "LABEL_70",
|
| 193 |
+
"71": "LABEL_71",
|
| 194 |
+
"72": "LABEL_72",
|
| 195 |
+
"73": "LABEL_73",
|
| 196 |
+
"74": "LABEL_74",
|
| 197 |
+
"75": "LABEL_75",
|
| 198 |
+
"76": "LABEL_76",
|
| 199 |
+
"77": "LABEL_77",
|
| 200 |
+
"78": "LABEL_78",
|
| 201 |
+
"79": "LABEL_79",
|
| 202 |
+
"80": "LABEL_80",
|
| 203 |
+
"81": "LABEL_81",
|
| 204 |
+
"82": "LABEL_82",
|
| 205 |
+
"83": "LABEL_83",
|
| 206 |
+
"84": "LABEL_84",
|
| 207 |
+
"85": "LABEL_85",
|
| 208 |
+
"86": "LABEL_86",
|
| 209 |
+
"87": "LABEL_87",
|
| 210 |
+
"88": "LABEL_88",
|
| 211 |
+
"89": "LABEL_89",
|
| 212 |
+
"90": "LABEL_90",
|
| 213 |
+
"91": "LABEL_91",
|
| 214 |
+
"92": "LABEL_92",
|
| 215 |
+
"93": "LABEL_93",
|
| 216 |
+
"94": "LABEL_94",
|
| 217 |
+
"95": "LABEL_95",
|
| 218 |
+
"96": "LABEL_96",
|
| 219 |
+
"97": "LABEL_97",
|
| 220 |
+
"98": "LABEL_98",
|
| 221 |
+
"99": "LABEL_99",
|
| 222 |
+
"100": "LABEL_100",
|
| 223 |
+
"101": "LABEL_101",
|
| 224 |
+
"102": "LABEL_102",
|
| 225 |
+
"103": "LABEL_103",
|
| 226 |
+
"104": "LABEL_104",
|
| 227 |
+
"105": "LABEL_105",
|
| 228 |
+
"106": "LABEL_106",
|
| 229 |
+
"107": "LABEL_107",
|
| 230 |
+
"108": "LABEL_108",
|
| 231 |
+
"109": "LABEL_109",
|
| 232 |
+
"110": "LABEL_110",
|
| 233 |
+
"111": "LABEL_111",
|
| 234 |
+
"112": "LABEL_112",
|
| 235 |
+
"113": "LABEL_113",
|
| 236 |
+
"114": "LABEL_114",
|
| 237 |
+
"115": "LABEL_115",
|
| 238 |
+
"116": "LABEL_116",
|
| 239 |
+
"117": "LABEL_117",
|
| 240 |
+
"118": "LABEL_118",
|
| 241 |
+
"119": "LABEL_119",
|
| 242 |
+
"120": "LABEL_120",
|
| 243 |
+
"121": "LABEL_121",
|
| 244 |
+
"122": "LABEL_122",
|
| 245 |
+
"123": "LABEL_123",
|
| 246 |
+
"124": "LABEL_124",
|
| 247 |
+
"125": "LABEL_125",
|
| 248 |
+
"126": "LABEL_126",
|
| 249 |
+
"127": "LABEL_127",
|
| 250 |
+
"128": "LABEL_128",
|
| 251 |
+
"129": "LABEL_129",
|
| 252 |
+
"130": "LABEL_130",
|
| 253 |
+
"131": "LABEL_131",
|
| 254 |
+
"132": "LABEL_132",
|
| 255 |
+
"133": "LABEL_133",
|
| 256 |
+
"134": "LABEL_134",
|
| 257 |
+
"135": "LABEL_135",
|
| 258 |
+
"136": "LABEL_136",
|
| 259 |
+
"137": "LABEL_137",
|
| 260 |
+
"138": "LABEL_138",
|
| 261 |
+
"139": "LABEL_139",
|
| 262 |
+
"140": "LABEL_140",
|
| 263 |
+
"141": "LABEL_141",
|
| 264 |
+
"142": "LABEL_142",
|
| 265 |
+
"143": "LABEL_143",
|
| 266 |
+
"144": "LABEL_144",
|
| 267 |
+
"145": "LABEL_145",
|
| 268 |
+
"146": "LABEL_146",
|
| 269 |
+
"147": "LABEL_147",
|
| 270 |
+
"148": "LABEL_148",
|
| 271 |
+
"149": "LABEL_149"
|
| 272 |
+
},
|
| 273 |
+
"image_size": 384,
|
| 274 |
+
"initializer_range": 0.02,
|
| 275 |
+
"intermediate_size": 3072,
|
| 276 |
+
"is_hybrid": true,
|
| 277 |
+
"label2id": {
|
| 278 |
+
"LABEL_0": 0,
|
| 279 |
+
"LABEL_1": 1,
|
| 280 |
+
"LABEL_10": 10,
|
| 281 |
+
"LABEL_100": 100,
|
| 282 |
+
"LABEL_101": 101,
|
| 283 |
+
"LABEL_102": 102,
|
| 284 |
+
"LABEL_103": 103,
|
| 285 |
+
"LABEL_104": 104,
|
| 286 |
+
"LABEL_105": 105,
|
| 287 |
+
"LABEL_106": 106,
|
| 288 |
+
"LABEL_107": 107,
|
| 289 |
+
"LABEL_108": 108,
|
| 290 |
+
"LABEL_109": 109,
|
| 291 |
+
"LABEL_11": 11,
|
| 292 |
+
"LABEL_110": 110,
|
| 293 |
+
"LABEL_111": 111,
|
| 294 |
+
"LABEL_112": 112,
|
| 295 |
+
"LABEL_113": 113,
|
| 296 |
+
"LABEL_114": 114,
|
| 297 |
+
"LABEL_115": 115,
|
| 298 |
+
"LABEL_116": 116,
|
| 299 |
+
"LABEL_117": 117,
|
| 300 |
+
"LABEL_118": 118,
|
| 301 |
+
"LABEL_119": 119,
|
| 302 |
+
"LABEL_12": 12,
|
| 303 |
+
"LABEL_120": 120,
|
| 304 |
+
"LABEL_121": 121,
|
| 305 |
+
"LABEL_122": 122,
|
| 306 |
+
"LABEL_123": 123,
|
| 307 |
+
"LABEL_124": 124,
|
| 308 |
+
"LABEL_125": 125,
|
| 309 |
+
"LABEL_126": 126,
|
| 310 |
+
"LABEL_127": 127,
|
| 311 |
+
"LABEL_128": 128,
|
| 312 |
+
"LABEL_129": 129,
|
| 313 |
+
"LABEL_13": 13,
|
| 314 |
+
"LABEL_130": 130,
|
| 315 |
+
"LABEL_131": 131,
|
| 316 |
+
"LABEL_132": 132,
|
| 317 |
+
"LABEL_133": 133,
|
| 318 |
+
"LABEL_134": 134,
|
| 319 |
+
"LABEL_135": 135,
|
| 320 |
+
"LABEL_136": 136,
|
| 321 |
+
"LABEL_137": 137,
|
| 322 |
+
"LABEL_138": 138,
|
| 323 |
+
"LABEL_139": 139,
|
| 324 |
+
"LABEL_14": 14,
|
| 325 |
+
"LABEL_140": 140,
|
| 326 |
+
"LABEL_141": 141,
|
| 327 |
+
"LABEL_142": 142,
|
| 328 |
+
"LABEL_143": 143,
|
| 329 |
+
"LABEL_144": 144,
|
| 330 |
+
"LABEL_145": 145,
|
| 331 |
+
"LABEL_146": 146,
|
| 332 |
+
"LABEL_147": 147,
|
| 333 |
+
"LABEL_148": 148,
|
| 334 |
+
"LABEL_149": 149,
|
| 335 |
+
"LABEL_15": 15,
|
| 336 |
+
"LABEL_16": 16,
|
| 337 |
+
"LABEL_17": 17,
|
| 338 |
+
"LABEL_18": 18,
|
| 339 |
+
"LABEL_19": 19,
|
| 340 |
+
"LABEL_2": 2,
|
| 341 |
+
"LABEL_20": 20,
|
| 342 |
+
"LABEL_21": 21,
|
| 343 |
+
"LABEL_22": 22,
|
| 344 |
+
"LABEL_23": 23,
|
| 345 |
+
"LABEL_24": 24,
|
| 346 |
+
"LABEL_25": 25,
|
| 347 |
+
"LABEL_26": 26,
|
| 348 |
+
"LABEL_27": 27,
|
| 349 |
+
"LABEL_28": 28,
|
| 350 |
+
"LABEL_29": 29,
|
| 351 |
+
"LABEL_3": 3,
|
| 352 |
+
"LABEL_30": 30,
|
| 353 |
+
"LABEL_31": 31,
|
| 354 |
+
"LABEL_32": 32,
|
| 355 |
+
"LABEL_33": 33,
|
| 356 |
+
"LABEL_34": 34,
|
| 357 |
+
"LABEL_35": 35,
|
| 358 |
+
"LABEL_36": 36,
|
| 359 |
+
"LABEL_37": 37,
|
| 360 |
+
"LABEL_38": 38,
|
| 361 |
+
"LABEL_39": 39,
|
| 362 |
+
"LABEL_4": 4,
|
| 363 |
+
"LABEL_40": 40,
|
| 364 |
+
"LABEL_41": 41,
|
| 365 |
+
"LABEL_42": 42,
|
| 366 |
+
"LABEL_43": 43,
|
| 367 |
+
"LABEL_44": 44,
|
| 368 |
+
"LABEL_45": 45,
|
| 369 |
+
"LABEL_46": 46,
|
| 370 |
+
"LABEL_47": 47,
|
| 371 |
+
"LABEL_48": 48,
|
| 372 |
+
"LABEL_49": 49,
|
| 373 |
+
"LABEL_5": 5,
|
| 374 |
+
"LABEL_50": 50,
|
| 375 |
+
"LABEL_51": 51,
|
| 376 |
+
"LABEL_52": 52,
|
| 377 |
+
"LABEL_53": 53,
|
| 378 |
+
"LABEL_54": 54,
|
| 379 |
+
"LABEL_55": 55,
|
| 380 |
+
"LABEL_56": 56,
|
| 381 |
+
"LABEL_57": 57,
|
| 382 |
+
"LABEL_58": 58,
|
| 383 |
+
"LABEL_59": 59,
|
| 384 |
+
"LABEL_6": 6,
|
| 385 |
+
"LABEL_60": 60,
|
| 386 |
+
"LABEL_61": 61,
|
| 387 |
+
"LABEL_62": 62,
|
| 388 |
+
"LABEL_63": 63,
|
| 389 |
+
"LABEL_64": 64,
|
| 390 |
+
"LABEL_65": 65,
|
| 391 |
+
"LABEL_66": 66,
|
| 392 |
+
"LABEL_67": 67,
|
| 393 |
+
"LABEL_68": 68,
|
| 394 |
+
"LABEL_69": 69,
|
| 395 |
+
"LABEL_7": 7,
|
| 396 |
+
"LABEL_70": 70,
|
| 397 |
+
"LABEL_71": 71,
|
| 398 |
+
"LABEL_72": 72,
|
| 399 |
+
"LABEL_73": 73,
|
| 400 |
+
"LABEL_74": 74,
|
| 401 |
+
"LABEL_75": 75,
|
| 402 |
+
"LABEL_76": 76,
|
| 403 |
+
"LABEL_77": 77,
|
| 404 |
+
"LABEL_78": 78,
|
| 405 |
+
"LABEL_79": 79,
|
| 406 |
+
"LABEL_8": 8,
|
| 407 |
+
"LABEL_80": 80,
|
| 408 |
+
"LABEL_81": 81,
|
| 409 |
+
"LABEL_82": 82,
|
| 410 |
+
"LABEL_83": 83,
|
| 411 |
+
"LABEL_84": 84,
|
| 412 |
+
"LABEL_85": 85,
|
| 413 |
+
"LABEL_86": 86,
|
| 414 |
+
"LABEL_87": 87,
|
| 415 |
+
"LABEL_88": 88,
|
| 416 |
+
"LABEL_89": 89,
|
| 417 |
+
"LABEL_9": 9,
|
| 418 |
+
"LABEL_90": 90,
|
| 419 |
+
"LABEL_91": 91,
|
| 420 |
+
"LABEL_92": 92,
|
| 421 |
+
"LABEL_93": 93,
|
| 422 |
+
"LABEL_94": 94,
|
| 423 |
+
"LABEL_95": 95,
|
| 424 |
+
"LABEL_96": 96,
|
| 425 |
+
"LABEL_97": 97,
|
| 426 |
+
"LABEL_98": 98,
|
| 427 |
+
"LABEL_99": 99
|
| 428 |
+
},
|
| 429 |
+
"layer_norm_eps": 1e-12,
|
| 430 |
+
"model_type": "dpt",
|
| 431 |
+
"neck_hidden_sizes": [
|
| 432 |
+
256,
|
| 433 |
+
512,
|
| 434 |
+
768,
|
| 435 |
+
768
|
| 436 |
+
],
|
| 437 |
+
"neck_ignore_stages": [
|
| 438 |
+
0,
|
| 439 |
+
1
|
| 440 |
+
],
|
| 441 |
+
"num_attention_heads": 12,
|
| 442 |
+
"num_channels": 3,
|
| 443 |
+
"num_hidden_layers": 12,
|
| 444 |
+
"patch_size": 16,
|
| 445 |
+
"qkv_bias": true,
|
| 446 |
+
"readout_type": "project",
|
| 447 |
+
"reassemble_factors": [
|
| 448 |
+
1,
|
| 449 |
+
1,
|
| 450 |
+
1,
|
| 451 |
+
0.5
|
| 452 |
+
],
|
| 453 |
+
"semantic_classifier_dropout": 0.1,
|
| 454 |
+
"semantic_loss_ignore_index": 255,
|
| 455 |
+
"torch_dtype": "float32",
|
| 456 |
+
"transformers_version": null,
|
| 457 |
+
"use_auxiliary_head": true,
|
| 458 |
+
"use_batch_norm_in_fusion_residual": false
|
| 459 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/preprocessor_config.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"do_normalize": true,
|
| 3 |
+
"do_rescale": true,
|
| 4 |
+
"do_resize": true,
|
| 5 |
+
"ensure_multiple_of": 1,
|
| 6 |
+
"image_mean": [
|
| 7 |
+
0.5,
|
| 8 |
+
0.5,
|
| 9 |
+
0.5
|
| 10 |
+
],
|
| 11 |
+
"image_processor_type": "DPTImageProcessor",
|
| 12 |
+
"image_std": [
|
| 13 |
+
0.5,
|
| 14 |
+
0.5,
|
| 15 |
+
0.5
|
| 16 |
+
],
|
| 17 |
+
"keep_aspect_ratio": false,
|
| 18 |
+
"resample": 2,
|
| 19 |
+
"rescale_factor": 0.00392156862745098,
|
| 20 |
+
"size": {
|
| 21 |
+
"height": 384,
|
| 22 |
+
"width": 384
|
| 23 |
+
}
|
| 24 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Intel/dpt-hybrid-midas/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b6c4d44f9d96ca3fa76dd3bbb153989a60b4ad5526559f3c598562a368d687ec
|
| 3 |
+
size 489648389
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/README.md
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
pipeline_tag: image-to-text
|
| 3 |
+
tags:
|
| 4 |
+
- image-captioning
|
| 5 |
+
languages:
|
| 6 |
+
- en
|
| 7 |
+
license: bsd-3-clause
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
|
| 11 |
+
|
| 12 |
+
Model card for image captioning pretrained on COCO dataset - base architecture (with ViT base backbone).
|
| 13 |
+
|
| 14 |
+
|  |
|
| 15 |
+
|:--:|
|
| 16 |
+
| <b> Pull figure from BLIP official repo | Image source: https://github.com/salesforce/BLIP </b>|
|
| 17 |
+
|
| 18 |
+
## TL;DR
|
| 19 |
+
|
| 20 |
+
Authors from the [paper](https://arxiv.org/abs/2201.12086) write in the abstract:
|
| 21 |
+
|
| 22 |
+
*Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to videolanguage tasks in a zero-shot manner. Code, models, and datasets are released.*
|
| 23 |
+
|
| 24 |
+
## Usage
|
| 25 |
+
|
| 26 |
+
You can use this model for conditional and un-conditional image captioning
|
| 27 |
+
|
| 28 |
+
### Using the Pytorch model
|
| 29 |
+
|
| 30 |
+
#### Running the model on CPU
|
| 31 |
+
|
| 32 |
+
<details>
|
| 33 |
+
<summary> Click to expand </summary>
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
import requests
|
| 37 |
+
from PIL import Image
|
| 38 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 39 |
+
|
| 40 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
|
| 41 |
+
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
|
| 42 |
+
|
| 43 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 44 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 45 |
+
|
| 46 |
+
# conditional image captioning
|
| 47 |
+
text = "a photography of"
|
| 48 |
+
inputs = processor(raw_image, text, return_tensors="pt")
|
| 49 |
+
|
| 50 |
+
out = model.generate(**inputs)
|
| 51 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 52 |
+
# >>> a photography of a woman and her dog
|
| 53 |
+
|
| 54 |
+
# unconditional image captioning
|
| 55 |
+
inputs = processor(raw_image, return_tensors="pt")
|
| 56 |
+
|
| 57 |
+
out = model.generate(**inputs)
|
| 58 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 59 |
+
>>> a woman sitting on the beach with her dog
|
| 60 |
+
```
|
| 61 |
+
</details>
|
| 62 |
+
|
| 63 |
+
#### Running the model on GPU
|
| 64 |
+
|
| 65 |
+
##### In full precision
|
| 66 |
+
|
| 67 |
+
<details>
|
| 68 |
+
<summary> Click to expand </summary>
|
| 69 |
+
|
| 70 |
+
```python
|
| 71 |
+
import requests
|
| 72 |
+
from PIL import Image
|
| 73 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 74 |
+
|
| 75 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
|
| 76 |
+
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cuda")
|
| 77 |
+
|
| 78 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 79 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 80 |
+
|
| 81 |
+
# conditional image captioning
|
| 82 |
+
text = "a photography of"
|
| 83 |
+
inputs = processor(raw_image, text, return_tensors="pt").to("cuda")
|
| 84 |
+
|
| 85 |
+
out = model.generate(**inputs)
|
| 86 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 87 |
+
# >>> a photography of a woman and her dog
|
| 88 |
+
|
| 89 |
+
# unconditional image captioning
|
| 90 |
+
inputs = processor(raw_image, return_tensors="pt").to("cuda")
|
| 91 |
+
|
| 92 |
+
out = model.generate(**inputs)
|
| 93 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 94 |
+
>>> a woman sitting on the beach with her dog
|
| 95 |
+
```
|
| 96 |
+
</details>
|
| 97 |
+
|
| 98 |
+
##### In half precision (`float16`)
|
| 99 |
+
|
| 100 |
+
<details>
|
| 101 |
+
<summary> Click to expand </summary>
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
import torch
|
| 105 |
+
import requests
|
| 106 |
+
from PIL import Image
|
| 107 |
+
from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 108 |
+
|
| 109 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
|
| 110 |
+
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16).to("cuda")
|
| 111 |
+
|
| 112 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 113 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 114 |
+
|
| 115 |
+
# conditional image captioning
|
| 116 |
+
text = "a photography of"
|
| 117 |
+
inputs = processor(raw_image, text, return_tensors="pt").to("cuda", torch.float16)
|
| 118 |
+
|
| 119 |
+
out = model.generate(**inputs)
|
| 120 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 121 |
+
# >>> a photography of a woman and her dog
|
| 122 |
+
|
| 123 |
+
# unconditional image captioning
|
| 124 |
+
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
|
| 125 |
+
|
| 126 |
+
out = model.generate(**inputs)
|
| 127 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 128 |
+
>>> a woman sitting on the beach with her dog
|
| 129 |
+
```
|
| 130 |
+
</details>
|
| 131 |
+
|
| 132 |
+
## BibTex and citation info
|
| 133 |
+
|
| 134 |
+
```
|
| 135 |
+
@misc{https://doi.org/10.48550/arxiv.2201.12086,
|
| 136 |
+
doi = {10.48550/ARXIV.2201.12086},
|
| 137 |
+
|
| 138 |
+
url = {https://arxiv.org/abs/2201.12086},
|
| 139 |
+
|
| 140 |
+
author = {Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
|
| 141 |
+
|
| 142 |
+
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
|
| 143 |
+
|
| 144 |
+
title = {BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation},
|
| 145 |
+
|
| 146 |
+
publisher = {arXiv},
|
| 147 |
+
|
| 148 |
+
year = {2022},
|
| 149 |
+
|
| 150 |
+
copyright = {Creative Commons Attribution 4.0 International}
|
| 151 |
+
}
|
| 152 |
+
```
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/config.json
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_commit_hash": null,
|
| 3 |
+
"architectures": [
|
| 4 |
+
"BlipForConditionalGeneration"
|
| 5 |
+
],
|
| 6 |
+
"image_text_hidden_size": 256,
|
| 7 |
+
"initializer_factor": 1.0,
|
| 8 |
+
"logit_scale_init_value": 2.6592,
|
| 9 |
+
"model_type": "blip",
|
| 10 |
+
"projection_dim": 512,
|
| 11 |
+
"text_config": {
|
| 12 |
+
"_name_or_path": "",
|
| 13 |
+
"add_cross_attention": false,
|
| 14 |
+
"architectures": null,
|
| 15 |
+
"attention_probs_dropout_prob": 0.0,
|
| 16 |
+
"bad_words_ids": null,
|
| 17 |
+
"begin_suppress_tokens": null,
|
| 18 |
+
"bos_token_id": 30522,
|
| 19 |
+
"chunk_size_feed_forward": 0,
|
| 20 |
+
"cross_attention_hidden_size": null,
|
| 21 |
+
"decoder_start_token_id": null,
|
| 22 |
+
"diversity_penalty": 0.0,
|
| 23 |
+
"do_sample": false,
|
| 24 |
+
"early_stopping": false,
|
| 25 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 26 |
+
"eos_token_id": 2,
|
| 27 |
+
"exponential_decay_length_penalty": null,
|
| 28 |
+
"finetuning_task": null,
|
| 29 |
+
"forced_bos_token_id": null,
|
| 30 |
+
"forced_eos_token_id": null,
|
| 31 |
+
"hidden_act": "gelu",
|
| 32 |
+
"hidden_dropout_prob": 0.0,
|
| 33 |
+
"hidden_size": 768,
|
| 34 |
+
"id2label": {
|
| 35 |
+
"0": "LABEL_0",
|
| 36 |
+
"1": "LABEL_1"
|
| 37 |
+
},
|
| 38 |
+
"initializer_factor": 1.0,
|
| 39 |
+
"initializer_range": 0.02,
|
| 40 |
+
"intermediate_size": 3072,
|
| 41 |
+
"is_decoder": true,
|
| 42 |
+
"is_encoder_decoder": false,
|
| 43 |
+
"label2id": {
|
| 44 |
+
"LABEL_0": 0,
|
| 45 |
+
"LABEL_1": 1
|
| 46 |
+
},
|
| 47 |
+
"layer_norm_eps": 1e-12,
|
| 48 |
+
"length_penalty": 1.0,
|
| 49 |
+
"max_length": 20,
|
| 50 |
+
"max_position_embeddings": 512,
|
| 51 |
+
"min_length": 0,
|
| 52 |
+
"model_type": "blip_text_model",
|
| 53 |
+
"no_repeat_ngram_size": 0,
|
| 54 |
+
"num_attention_heads": 12,
|
| 55 |
+
"num_beam_groups": 1,
|
| 56 |
+
"num_beams": 1,
|
| 57 |
+
"num_hidden_layers": 12,
|
| 58 |
+
"num_return_sequences": 1,
|
| 59 |
+
"output_attentions": false,
|
| 60 |
+
"output_hidden_states": false,
|
| 61 |
+
"output_scores": false,
|
| 62 |
+
"pad_token_id": 0,
|
| 63 |
+
"prefix": null,
|
| 64 |
+
"problem_type": null,
|
| 65 |
+
"projection_dim": 768,
|
| 66 |
+
"pruned_heads": {},
|
| 67 |
+
"remove_invalid_values": false,
|
| 68 |
+
"repetition_penalty": 1.0,
|
| 69 |
+
"return_dict": true,
|
| 70 |
+
"return_dict_in_generate": false,
|
| 71 |
+
"sep_token_id": 102,
|
| 72 |
+
"suppress_tokens": null,
|
| 73 |
+
"task_specific_params": null,
|
| 74 |
+
"temperature": 1.0,
|
| 75 |
+
"tf_legacy_loss": false,
|
| 76 |
+
"tie_encoder_decoder": false,
|
| 77 |
+
"tie_word_embeddings": true,
|
| 78 |
+
"tokenizer_class": null,
|
| 79 |
+
"top_k": 50,
|
| 80 |
+
"top_p": 1.0,
|
| 81 |
+
"torch_dtype": null,
|
| 82 |
+
"torchscript": false,
|
| 83 |
+
"transformers_version": "4.26.0.dev0",
|
| 84 |
+
"typical_p": 1.0,
|
| 85 |
+
"use_bfloat16": false,
|
| 86 |
+
"use_cache": true,
|
| 87 |
+
"vocab_size": 30524
|
| 88 |
+
},
|
| 89 |
+
"torch_dtype": "float32",
|
| 90 |
+
"transformers_version": null,
|
| 91 |
+
"vision_config": {
|
| 92 |
+
"_name_or_path": "",
|
| 93 |
+
"add_cross_attention": false,
|
| 94 |
+
"architectures": null,
|
| 95 |
+
"attention_dropout": 0.0,
|
| 96 |
+
"bad_words_ids": null,
|
| 97 |
+
"begin_suppress_tokens": null,
|
| 98 |
+
"bos_token_id": null,
|
| 99 |
+
"chunk_size_feed_forward": 0,
|
| 100 |
+
"cross_attention_hidden_size": null,
|
| 101 |
+
"decoder_start_token_id": null,
|
| 102 |
+
"diversity_penalty": 0.0,
|
| 103 |
+
"do_sample": false,
|
| 104 |
+
"dropout": 0.0,
|
| 105 |
+
"early_stopping": false,
|
| 106 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 107 |
+
"eos_token_id": null,
|
| 108 |
+
"exponential_decay_length_penalty": null,
|
| 109 |
+
"finetuning_task": null,
|
| 110 |
+
"forced_bos_token_id": null,
|
| 111 |
+
"forced_eos_token_id": null,
|
| 112 |
+
"hidden_act": "gelu",
|
| 113 |
+
"hidden_size": 768,
|
| 114 |
+
"id2label": {
|
| 115 |
+
"0": "LABEL_0",
|
| 116 |
+
"1": "LABEL_1"
|
| 117 |
+
},
|
| 118 |
+
"image_size": 384,
|
| 119 |
+
"initializer_factor": 1.0,
|
| 120 |
+
"initializer_range": 0.02,
|
| 121 |
+
"intermediate_size": 3072,
|
| 122 |
+
"is_decoder": false,
|
| 123 |
+
"is_encoder_decoder": false,
|
| 124 |
+
"label2id": {
|
| 125 |
+
"LABEL_0": 0,
|
| 126 |
+
"LABEL_1": 1
|
| 127 |
+
},
|
| 128 |
+
"layer_norm_eps": 1e-05,
|
| 129 |
+
"length_penalty": 1.0,
|
| 130 |
+
"max_length": 20,
|
| 131 |
+
"min_length": 0,
|
| 132 |
+
"model_type": "blip_vision_model",
|
| 133 |
+
"no_repeat_ngram_size": 0,
|
| 134 |
+
"num_attention_heads": 12,
|
| 135 |
+
"num_beam_groups": 1,
|
| 136 |
+
"num_beams": 1,
|
| 137 |
+
"num_channels": 3,
|
| 138 |
+
"num_hidden_layers": 12,
|
| 139 |
+
"num_return_sequences": 1,
|
| 140 |
+
"output_attentions": false,
|
| 141 |
+
"output_hidden_states": false,
|
| 142 |
+
"output_scores": false,
|
| 143 |
+
"pad_token_id": null,
|
| 144 |
+
"patch_size": 16,
|
| 145 |
+
"prefix": null,
|
| 146 |
+
"problem_type": null,
|
| 147 |
+
"projection_dim": 512,
|
| 148 |
+
"pruned_heads": {},
|
| 149 |
+
"remove_invalid_values": false,
|
| 150 |
+
"repetition_penalty": 1.0,
|
| 151 |
+
"return_dict": true,
|
| 152 |
+
"return_dict_in_generate": false,
|
| 153 |
+
"sep_token_id": null,
|
| 154 |
+
"suppress_tokens": null,
|
| 155 |
+
"task_specific_params": null,
|
| 156 |
+
"temperature": 1.0,
|
| 157 |
+
"tf_legacy_loss": false,
|
| 158 |
+
"tie_encoder_decoder": false,
|
| 159 |
+
"tie_word_embeddings": true,
|
| 160 |
+
"tokenizer_class": null,
|
| 161 |
+
"top_k": 50,
|
| 162 |
+
"top_p": 1.0,
|
| 163 |
+
"torch_dtype": null,
|
| 164 |
+
"torchscript": false,
|
| 165 |
+
"transformers_version": "4.26.0.dev0",
|
| 166 |
+
"typical_p": 1.0,
|
| 167 |
+
"use_bfloat16": false
|
| 168 |
+
}
|
| 169 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/preprocessor_config.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"do_normalize": true,
|
| 3 |
+
"do_resize": true,
|
| 4 |
+
"image_mean": [
|
| 5 |
+
0.48145466,
|
| 6 |
+
0.4578275,
|
| 7 |
+
0.40821073
|
| 8 |
+
],
|
| 9 |
+
"image_processor_type": "BlipImageProcessor",
|
| 10 |
+
"image_std": [
|
| 11 |
+
0.26862954,
|
| 12 |
+
0.26130258,
|
| 13 |
+
0.27577711
|
| 14 |
+
],
|
| 15 |
+
"processor_class": "BlipProcessor",
|
| 16 |
+
"size": 384
|
| 17 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d6638651a5526cc2ede56f2b5104d6851b0755816d220e5e046870430180c767
|
| 3 |
+
size 989820849
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/special_tokens_map.json
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cls_token": "[CLS]",
|
| 3 |
+
"mask_token": "[MASK]",
|
| 4 |
+
"pad_token": "[PAD]",
|
| 5 |
+
"sep_token": "[SEP]",
|
| 6 |
+
"unk_token": "[UNK]"
|
| 7 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/tokenizer_config.json
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cls_token": "[CLS]",
|
| 3 |
+
"do_basic_tokenize": true,
|
| 4 |
+
"do_lower_case": true,
|
| 5 |
+
"mask_token": "[MASK]",
|
| 6 |
+
"model_max_length": 512,
|
| 7 |
+
"name_or_path": "bert-base-uncased",
|
| 8 |
+
"never_split": null,
|
| 9 |
+
"pad_token": "[PAD]",
|
| 10 |
+
"processor_class": "BlipProcessor",
|
| 11 |
+
"sep_token": "[SEP]",
|
| 12 |
+
"special_tokens_map_file": null,
|
| 13 |
+
"strip_accents": null,
|
| 14 |
+
"tokenize_chinese_chars": true,
|
| 15 |
+
"tokenizer_class": "BertTokenizer",
|
| 16 |
+
"unk_token": "[UNK]",
|
| 17 |
+
"model_input_names": [
|
| 18 |
+
"input_ids",
|
| 19 |
+
"attention_mask"
|
| 20 |
+
]
|
| 21 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-image-captioning-base/vocab.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/README.md
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
pipeline_tags: 'other'
|
| 3 |
+
tags:
|
| 4 |
+
- image-text-matching
|
| 5 |
+
languages:
|
| 6 |
+
- en
|
| 7 |
+
license: bsd-3-clause
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
|
| 11 |
+
|
| 12 |
+
Model card for BLIP trained on image-text matching - base architecture (with ViT base backbone) trained on COCO dataset.
|
| 13 |
+
|
| 14 |
+
|  |
|
| 15 |
+
|:--:|
|
| 16 |
+
| <b> Pull figure from BLIP official repo | Image source: https://github.com/salesforce/BLIP </b>|
|
| 17 |
+
|
| 18 |
+
## TL;DR
|
| 19 |
+
|
| 20 |
+
Authors from the [paper](https://arxiv.org/abs/2201.12086) write in the abstract:
|
| 21 |
+
|
| 22 |
+
*Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to videolanguage tasks in a zero-shot manner. Code, models, and datasets are released.*
|
| 23 |
+
|
| 24 |
+
## Usage
|
| 25 |
+
|
| 26 |
+
You can use this model for conditional and un-conditional image captioning
|
| 27 |
+
|
| 28 |
+
### Using the Pytorch model
|
| 29 |
+
|
| 30 |
+
#### Running the model on CPU
|
| 31 |
+
|
| 32 |
+
<details>
|
| 33 |
+
<summary> Click to expand </summary>
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
import requests
|
| 37 |
+
from PIL import Image
|
| 38 |
+
from transformers import BlipProcessor, BlipForImageTextRetrieval
|
| 39 |
+
|
| 40 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
|
| 41 |
+
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
|
| 42 |
+
|
| 43 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 44 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 45 |
+
|
| 46 |
+
question = "A woman and a dog sitting together in a beach."
|
| 47 |
+
inputs = processor(raw_image, question, return_tensors="pt")
|
| 48 |
+
|
| 49 |
+
itm_scores = model(**inputs)[0]
|
| 50 |
+
cosine_score = model(**inputs, use_itm_head=False)[0]
|
| 51 |
+
```
|
| 52 |
+
</details>
|
| 53 |
+
|
| 54 |
+
#### Running the model on GPU
|
| 55 |
+
|
| 56 |
+
##### In full precision
|
| 57 |
+
|
| 58 |
+
<details>
|
| 59 |
+
<summary> Click to expand </summary>
|
| 60 |
+
|
| 61 |
+
```python
|
| 62 |
+
import requests
|
| 63 |
+
from PIL import Image
|
| 64 |
+
from transformers import BlipProcessor, BlipForImageTextRetrieval
|
| 65 |
+
|
| 66 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
|
| 67 |
+
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco").to("cuda")
|
| 68 |
+
|
| 69 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 70 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 71 |
+
|
| 72 |
+
question = "A woman and a dog sitting together in a beach."
|
| 73 |
+
inputs = processor(raw_image, question, return_tensors="pt").to("cuda")
|
| 74 |
+
|
| 75 |
+
itm_scores = model(**inputs)[0]
|
| 76 |
+
cosine_score = model(**inputs, use_itm_head=False)[0]
|
| 77 |
+
```
|
| 78 |
+
</details>
|
| 79 |
+
|
| 80 |
+
##### In half precision (`float16`)
|
| 81 |
+
|
| 82 |
+
<details>
|
| 83 |
+
<summary> Click to expand </summary>
|
| 84 |
+
|
| 85 |
+
```python
|
| 86 |
+
import torch
|
| 87 |
+
import requests
|
| 88 |
+
from PIL import Image
|
| 89 |
+
from transformers import BlipProcessor, BlipForImageTextRetrieval
|
| 90 |
+
|
| 91 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
|
| 92 |
+
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco", torch_dtype=torch.float16).to("cuda")
|
| 93 |
+
|
| 94 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 95 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 96 |
+
|
| 97 |
+
question = "A woman and a dog sitting together in a beach."
|
| 98 |
+
inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)
|
| 99 |
+
|
| 100 |
+
itm_scores = model(**inputs)[0]
|
| 101 |
+
cosine_score = model(**inputs, use_itm_head=False)[0]
|
| 102 |
+
```
|
| 103 |
+
</details>
|
| 104 |
+
|
| 105 |
+
## BibTex and citation info
|
| 106 |
+
|
| 107 |
+
```
|
| 108 |
+
@misc{https://doi.org/10.48550/arxiv.2201.12086,
|
| 109 |
+
doi = {10.48550/ARXIV.2201.12086},
|
| 110 |
+
|
| 111 |
+
url = {https://arxiv.org/abs/2201.12086},
|
| 112 |
+
|
| 113 |
+
author = {Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
|
| 114 |
+
|
| 115 |
+
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
|
| 116 |
+
|
| 117 |
+
title = {BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation},
|
| 118 |
+
|
| 119 |
+
publisher = {arXiv},
|
| 120 |
+
|
| 121 |
+
year = {2022},
|
| 122 |
+
|
| 123 |
+
copyright = {Creative Commons Attribution 4.0 International}
|
| 124 |
+
}
|
| 125 |
+
```
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/config.json
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_commit_hash": null,
|
| 3 |
+
"architectures": [
|
| 4 |
+
"BlipForImageTextRetrieval"
|
| 5 |
+
],
|
| 6 |
+
"image_text_hidden_size": 256,
|
| 7 |
+
"initializer_factor": 1.0,
|
| 8 |
+
"logit_scale_init_value": 2.6592,
|
| 9 |
+
"model_type": "blip",
|
| 10 |
+
"projection_dim": 512,
|
| 11 |
+
"text_config": {
|
| 12 |
+
"_name_or_path": "",
|
| 13 |
+
"add_cross_attention": false,
|
| 14 |
+
"architectures": null,
|
| 15 |
+
"attention_probs_dropout_prob": 0.0,
|
| 16 |
+
"bad_words_ids": null,
|
| 17 |
+
"begin_suppress_tokens": null,
|
| 18 |
+
"bos_token_id": 30522,
|
| 19 |
+
"chunk_size_feed_forward": 0,
|
| 20 |
+
"cross_attention_hidden_size": null,
|
| 21 |
+
"decoder_start_token_id": null,
|
| 22 |
+
"diversity_penalty": 0.0,
|
| 23 |
+
"do_sample": false,
|
| 24 |
+
"early_stopping": false,
|
| 25 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 26 |
+
"eos_token_id": 2,
|
| 27 |
+
"exponential_decay_length_penalty": null,
|
| 28 |
+
"finetuning_task": null,
|
| 29 |
+
"forced_bos_token_id": null,
|
| 30 |
+
"forced_eos_token_id": null,
|
| 31 |
+
"hidden_act": "gelu",
|
| 32 |
+
"hidden_dropout_prob": 0.0,
|
| 33 |
+
"hidden_size": 768,
|
| 34 |
+
"id2label": {
|
| 35 |
+
"0": "LABEL_0",
|
| 36 |
+
"1": "LABEL_1"
|
| 37 |
+
},
|
| 38 |
+
"initializer_factor": 1.0,
|
| 39 |
+
"initializer_range": 0.02,
|
| 40 |
+
"intermediate_size": 3072,
|
| 41 |
+
"is_decoder": true,
|
| 42 |
+
"is_encoder_decoder": false,
|
| 43 |
+
"label2id": {
|
| 44 |
+
"LABEL_0": 0,
|
| 45 |
+
"LABEL_1": 1
|
| 46 |
+
},
|
| 47 |
+
"layer_norm_eps": 1e-12,
|
| 48 |
+
"length_penalty": 1.0,
|
| 49 |
+
"max_length": 20,
|
| 50 |
+
"max_position_embeddings": 512,
|
| 51 |
+
"min_length": 0,
|
| 52 |
+
"model_type": "blip_text_model",
|
| 53 |
+
"no_repeat_ngram_size": 0,
|
| 54 |
+
"num_attention_heads": 12,
|
| 55 |
+
"num_beam_groups": 1,
|
| 56 |
+
"num_beams": 1,
|
| 57 |
+
"num_hidden_layers": 12,
|
| 58 |
+
"num_return_sequences": 1,
|
| 59 |
+
"output_attentions": false,
|
| 60 |
+
"output_hidden_states": false,
|
| 61 |
+
"output_scores": false,
|
| 62 |
+
"pad_token_id": 0,
|
| 63 |
+
"prefix": null,
|
| 64 |
+
"problem_type": null,
|
| 65 |
+
"projection_dim": 768,
|
| 66 |
+
"pruned_heads": {},
|
| 67 |
+
"remove_invalid_values": false,
|
| 68 |
+
"repetition_penalty": 1.0,
|
| 69 |
+
"return_dict": true,
|
| 70 |
+
"return_dict_in_generate": false,
|
| 71 |
+
"sep_token_id": 102,
|
| 72 |
+
"suppress_tokens": null,
|
| 73 |
+
"task_specific_params": null,
|
| 74 |
+
"temperature": 1.0,
|
| 75 |
+
"tf_legacy_loss": false,
|
| 76 |
+
"tie_encoder_decoder": false,
|
| 77 |
+
"tie_word_embeddings": true,
|
| 78 |
+
"tokenizer_class": null,
|
| 79 |
+
"top_k": 50,
|
| 80 |
+
"top_p": 1.0,
|
| 81 |
+
"torch_dtype": null,
|
| 82 |
+
"torchscript": false,
|
| 83 |
+
"transformers_version": "4.26.0.dev0",
|
| 84 |
+
"typical_p": 1.0,
|
| 85 |
+
"use_bfloat16": false,
|
| 86 |
+
"use_cache": true,
|
| 87 |
+
"vocab_size": 30524
|
| 88 |
+
},
|
| 89 |
+
"torch_dtype": "float32",
|
| 90 |
+
"transformers_version": null,
|
| 91 |
+
"vision_config": {
|
| 92 |
+
"_name_or_path": "",
|
| 93 |
+
"add_cross_attention": false,
|
| 94 |
+
"architectures": null,
|
| 95 |
+
"attention_dropout": 0.0,
|
| 96 |
+
"bad_words_ids": null,
|
| 97 |
+
"begin_suppress_tokens": null,
|
| 98 |
+
"bos_token_id": null,
|
| 99 |
+
"chunk_size_feed_forward": 0,
|
| 100 |
+
"cross_attention_hidden_size": null,
|
| 101 |
+
"decoder_start_token_id": null,
|
| 102 |
+
"diversity_penalty": 0.0,
|
| 103 |
+
"do_sample": false,
|
| 104 |
+
"dropout": 0.0,
|
| 105 |
+
"early_stopping": false,
|
| 106 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 107 |
+
"eos_token_id": null,
|
| 108 |
+
"exponential_decay_length_penalty": null,
|
| 109 |
+
"finetuning_task": null,
|
| 110 |
+
"forced_bos_token_id": null,
|
| 111 |
+
"forced_eos_token_id": null,
|
| 112 |
+
"hidden_act": "gelu",
|
| 113 |
+
"hidden_size": 768,
|
| 114 |
+
"id2label": {
|
| 115 |
+
"0": "LABEL_0",
|
| 116 |
+
"1": "LABEL_1"
|
| 117 |
+
},
|
| 118 |
+
"image_size": 384,
|
| 119 |
+
"initializer_factor": 1.0,
|
| 120 |
+
"initializer_range": 0.02,
|
| 121 |
+
"intermediate_size": 3072,
|
| 122 |
+
"is_decoder": false,
|
| 123 |
+
"is_encoder_decoder": false,
|
| 124 |
+
"label2id": {
|
| 125 |
+
"LABEL_0": 0,
|
| 126 |
+
"LABEL_1": 1
|
| 127 |
+
},
|
| 128 |
+
"layer_norm_eps": 1e-05,
|
| 129 |
+
"length_penalty": 1.0,
|
| 130 |
+
"max_length": 20,
|
| 131 |
+
"min_length": 0,
|
| 132 |
+
"model_type": "blip_vision_model",
|
| 133 |
+
"no_repeat_ngram_size": 0,
|
| 134 |
+
"num_attention_heads": 12,
|
| 135 |
+
"num_beam_groups": 1,
|
| 136 |
+
"num_beams": 1,
|
| 137 |
+
"num_channels": 3,
|
| 138 |
+
"num_hidden_layers": 12,
|
| 139 |
+
"num_return_sequences": 1,
|
| 140 |
+
"output_attentions": false,
|
| 141 |
+
"output_hidden_states": false,
|
| 142 |
+
"output_scores": false,
|
| 143 |
+
"pad_token_id": null,
|
| 144 |
+
"patch_size": 16,
|
| 145 |
+
"prefix": null,
|
| 146 |
+
"problem_type": null,
|
| 147 |
+
"projection_dim": 512,
|
| 148 |
+
"pruned_heads": {},
|
| 149 |
+
"remove_invalid_values": false,
|
| 150 |
+
"repetition_penalty": 1.0,
|
| 151 |
+
"return_dict": true,
|
| 152 |
+
"return_dict_in_generate": false,
|
| 153 |
+
"sep_token_id": null,
|
| 154 |
+
"suppress_tokens": null,
|
| 155 |
+
"task_specific_params": null,
|
| 156 |
+
"temperature": 1.0,
|
| 157 |
+
"tf_legacy_loss": false,
|
| 158 |
+
"tie_encoder_decoder": false,
|
| 159 |
+
"tie_word_embeddings": true,
|
| 160 |
+
"tokenizer_class": null,
|
| 161 |
+
"top_k": 50,
|
| 162 |
+
"top_p": 1.0,
|
| 163 |
+
"torch_dtype": null,
|
| 164 |
+
"torchscript": false,
|
| 165 |
+
"transformers_version": "4.26.0.dev0",
|
| 166 |
+
"typical_p": 1.0,
|
| 167 |
+
"use_bfloat16": false
|
| 168 |
+
}
|
| 169 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/preprocessor_config.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"do_normalize": true,
|
| 3 |
+
"do_pad": true,
|
| 4 |
+
"do_rescale": true,
|
| 5 |
+
"do_resize": true,
|
| 6 |
+
"image_mean": [
|
| 7 |
+
0.48145466,
|
| 8 |
+
0.4578275,
|
| 9 |
+
0.40821073
|
| 10 |
+
],
|
| 11 |
+
"image_processor_type": "BlipImageProcessor",
|
| 12 |
+
"image_std": [
|
| 13 |
+
0.26862954,
|
| 14 |
+
0.26130258,
|
| 15 |
+
0.27577711
|
| 16 |
+
],
|
| 17 |
+
"processor_class": "BlipProcessor",
|
| 18 |
+
"resample": 3,
|
| 19 |
+
"rescale_factor": 0.00392156862745098,
|
| 20 |
+
"size": {
|
| 21 |
+
"height": 384,
|
| 22 |
+
"width": 384
|
| 23 |
+
},
|
| 24 |
+
"size_divisor": 32
|
| 25 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:017fb3e7f4e125f13a8a4717f1402dbe0d0bb877474b4a203db13a4447b0227f
|
| 3 |
+
size 895139697
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/special_tokens_map.json
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cls_token": "[CLS]",
|
| 3 |
+
"mask_token": "[MASK]",
|
| 4 |
+
"pad_token": "[PAD]",
|
| 5 |
+
"sep_token": "[SEP]",
|
| 6 |
+
"unk_token": "[UNK]"
|
| 7 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/tokenizer_config.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cls_token": "[CLS]",
|
| 3 |
+
"do_basic_tokenize": true,
|
| 4 |
+
"do_lower_case": true,
|
| 5 |
+
"mask_token": "[MASK]",
|
| 6 |
+
"model_max_length": 512,
|
| 7 |
+
"name_or_path": "ybelkada/blip-image-captioning-base",
|
| 8 |
+
"never_split": null,
|
| 9 |
+
"pad_token": "[PAD]",
|
| 10 |
+
"processor_class": "BlipProcessor",
|
| 11 |
+
"sep_token": "[SEP]",
|
| 12 |
+
"special_tokens_map_file": null,
|
| 13 |
+
"strip_accents": null,
|
| 14 |
+
"tokenize_chinese_chars": true,
|
| 15 |
+
"tokenizer_class": "BertTokenizer",
|
| 16 |
+
"unk_token": "[UNK]"
|
| 17 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-itm-base-coco/vocab.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/README.md
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
pipeline_tag: 'visual-question-answering'
|
| 3 |
+
tags:
|
| 4 |
+
- visual-question-answering
|
| 5 |
+
inference: false
|
| 6 |
+
languages:
|
| 7 |
+
- en
|
| 8 |
+
license: bsd-3-clause
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation
|
| 12 |
+
|
| 13 |
+
Model card for BLIP trained on visual question answering- base architecture (with ViT base backbone).
|
| 14 |
+
|
| 15 |
+
|  |
|
| 16 |
+
|:--:|
|
| 17 |
+
| <b> Pull figure from BLIP official repo | Image source: https://github.com/salesforce/BLIP </b>|
|
| 18 |
+
|
| 19 |
+
## TL;DR
|
| 20 |
+
|
| 21 |
+
Authors from the [paper](https://arxiv.org/abs/2201.12086) write in the abstract:
|
| 22 |
+
|
| 23 |
+
*Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to videolanguage tasks in a zero-shot manner. Code, models, and datasets are released.*
|
| 24 |
+
|
| 25 |
+
## Usage
|
| 26 |
+
|
| 27 |
+
You can use this model for conditional and un-conditional image captioning
|
| 28 |
+
|
| 29 |
+
### Using the Pytorch model
|
| 30 |
+
|
| 31 |
+
#### Running the model on CPU
|
| 32 |
+
|
| 33 |
+
<details>
|
| 34 |
+
<summary> Click to expand </summary>
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
import requests
|
| 38 |
+
from PIL import Image
|
| 39 |
+
from transformers import BlipProcessor, BlipForQuestionAnswering
|
| 40 |
+
|
| 41 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
|
| 42 |
+
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
|
| 43 |
+
|
| 44 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 45 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 46 |
+
|
| 47 |
+
question = "how many dogs are in the picture?"
|
| 48 |
+
inputs = processor(raw_image, question, return_tensors="pt")
|
| 49 |
+
|
| 50 |
+
out = model.generate(**inputs)
|
| 51 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 52 |
+
>>> 1
|
| 53 |
+
```
|
| 54 |
+
</details>
|
| 55 |
+
|
| 56 |
+
#### Running the model on GPU
|
| 57 |
+
|
| 58 |
+
##### In full precision
|
| 59 |
+
|
| 60 |
+
<details>
|
| 61 |
+
<summary> Click to expand </summary>
|
| 62 |
+
|
| 63 |
+
```python
|
| 64 |
+
import requests
|
| 65 |
+
from PIL import Image
|
| 66 |
+
from transformers import BlipProcessor, BlipForQuestionAnswering
|
| 67 |
+
|
| 68 |
+
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
|
| 69 |
+
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to("cuda")
|
| 70 |
+
|
| 71 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 72 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 73 |
+
|
| 74 |
+
question = "how many dogs are in the picture?"
|
| 75 |
+
inputs = processor(raw_image, question, return_tensors="pt").to("cuda")
|
| 76 |
+
|
| 77 |
+
out = model.generate(**inputs)
|
| 78 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 79 |
+
>>> 1
|
| 80 |
+
```
|
| 81 |
+
</details>
|
| 82 |
+
|
| 83 |
+
##### In half precision (`float16`)
|
| 84 |
+
|
| 85 |
+
<details>
|
| 86 |
+
<summary> Click to expand </summary>
|
| 87 |
+
|
| 88 |
+
```python
|
| 89 |
+
import torch
|
| 90 |
+
import requests
|
| 91 |
+
from PIL import Image
|
| 92 |
+
from transformers import BlipProcessor, BlipForQuestionAnswering
|
| 93 |
+
|
| 94 |
+
processor = BlipProcessor.from_pretrained("ybelkada/blip-vqa-base")
|
| 95 |
+
model = BlipForQuestionAnswering.from_pretrained("ybelkada/blip-vqa-base", torch_dtype=torch.float16).to("cuda")
|
| 96 |
+
|
| 97 |
+
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
|
| 98 |
+
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
|
| 99 |
+
|
| 100 |
+
question = "how many dogs are in the picture?"
|
| 101 |
+
inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)
|
| 102 |
+
|
| 103 |
+
out = model.generate(**inputs)
|
| 104 |
+
print(processor.decode(out[0], skip_special_tokens=True))
|
| 105 |
+
>>> 1
|
| 106 |
+
```
|
| 107 |
+
</details>
|
| 108 |
+
|
| 109 |
+
## BibTex and citation info
|
| 110 |
+
|
| 111 |
+
```
|
| 112 |
+
@misc{https://doi.org/10.48550/arxiv.2201.12086,
|
| 113 |
+
doi = {10.48550/ARXIV.2201.12086},
|
| 114 |
+
|
| 115 |
+
url = {https://arxiv.org/abs/2201.12086},
|
| 116 |
+
|
| 117 |
+
author = {Li, Junnan and Li, Dongxu and Xiong, Caiming and Hoi, Steven},
|
| 118 |
+
|
| 119 |
+
keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
|
| 120 |
+
|
| 121 |
+
title = {BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation},
|
| 122 |
+
|
| 123 |
+
publisher = {arXiv},
|
| 124 |
+
|
| 125 |
+
year = {2022},
|
| 126 |
+
|
| 127 |
+
copyright = {Creative Commons Attribution 4.0 International}
|
| 128 |
+
}
|
| 129 |
+
```
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/config.json
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_commit_hash": null,
|
| 3 |
+
"architectures": [
|
| 4 |
+
"BlipForQuestionAnswering"
|
| 5 |
+
],
|
| 6 |
+
"image_text_hidden_size": 256,
|
| 7 |
+
"initializer_factor": 1.0,
|
| 8 |
+
"logit_scale_init_value": 2.6592,
|
| 9 |
+
"model_type": "blip",
|
| 10 |
+
"projection_dim": 512,
|
| 11 |
+
"text_config": {
|
| 12 |
+
"_name_or_path": "",
|
| 13 |
+
"add_cross_attention": false,
|
| 14 |
+
"architectures": null,
|
| 15 |
+
"attention_probs_dropout_prob": 0.0,
|
| 16 |
+
"bad_words_ids": null,
|
| 17 |
+
"begin_suppress_tokens": null,
|
| 18 |
+
"bos_token_id": 30522,
|
| 19 |
+
"chunk_size_feed_forward": 0,
|
| 20 |
+
"cross_attention_hidden_size": null,
|
| 21 |
+
"decoder_start_token_id": null,
|
| 22 |
+
"diversity_penalty": 0.0,
|
| 23 |
+
"do_sample": false,
|
| 24 |
+
"early_stopping": false,
|
| 25 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 26 |
+
"eos_token_id": 2,
|
| 27 |
+
"exponential_decay_length_penalty": null,
|
| 28 |
+
"finetuning_task": null,
|
| 29 |
+
"forced_bos_token_id": null,
|
| 30 |
+
"forced_eos_token_id": null,
|
| 31 |
+
"hidden_act": "gelu",
|
| 32 |
+
"hidden_dropout_prob": 0.0,
|
| 33 |
+
"hidden_size": 768,
|
| 34 |
+
"id2label": {
|
| 35 |
+
"0": "LABEL_0",
|
| 36 |
+
"1": "LABEL_1"
|
| 37 |
+
},
|
| 38 |
+
"initializer_factor": 1.0,
|
| 39 |
+
"initializer_range": 0.02,
|
| 40 |
+
"intermediate_size": 3072,
|
| 41 |
+
"is_decoder": true,
|
| 42 |
+
"is_encoder_decoder": false,
|
| 43 |
+
"label2id": {
|
| 44 |
+
"LABEL_0": 0,
|
| 45 |
+
"LABEL_1": 1
|
| 46 |
+
},
|
| 47 |
+
"layer_norm_eps": 1e-12,
|
| 48 |
+
"length_penalty": 1.0,
|
| 49 |
+
"max_length": 20,
|
| 50 |
+
"max_position_embeddings": 512,
|
| 51 |
+
"min_length": 0,
|
| 52 |
+
"model_type": "blip_text_model",
|
| 53 |
+
"no_repeat_ngram_size": 0,
|
| 54 |
+
"num_attention_heads": 12,
|
| 55 |
+
"num_beam_groups": 1,
|
| 56 |
+
"num_beams": 1,
|
| 57 |
+
"num_hidden_layers": 12,
|
| 58 |
+
"num_return_sequences": 1,
|
| 59 |
+
"output_attentions": false,
|
| 60 |
+
"output_hidden_states": false,
|
| 61 |
+
"output_scores": false,
|
| 62 |
+
"pad_token_id": 0,
|
| 63 |
+
"prefix": null,
|
| 64 |
+
"problem_type": null,
|
| 65 |
+
"projection_dim": 768,
|
| 66 |
+
"pruned_heads": {},
|
| 67 |
+
"remove_invalid_values": false,
|
| 68 |
+
"repetition_penalty": 1.0,
|
| 69 |
+
"return_dict": true,
|
| 70 |
+
"return_dict_in_generate": false,
|
| 71 |
+
"sep_token_id": 102,
|
| 72 |
+
"suppress_tokens": null,
|
| 73 |
+
"task_specific_params": null,
|
| 74 |
+
"temperature": 1.0,
|
| 75 |
+
"tf_legacy_loss": false,
|
| 76 |
+
"tie_encoder_decoder": false,
|
| 77 |
+
"tie_word_embeddings": true,
|
| 78 |
+
"tokenizer_class": null,
|
| 79 |
+
"top_k": 50,
|
| 80 |
+
"top_p": 1.0,
|
| 81 |
+
"torch_dtype": null,
|
| 82 |
+
"torchscript": false,
|
| 83 |
+
"transformers_version": "4.26.0.dev0",
|
| 84 |
+
"typical_p": 1.0,
|
| 85 |
+
"use_bfloat16": false,
|
| 86 |
+
"use_cache": true,
|
| 87 |
+
"vocab_size": 30524
|
| 88 |
+
},
|
| 89 |
+
"torch_dtype": "float32",
|
| 90 |
+
"transformers_version": null,
|
| 91 |
+
"vision_config": {
|
| 92 |
+
"_name_or_path": "",
|
| 93 |
+
"add_cross_attention": false,
|
| 94 |
+
"architectures": null,
|
| 95 |
+
"attention_dropout": 0.0,
|
| 96 |
+
"bad_words_ids": null,
|
| 97 |
+
"begin_suppress_tokens": null,
|
| 98 |
+
"bos_token_id": null,
|
| 99 |
+
"chunk_size_feed_forward": 0,
|
| 100 |
+
"cross_attention_hidden_size": null,
|
| 101 |
+
"decoder_start_token_id": null,
|
| 102 |
+
"diversity_penalty": 0.0,
|
| 103 |
+
"do_sample": false,
|
| 104 |
+
"dropout": 0.0,
|
| 105 |
+
"early_stopping": false,
|
| 106 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 107 |
+
"eos_token_id": null,
|
| 108 |
+
"exponential_decay_length_penalty": null,
|
| 109 |
+
"finetuning_task": null,
|
| 110 |
+
"forced_bos_token_id": null,
|
| 111 |
+
"forced_eos_token_id": null,
|
| 112 |
+
"hidden_act": "gelu",
|
| 113 |
+
"hidden_size": 768,
|
| 114 |
+
"id2label": {
|
| 115 |
+
"0": "LABEL_0",
|
| 116 |
+
"1": "LABEL_1"
|
| 117 |
+
},
|
| 118 |
+
"image_size": 384,
|
| 119 |
+
"initializer_factor": 1.0,
|
| 120 |
+
"initializer_range": 0.02,
|
| 121 |
+
"intermediate_size": 3072,
|
| 122 |
+
"is_decoder": false,
|
| 123 |
+
"is_encoder_decoder": false,
|
| 124 |
+
"label2id": {
|
| 125 |
+
"LABEL_0": 0,
|
| 126 |
+
"LABEL_1": 1
|
| 127 |
+
},
|
| 128 |
+
"layer_norm_eps": 1e-05,
|
| 129 |
+
"length_penalty": 1.0,
|
| 130 |
+
"max_length": 20,
|
| 131 |
+
"min_length": 0,
|
| 132 |
+
"model_type": "blip_vision_model",
|
| 133 |
+
"no_repeat_ngram_size": 0,
|
| 134 |
+
"num_attention_heads": 12,
|
| 135 |
+
"num_beam_groups": 1,
|
| 136 |
+
"num_beams": 1,
|
| 137 |
+
"num_channels": 3,
|
| 138 |
+
"num_hidden_layers": 12,
|
| 139 |
+
"num_return_sequences": 1,
|
| 140 |
+
"output_attentions": false,
|
| 141 |
+
"output_hidden_states": false,
|
| 142 |
+
"output_scores": false,
|
| 143 |
+
"pad_token_id": null,
|
| 144 |
+
"patch_size": 16,
|
| 145 |
+
"prefix": null,
|
| 146 |
+
"problem_type": null,
|
| 147 |
+
"projection_dim": 512,
|
| 148 |
+
"pruned_heads": {},
|
| 149 |
+
"remove_invalid_values": false,
|
| 150 |
+
"repetition_penalty": 1.0,
|
| 151 |
+
"return_dict": true,
|
| 152 |
+
"return_dict_in_generate": false,
|
| 153 |
+
"sep_token_id": null,
|
| 154 |
+
"suppress_tokens": null,
|
| 155 |
+
"task_specific_params": null,
|
| 156 |
+
"temperature": 1.0,
|
| 157 |
+
"tf_legacy_loss": false,
|
| 158 |
+
"tie_encoder_decoder": false,
|
| 159 |
+
"tie_word_embeddings": true,
|
| 160 |
+
"tokenizer_class": null,
|
| 161 |
+
"top_k": 50,
|
| 162 |
+
"top_p": 1.0,
|
| 163 |
+
"torch_dtype": null,
|
| 164 |
+
"torchscript": false,
|
| 165 |
+
"transformers_version": "4.26.0.dev0",
|
| 166 |
+
"typical_p": 1.0,
|
| 167 |
+
"use_bfloat16": false
|
| 168 |
+
}
|
| 169 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:33786eed34def0c95fa948128cb4386be9b9219aa2c2e25f1c9c744692121bb7
|
| 3 |
+
size 1538800584
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/preprocessor_config.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"do_normalize": true,
|
| 3 |
+
"do_pad": true,
|
| 4 |
+
"do_rescale": true,
|
| 5 |
+
"do_resize": true,
|
| 6 |
+
"image_mean": [
|
| 7 |
+
0.48145466,
|
| 8 |
+
0.4578275,
|
| 9 |
+
0.40821073
|
| 10 |
+
],
|
| 11 |
+
"image_processor_type": "BlipImageProcessor",
|
| 12 |
+
"image_std": [
|
| 13 |
+
0.26862954,
|
| 14 |
+
0.26130258,
|
| 15 |
+
0.27577711
|
| 16 |
+
],
|
| 17 |
+
"processor_class": "BlipProcessor",
|
| 18 |
+
"resample": 3,
|
| 19 |
+
"rescale_factor": 0.00392156862745098,
|
| 20 |
+
"size": {
|
| 21 |
+
"height": 384,
|
| 22 |
+
"width": 384
|
| 23 |
+
},
|
| 24 |
+
"size_divisor": 32
|
| 25 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/special_tokens_map.json
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cls_token": "[CLS]",
|
| 3 |
+
"mask_token": "[MASK]",
|
| 4 |
+
"pad_token": "[PAD]",
|
| 5 |
+
"sep_token": "[SEP]",
|
| 6 |
+
"unk_token": "[UNK]"
|
| 7 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/tokenizer_config.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cls_token": "[CLS]",
|
| 3 |
+
"do_basic_tokenize": true,
|
| 4 |
+
"do_lower_case": true,
|
| 5 |
+
"mask_token": "[MASK]",
|
| 6 |
+
"model_input_names": [
|
| 7 |
+
"input_ids",
|
| 8 |
+
"attention_mask"
|
| 9 |
+
],
|
| 10 |
+
"model_max_length": 512,
|
| 11 |
+
"name_or_path": "ybelkada/blip-image-captioning-base",
|
| 12 |
+
"never_split": null,
|
| 13 |
+
"pad_token": "[PAD]",
|
| 14 |
+
"processor_class": "BlipProcessor",
|
| 15 |
+
"sep_token": "[SEP]",
|
| 16 |
+
"special_tokens_map_file": null,
|
| 17 |
+
"strip_accents": null,
|
| 18 |
+
"tokenize_chinese_chars": true,
|
| 19 |
+
"tokenizer_class": "BertTokenizer",
|
| 20 |
+
"unk_token": "[UNK]",
|
| 21 |
+
"model_input_names": [
|
| 22 |
+
"input_ids",
|
| 23 |
+
"attention_mask"
|
| 24 |
+
]
|
| 25 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/blip-vqa-base/vocab.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/README.md
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: bsd-3-clause
|
| 3 |
+
---
|
| 4 |
+
# CodeGen (CodeGen-Mono 350M)
|
| 5 |
+
|
| 6 |
+
## Model description
|
| 7 |
+
|
| 8 |
+
CodeGen is a family of autoregressive language models for **program synthesis** from the paper: [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. The models are originally released in [this repository](https://github.com/salesforce/CodeGen), under 3 pre-training data variants (`NL`, `Multi`, `Mono`) and 4 model size variants (`350M`, `2B`, `6B`, `16B`).
|
| 9 |
+
|
| 10 |
+
The checkpoint included in this repository is denoted as **CodeGen-Mono 350M** in the paper, where "Mono" means the model is initialized with *CodeGen-Multi 350M* and further pre-trained on a Python programming language dataset, and "350M" refers to the number of trainable parameters.
|
| 11 |
+
|
| 12 |
+
## Training data
|
| 13 |
+
|
| 14 |
+
This checkpoint (CodeGen-Mono 350M) was firstly initialized with *CodeGen-Multi 350M*, and then pre-trained on BigPython dataset. The data consists of 71.7B tokens of Python programming language. See Section 2.1 of the [paper](https://arxiv.org/abs/2203.13474) for more details.
|
| 15 |
+
|
| 16 |
+
## Training procedure
|
| 17 |
+
|
| 18 |
+
CodeGen was trained using cross-entropy loss to maximize the likelihood of sequential inputs.
|
| 19 |
+
The family of models are trained using multiple TPU-v4-512 by Google, leveraging data and model parallelism.
|
| 20 |
+
See Section 2.3 of the [paper](https://arxiv.org/abs/2203.13474) for more details.
|
| 21 |
+
|
| 22 |
+
## Evaluation results
|
| 23 |
+
|
| 24 |
+
We evaluate our models on two code generation benchmark: HumanEval and MTPB. Please refer to the [paper](https://arxiv.org/abs/2203.13474) for more details.
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
## Intended Use and Limitations
|
| 28 |
+
|
| 29 |
+
As an autoregressive language model, CodeGen is capable of extracting features from given natural language and programming language texts, and calculating the likelihood of them.
|
| 30 |
+
However, the model is intended for and best at **program synthesis**, that is, generating executable code given English prompts, where the prompts should be in the form of a comment string. The model can complete partially-generated code as well.
|
| 31 |
+
|
| 32 |
+
## How to use
|
| 33 |
+
|
| 34 |
+
This model can be easily loaded using the `AutoModelForCausalLM` functionality:
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 38 |
+
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
|
| 39 |
+
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
|
| 40 |
+
|
| 41 |
+
text = "def hello_world():"
|
| 42 |
+
input_ids = tokenizer(text, return_tensors="pt").input_ids
|
| 43 |
+
|
| 44 |
+
generated_ids = model.generate(input_ids, max_length=128)
|
| 45 |
+
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
## BibTeX entry and citation info
|
| 49 |
+
|
| 50 |
+
```bibtex
|
| 51 |
+
@article{Nijkamp2022ACP,
|
| 52 |
+
title={A Conversational Paradigm for Program Synthesis},
|
| 53 |
+
author={Nijkamp, Erik and Pang, Bo and Hayashi, Hiroaki and Tu, Lifu and Wang, Huan and Zhou, Yingbo and Savarese, Silvio and Xiong, Caiming},
|
| 54 |
+
journal={arXiv preprint},
|
| 55 |
+
year={2022}
|
| 56 |
+
}
|
| 57 |
+
```
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/added_tokens.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{" ": 50285, " ": 50271, " ": 50260, " ": 50261, " ": 50272, "\t\t\t\t": 50292, "\t\t\t\t\t\t\t": 50289, " ": 50273, " ": 50284, " ": 50283, " ": 50263, " ": 50258, " ": 50269, " ": 50257, " ": 50265, " ": 50275, " ": 50267, " ": 50270, " ": 50278, " ": 50286, " ": 50276, " ": 50259, "\t\t\t\t\t\t": 50290, " ": 50268, " ": 50279, "\t\t\t\t\t\t\t\t\t": 50287, "\t\t\t": 50293, " ": 50264, " ": 50266, " ": 50277, "\t\t\t\t\t": 50291, "\t\t": 50294, " ": 50281, " ": 50274, "\t\t\t\t\t\t\t\t": 50288, " ": 50282, " ": 50262, " ": 50280}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/config.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_name_or_path": "codegen-350M-mono",
|
| 3 |
+
"activation_function": "gelu_new",
|
| 4 |
+
"architectures": [
|
| 5 |
+
"CodeGenForCausalLM"
|
| 6 |
+
],
|
| 7 |
+
"attn_pdrop": 0.0,
|
| 8 |
+
"bos_token_id": 1,
|
| 9 |
+
"embd_pdrop": 0.0,
|
| 10 |
+
"eos_token_id": 50256,
|
| 11 |
+
"gradient_checkpointing": false,
|
| 12 |
+
"initializer_range": 0.02,
|
| 13 |
+
"layer_norm_epsilon": 1e-05,
|
| 14 |
+
"model_type": "codegen",
|
| 15 |
+
"n_ctx": 2048,
|
| 16 |
+
"n_embd": 1024,
|
| 17 |
+
"n_head": 16,
|
| 18 |
+
"n_inner": null,
|
| 19 |
+
"n_layer": 20,
|
| 20 |
+
"n_positions": 2048,
|
| 21 |
+
"resid_pdrop": 0.0,
|
| 22 |
+
"rotary_dim": 32,
|
| 23 |
+
"scale_attn_weights": true,
|
| 24 |
+
"summary_activation": null,
|
| 25 |
+
"summary_first_dropout": 0.1,
|
| 26 |
+
"summary_proj_to_labels": true,
|
| 27 |
+
"summary_type": "cls_index",
|
| 28 |
+
"summary_use_proj": true,
|
| 29 |
+
"task_specific_params": {
|
| 30 |
+
"text-generation": {
|
| 31 |
+
"do_sample": true,
|
| 32 |
+
"max_length": 50,
|
| 33 |
+
"temperature": 1.0
|
| 34 |
+
}
|
| 35 |
+
},
|
| 36 |
+
"tie_word_embeddings": false,
|
| 37 |
+
"tokenizer_class": "GPT2Tokenizer",
|
| 38 |
+
"torch_dtype": "float16",
|
| 39 |
+
"transformers_version": "4.21.0.dev0",
|
| 40 |
+
"use_cache": true,
|
| 41 |
+
"vocab_size": 51200
|
| 42 |
+
}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90e20f4887289739a435452f26974d97937adb41485f9c1a7b3d3810aac6773d
|
| 3 |
+
size 797367631
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/special_tokens_map.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Hugging Face.Quantization Fundamentals/Materials/Models/Salesforce/codegen-350M-mono/tokenizer_config.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "CodeGenTokenizer"}
|