Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- InternVL/classification/configs/intern_vit_6b_1k_224.yaml +35 -0
- InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_a.yaml +36 -0
- InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_r.yaml +36 -0
- InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_real.yaml +36 -0
- InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_sketch.yaml +36 -0
- InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenetv2.yaml +36 -0
- InternVL/classification/dataset/__init__.py +7 -0
- InternVL/classification/dataset/build.py +332 -0
- InternVL/classification/dataset/cached_image_folder.py +543 -0
- InternVL/classification/dataset/imagenet_a_r_indices.py +295 -0
- InternVL/classification/dataset/imagenet_real.py +50 -0
- InternVL/classification/dataset/imagenetv2.py +59 -0
- InternVL/classification/dataset/samplers.py +116 -0
- InternVL/classification/dataset/zipreader.py +102 -0
- InternVL/classification/meta_data/22k_class_to_idx.json +0 -0
- InternVL/classification/meta_data/imagenet_classes.json +1002 -0
- InternVL/classification/meta_data/map22kto1k.txt +1000 -0
- InternVL/classification/meta_data/real.json +0 -0
- InternVL/classification/models/__init__.py +7 -0
- InternVL/classification/models/build.py +36 -0
- InternVL/classification/models/flash_attention.py +75 -0
- InternVL/classification/models/intern_vit_6b.py +473 -0
- InternVL/classification/work_dirs/intern_vit_6b_1k_224/log_rank0.txt +0 -0
- InternVL/internvl_chat/examples/image1.jpg +0 -0
- InternVL/internvl_chat/internvl/conversation.py +406 -0
- InternVL/internvl_chat/internvl/dist_utils.py +104 -0
- InternVL/internvl_chat/tools/convert_parquet.py +83 -0
- InternVL/internvl_chat/tools/convert_to_int8.py +16 -0
- InternVL/internvl_chat/tools/extract_mlp.py +19 -0
- InternVL/internvl_chat/tools/extract_video_frames.py +121 -0
- InternVL/internvl_chat/tools/extract_vit.py +16 -0
- InternVL/internvl_chat/tools/json2jsonl.py +21 -0
- InternVL/internvl_chat/tools/jsonl2jsonl.py +23 -0
- InternVL/internvl_chat/tools/merge_lora.py +31 -0
- InternVL/internvl_chat/tools/replace_llm.py +29 -0
- InternVL/internvl_chat/tools/resize_pos_embed.py +25 -0
- InternVL/internvl_chat_llava/llava/model/language_model/llava_llama.py +140 -0
- InternVL/internvl_chat_llava/llava/model/language_model/llava_mpt.py +97 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/adapt_tokenizer.py +41 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/attention.py +300 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/blocks.py +41 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/configuration_mpt.py +118 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/custom_embedding.py +11 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/flash_attn_triton.py +484 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/hf_prefixlm_converter.py +415 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/meta_init_context.py +94 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/modeling_mpt.py +331 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/norm.py +56 -0
- InternVL/internvl_chat_llava/llava/model/language_model/mpt/param_init_fns.py +181 -0
- InternVL/internvl_chat_llava/llava/model/multimodal_encoder/builder.py +12 -0
InternVL/classification/configs/intern_vit_6b_1k_224.yaml
ADDED
@@ -0,0 +1,35 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 128
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-1k'
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+    CLS_TARGET: 'cls_patch_concat'
+TRAIN:
+  EMA:
+    ENABLE: False
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
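For reference, a minimal sketch of how a yacs-style YAML config like the one above can be loaded. The repo's own config module (with its registered defaults and merge logic) is not among the 50 files shown here, so this parses the file with yacs directly; the `defrost()`/`freeze()` calls in `dataset/build.py` below suggest the real config object is a yacs `CfgNode`.

```python
from yacs.config import CfgNode as CN

# parse the YAML above into a nested, attribute-style config node
with open('configs/intern_vit_6b_1k_224.yaml') as f:
    cfg = CN.load_cfg(f)
cfg.freeze()  # make it read-only, mirroring the freeze() usage in build.py

print(cfg.MODEL.INTERN_VIT_6B.EMBED_DIM)  # 3200
print(cfg.TRAIN.OPTIMIZER.NAME)           # 'sgd'
```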
InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_a.yaml
ADDED
@@ -0,0 +1,36 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 128
+  DATASET: 'imagenet_a'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-a'
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+    CLS_TARGET: 'cls_patch_concat'
+TRAIN:
+  EMA:
+    ENABLE: False
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_r.yaml
ADDED
@@ -0,0 +1,36 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 128
+  DATASET: 'imagenet_r'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-r'
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+    CLS_TARGET: 'cls_patch_concat'
+TRAIN:
+  EMA:
+    ENABLE: False
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
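The two configs above evaluate a 1000-way ImageNet-1k classifier on ImageNet-A and ImageNet-R, which cover only a 200-class subset (hence the `nb_classes = 1000  # actual number of classes is 200` comments in `dataset/build.py` below). Evaluation of this kind typically masks the 1000-way logits down to the subset before the argmax. A minimal sketch with toy stand-ins: in the repo the wnid lists come from `dataset/imagenet_a_r_indices.py` (shown further down, truncated), and the name `imagenet_a_wnids` is an assumption here.

```python
import torch

# toy stand-ins: all_wnids has 1000 entries and the subset 200 in practice
all_wnids = ['n01440764', 'n01443537', 'n01484850', 'n01491361']
imagenet_a_wnids = ['n01443537', 'n01491361']

subset = set(imagenet_a_wnids)
mask = [i for i, wnid in enumerate(all_wnids) if wnid in subset]  # columns to keep

logits = torch.randn(8, len(all_wnids))  # model output over the full label space
preds = logits[:, mask].argmax(dim=1)    # argmax restricted to the subset classes
print(preds.shape)                       # torch.Size([8])
```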
InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_real.yaml
ADDED
@@ -0,0 +1,36 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 128
+  DATASET: 'imagenet-real'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-1k'
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+    CLS_TARGET: 'cls_patch_concat'
+TRAIN:
+  EMA:
+    ENABLE: False
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenet_sketch.yaml
ADDED
@@ -0,0 +1,36 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 128
+  DATASET: 'imagenet_sketch'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenet-sketch'
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+    CLS_TARGET: 'cls_patch_concat'
+TRAIN:
+  EMA:
+    ENABLE: False
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
InternVL/classification/configs/intern_vit_6b_1k_224_test_imagenetv2.yaml
ADDED
@@ -0,0 +1,36 @@
+DATA:
+  IMG_ON_MEMORY: False
+  BATCH_SIZE: 128
+  DATASET: 'imagenetv2'
+  TRANSFORM: 'build_transform_for_linear_probe'
+  DATA_PATH: './data/imagenetv2'
+MODEL:
+  TYPE: intern_vit_6b
+  DROP_PATH_RATE: 0.0
+  INTERN_VIT_6B:
+    FREEZE_VIT: True
+    PATCH_SIZE: 14
+    PRETRAIN_SIZE: 224
+    QKV_BIAS: False
+    EMBED_DIM: 3200
+    NUM_HEADS: 25
+    MLP_RATIO: 4
+    INIT_VALUES: 0.1
+    QK_NORMALIZATION: True
+    DEPTH: 48
+    USE_FLASH_ATTN: True
+    PRETRAINED: "./pretrained/intern_vit_6b_224px.pth"
+    CLS_TARGET: 'cls_patch_concat'
+TRAIN:
+  EMA:
+    ENABLE: False
+    DECAY: 0.998
+  EPOCHS: 10
+  WARMUP_EPOCHS: 1
+  WEIGHT_DECAY: 0.0
+  BASE_LR: 0.1  # 512
+  WARMUP_LR: .0
+  MIN_LR: .0
+  LR_LAYER_DECAY: false
+  OPTIMIZER:
+    NAME: 'sgd'
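All six configs share the same backbone hyper-parameters (EMBED_DIM 3200, DEPTH 48, MLP_RATIO 4, NUM_HEADS 25); only `DATA.DATASET` and `DATA.DATA_PATH` change per benchmark. As a quick sanity check, these values are consistent with the "6B" in the model name. A rough estimate that ignores patch/position embeddings, norms, and biases:

```python
embed_dim, depth, mlp_ratio = 3200, 48, 4

attn = 4 * embed_dim ** 2                      # qkv (3*d*d) + output projection (d*d)
mlp = 2 * embed_dim * (mlp_ratio * embed_dim)  # two linear layers, d -> 4d -> d
per_block = attn + mlp                         # ~122.9M parameters per transformer block

print(f'{depth * per_block / 1e9:.2f}B')       # ~5.90B, i.e. the "6B" in intern_vit_6b
```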
InternVL/classification/dataset/__init__.py
ADDED
@@ -0,0 +1,7 @@
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2023 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+
+from .build import build_loader, build_loader2
InternVL/classification/dataset/build.py
ADDED
@@ -0,0 +1,332 @@
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2023 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+
+import os
+
+import numpy as np
+import torch
+import torch.distributed as dist
+from timm.data import Mixup, create_transform
+from torchvision import transforms
+from torchvision.datasets import ImageFolder
+
+from .cached_image_folder import ImageCephDataset
+from .samplers import NodeDistributedSampler, SubsetRandomSampler
+
+try:
+    from torchvision.transforms import InterpolationMode
+
+    def _pil_interp(method):
+        if method == 'bicubic':
+            return InterpolationMode.BICUBIC
+        elif method == 'lanczos':
+            return InterpolationMode.LANCZOS
+        elif method == 'hamming':
+            return InterpolationMode.HAMMING
+        else:
+            return InterpolationMode.BILINEAR
+except:
+    from timm.data.transforms import _pil_interp
+
+
+class TTA(torch.nn.Module):
+
+    def __init__(self, size, scales=[1.0, 1.05, 1.1]):
+        super().__init__()
+        self.size = size
+        self.scales = scales
+
+    def forward(self, img):
+        out = []
+        cc = transforms.CenterCrop(self.size)
+        for scale in self.scales:
+            size_ = int(scale * self.size)
+            rs = transforms.Resize(size_, interpolation=_pil_interp('bicubic'))
+            img_ = rs(img)
+            img_ = cc(img_)
+            out.append(img_)
+
+        return out
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}(size={self.size}, scale={self.scales})'
+
+
+def build_loader(config):
+    config.defrost()
+    dataset_train, config.MODEL.NUM_CLASSES = build_dataset('train', config=config)
+    config.freeze()
+    print(f'local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} '
+          'successfully build train dataset')
+
+    dataset_val, _ = build_dataset('val', config=config)
+    print(f'local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} '
+          'successfully build val dataset')
+
+    dataset_test, _ = build_dataset('test', config=config)
+    print(f'local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} '
+          'successfully build test dataset')
+
+    num_tasks = dist.get_world_size()
+    global_rank = dist.get_rank()
+
+    if dataset_train is not None:
+        if config.DATA.IMG_ON_MEMORY:
+            sampler_train = NodeDistributedSampler(dataset_train)
+        else:
+            if config.DATA.ZIP_MODE and config.DATA.CACHE_MODE == 'part':
+                indices = np.arange(dist.get_rank(), len(dataset_train), dist.get_world_size())
+                sampler_train = SubsetRandomSampler(indices)
+            else:
+                sampler_train = torch.utils.data.DistributedSampler(
+                    dataset_train,
+                    num_replicas=num_tasks,
+                    rank=global_rank,
+                    shuffle=True)
+
+    if dataset_val is not None:
+        if config.TEST.SEQUENTIAL:
+            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
+        else:
+            sampler_val = torch.utils.data.distributed.DistributedSampler(dataset_val, shuffle=False)
+
+    if dataset_test is not None:
+        if config.TEST.SEQUENTIAL:
+            sampler_test = torch.utils.data.SequentialSampler(dataset_test)
+        else:
+            sampler_test = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
+
+    data_loader_train = torch.utils.data.DataLoader(
+        dataset_train,
+        sampler=sampler_train,
+        batch_size=config.DATA.BATCH_SIZE,
+        num_workers=config.DATA.NUM_WORKERS,
+        pin_memory=config.DATA.PIN_MEMORY,
+        drop_last=True,
+        persistent_workers=True) if dataset_train is not None else None
+
+    data_loader_val = torch.utils.data.DataLoader(
+        dataset_val,
+        sampler=sampler_val,
+        batch_size=config.DATA.BATCH_SIZE,
+        shuffle=False,
+        num_workers=config.DATA.NUM_WORKERS,
+        pin_memory=config.DATA.PIN_MEMORY,
+        drop_last=False,
+        persistent_workers=True) if dataset_val is not None else None
+
+    data_loader_test = torch.utils.data.DataLoader(
+        dataset_test,
+        sampler=sampler_test,
+        batch_size=config.DATA.BATCH_SIZE,
+        shuffle=False,
+        num_workers=config.DATA.NUM_WORKERS,
+        pin_memory=config.DATA.PIN_MEMORY,
+        drop_last=False,
+        persistent_workers=True) if dataset_test is not None else None
+
+    # setup mixup / cutmix
+    mixup_fn = None
+    mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
+    if mixup_active:
+        mixup_fn = Mixup(mixup_alpha=config.AUG.MIXUP,
+                         cutmix_alpha=config.AUG.CUTMIX,
+                         cutmix_minmax=config.AUG.CUTMIX_MINMAX,
+                         prob=config.AUG.MIXUP_PROB,
+                         switch_prob=config.AUG.MIXUP_SWITCH_PROB,
+                         mode=config.AUG.MIXUP_MODE,
+                         label_smoothing=config.MODEL.LABEL_SMOOTHING,
+                         num_classes=config.MODEL.NUM_CLASSES)
+
+    return dataset_train, dataset_val, dataset_test, data_loader_train, \
+        data_loader_val, data_loader_test, mixup_fn
+
+
+def build_loader2(config):
+    config.defrost()
+    dataset_train, config.MODEL.NUM_CLASSES = build_dataset('train', config=config)
+    config.freeze()
+    dataset_val, _ = build_dataset('val', config=config)
+    dataset_test, _ = build_dataset('test', config=config)
+
+    data_loader_train = torch.utils.data.DataLoader(
+        dataset_train,
+        shuffle=True,
+        batch_size=config.DATA.BATCH_SIZE,
+        num_workers=config.DATA.NUM_WORKERS,
+        pin_memory=config.DATA.PIN_MEMORY,
+        drop_last=True,
+        persistent_workers=True) if dataset_train is not None else None
+
+    data_loader_val = torch.utils.data.DataLoader(
+        dataset_val,
+        batch_size=config.DATA.BATCH_SIZE,
+        shuffle=False,
+        num_workers=config.DATA.NUM_WORKERS,
+        pin_memory=config.DATA.PIN_MEMORY,
+        drop_last=False,
+        persistent_workers=True) if dataset_val is not None else None
+
+    data_loader_test = torch.utils.data.DataLoader(
+        dataset_test,
+        batch_size=config.DATA.BATCH_SIZE,
+        shuffle=False,
+        num_workers=config.DATA.NUM_WORKERS,
+        pin_memory=config.DATA.PIN_MEMORY,
+        drop_last=False,
+        persistent_workers=True) if dataset_test is not None else None
+
+    # setup mixup / cutmix
+    mixup_fn = None
+    mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
+    if mixup_active:
+        mixup_fn = Mixup(mixup_alpha=config.AUG.MIXUP,
+                         cutmix_alpha=config.AUG.CUTMIX,
+                         cutmix_minmax=config.AUG.CUTMIX_MINMAX,
+                         prob=config.AUG.MIXUP_PROB,
+                         switch_prob=config.AUG.MIXUP_SWITCH_PROB,
+                         mode=config.AUG.MIXUP_MODE,
+                         label_smoothing=config.MODEL.LABEL_SMOOTHING,
+                         num_classes=config.MODEL.NUM_CLASSES)
+
+    return dataset_train, dataset_val, dataset_test, data_loader_train, \
+        data_loader_val, data_loader_test, mixup_fn
+
+
+def build_dataset(split, config):
+    if config.DATA.TRANSFORM == 'build_transform':
+        transform = build_transform(split == 'train', config)
+    elif config.DATA.TRANSFORM == 'build_transform_for_linear_probe':
+        transform = build_transform_for_linear_probe(split == 'train', config)
+    else:
+        raise NotImplementedError
+    print(split, transform)
+    dataset = None
+    nb_classes = None
+    prefix = split
+    if config.DATA.DATASET == 'imagenet' or config.DATA.DATASET == 'imagenet-real':
+        if prefix == 'train' and not config.EVAL_MODE:
+            root = os.path.join(config.DATA.DATA_PATH, 'train')
+            dataset = ImageCephDataset(root, 'train',
+                                       transform=transform,
+                                       on_memory=config.DATA.IMG_ON_MEMORY)
+        elif prefix == 'val':
+            root = os.path.join(config.DATA.DATA_PATH, 'val')
+            dataset = ImageCephDataset(root, 'val', transform=transform)
+        nb_classes = 1000
+    elif config.DATA.DATASET == 'imagenet22K':
+        if prefix == 'train':
+            if not config.EVAL_MODE:
+                root = config.DATA.DATA_PATH
+                dataset = ImageCephDataset(root, 'train',
+                                           transform=transform,
+                                           on_memory=config.DATA.IMG_ON_MEMORY)
+            nb_classes = 21841
+        elif prefix == 'val':
+            root = os.path.join(config.DATA.DATA_PATH, 'val')
+            dataset = ImageCephDataset(root, 'val', transform=transform)
+            nb_classes = 1000
+    elif config.DATA.DATASET == 'imagenetv2':
+        from .imagenetv2 import ImageNetV2Dataset
+        if prefix == 'train' and not config.EVAL_MODE:
+            print(f'Only test split available for {config.DATA.DATASET}')
+        else:
+            dataset = ImageNetV2Dataset(variant='matched-frequency',
+                                        transform=transform,
+                                        location=config.DATA.DATA_PATH)
+            nb_classes = 1000
+    elif config.DATA.DATASET == 'imagenet_sketch':
+        if prefix == 'train' and not config.EVAL_MODE:
+            print(f'Only test split available for {config.DATA.DATASET}')
+        else:
+            dataset = ImageFolder(root=config.DATA.DATA_PATH, transform=transform)
+            nb_classes = 1000
+    elif config.DATA.DATASET == 'imagenet_a':
+        if prefix == 'train' and not config.EVAL_MODE:
+            print(f'Only test split available for {config.DATA.DATASET}')
+        else:
+            dataset = ImageFolder(root=config.DATA.DATA_PATH, transform=transform)
+            nb_classes = 1000  # actual number of classes is 200
+    elif config.DATA.DATASET == 'imagenet_r':
+        if prefix == 'train' and not config.EVAL_MODE:
+            print(f'Only test split available for {config.DATA.DATASET}')
+        else:
+            dataset = ImageFolder(root=config.DATA.DATA_PATH, transform=transform)
+            nb_classes = 1000  # actual number of classes is 200
+    else:
+        raise NotImplementedError(
+            f'build_dataset does not support {config.DATA.DATASET}')
+
+    return dataset, nb_classes
+
+
+def build_transform_for_linear_probe(is_train, config):
+    # linear probe: weak augmentation
+    if is_train:
+        transform = transforms.Compose([
+            transforms.RandomResizedCrop(
+                config.DATA.IMG_SIZE, interpolation=transforms.InterpolationMode.BICUBIC),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=config.AUG.MEAN, std=config.AUG.STD)
+        ])
+    else:
+        transform = transforms.Compose([
+            transforms.Resize(
+                config.DATA.IMG_SIZE, interpolation=transforms.InterpolationMode.BICUBIC),
+            transforms.CenterCrop(config.DATA.IMG_SIZE),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=config.AUG.MEAN, std=config.AUG.STD)
+        ])
+    return transform
+
+
+def build_transform(is_train, config):
+    resize_im = config.DATA.IMG_SIZE > 32
+    if is_train:
+        # this should always dispatch to transforms_imagenet_train
+        transform = create_transform(
+            input_size=config.DATA.IMG_SIZE,
+            is_training=True,
+            color_jitter=config.AUG.COLOR_JITTER
+            if config.AUG.COLOR_JITTER > 0 else None,
+            auto_augment=config.AUG.AUTO_AUGMENT
+            if config.AUG.AUTO_AUGMENT != 'none' else None,
+            re_prob=config.AUG.REPROB,
+            re_mode=config.AUG.REMODE,
+            re_count=config.AUG.RECOUNT,
+            interpolation=config.DATA.INTERPOLATION,
+        )
+        if not resize_im:
+            # replace RandomResizedCropAndInterpolation with
+            # RandomCrop
+            transform.transforms[0] = transforms.RandomCrop(config.DATA.IMG_SIZE, padding=4)
+
+        return transform
+
+    t = []
+    if resize_im:
+        if config.TEST.CROP:
+            size = int(1.0 * config.DATA.IMG_SIZE)
+            t.append(
+                transforms.Resize(size, interpolation=_pil_interp(config.DATA.INTERPOLATION)),
+                # to maintain same ratio w.r.t. 224 images
+            )
+            t.append(transforms.CenterCrop(config.DATA.IMG_SIZE))
+        elif config.AUG.RANDOM_RESIZED_CROP:
+            t.append(
+                transforms.RandomResizedCrop(
+                    (config.DATA.IMG_SIZE, config.DATA.IMG_SIZE),
+                    interpolation=_pil_interp(config.DATA.INTERPOLATION)))
+        else:
+            t.append(
+                transforms.Resize(
+                    (config.DATA.IMG_SIZE, config.DATA.IMG_SIZE),
+                    interpolation=_pil_interp(config.DATA.INTERPOLATION)))
+    t.append(transforms.ToTensor())
+    t.append(transforms.Normalize(config.AUG.MEAN, config.AUG.STD))
+
+    return transforms.Compose(t)
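A minimal sketch of driving the linear-probe transform defined above in its eval branch, assuming it is run from the `classification/` directory. `SimpleNamespace` is a hypothetical stand-in for the repo's real config object, and the MEAN/STD values below are the usual ImageNet statistics, filled in by assumption (the actual defaults live in the config module, which is not part of this view).

```python
from types import SimpleNamespace

from PIL import Image

from dataset.build import build_transform_for_linear_probe

# only the fields the eval branch actually reads are filled in
cfg = SimpleNamespace(
    DATA=SimpleNamespace(IMG_SIZE=224),
    AUG=SimpleNamespace(MEAN=(0.485, 0.456, 0.406), STD=(0.229, 0.224, 0.225)),
)

transform = build_transform_for_linear_probe(is_train=False, config=cfg)
x = transform(Image.new('RGB', (640, 480)))  # resize short side, center-crop, normalize
print(x.shape)                               # torch.Size([3, 224, 224])
```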
InternVL/classification/dataset/cached_image_folder.py
ADDED
@@ -0,0 +1,543 @@
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2023 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+
+import io
+import json
+import logging
+import math
+import os
+import os.path as osp
+import re
+import time
+from abc import abstractmethod
+
+import mmcv
+import torch
+import torch.distributed as dist
+import torch.utils.data as data
+from mmcv.fileio import FileClient
+from PIL import Image
+from tqdm import tqdm, trange
+
+from .zipreader import ZipReader, is_zip_path
+
+_logger = logging.getLogger(__name__)
+
+_ERROR_RETRY = 50
+
+
+def has_file_allowed_extension(filename, extensions):
+    """Checks if a file is an allowed extension.
+
+    Args:
+        filename (string): path to a file
+    Returns:
+        bool: True if the filename ends with a known image extension
+    """
+    filename_lower = filename.lower()
+    return any(filename_lower.endswith(ext) for ext in extensions)
+
+
+def find_classes(dir):
+    classes = [
+        d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))
+    ]
+    classes.sort()
+    class_to_idx = {classes[i]: i for i in range(len(classes))}
+    return classes, class_to_idx
+
+
+def make_dataset(dir, class_to_idx, extensions):
+    images = []
+    dir = os.path.expanduser(dir)
+    for target in sorted(os.listdir(dir)):
+        d = os.path.join(dir, target)
+        if not os.path.isdir(d):
+            continue
+        for root, _, fnames in sorted(os.walk(d)):
+            for fname in sorted(fnames):
+                if has_file_allowed_extension(fname, extensions):
+                    path = os.path.join(root, fname)
+                    item = (path, class_to_idx[target])
+                    images.append(item)
+
+    return images
+
+
+def make_dataset_with_ann(ann_file, img_prefix, extensions):
+    images = []
+    with open(ann_file, 'r') as f:
+        contents = f.readlines()
+        for line_str in contents:
+            path_contents = [c for c in line_str.split('\t')]
+            im_file_name = path_contents[0]
+            class_index = int(path_contents[1])
+            assert str.lower(os.path.splitext(im_file_name)[-1]) in extensions
+            item = (os.path.join(img_prefix, im_file_name), class_index)
+            images.append(item)
+
+    return images
+
+
+class DatasetFolder(data.Dataset):
+    """A generic data loader where the samples are arranged in this way: ::
+
+        root/class_x/xxx.ext
+        root/class_x/xxy.ext
+        root/class_x/xxz.ext
+        root/class_y/123.ext
+        root/class_y/nsdf3.ext
+        root/class_y/asd932_.ext
+    Args:
+        root (string): Root directory path.
+        loader (callable): A function to load a sample given its path.
+        extensions (list[string]): A list of allowed extensions.
+        transform (callable, optional): A function/transform that takes in
+            a sample and returns a transformed version.
+            E.g, ``transforms.RandomCrop`` for images.
+        target_transform (callable, optional): A function/transform that takes
+            in the target and transforms it.
+    Attributes:
+        samples (list): List of (sample path, class_index) tuples
+    """
+
+    def __init__(self,
+                 root,
+                 loader,
+                 extensions,
+                 ann_file='',
+                 img_prefix='',
+                 transform=None,
+                 target_transform=None,
+                 cache_mode='no'):
+        # image folder mode
+        if ann_file == '':
+            _, class_to_idx = find_classes(root)
+            samples = make_dataset(root, class_to_idx, extensions)
+        # zip mode
+        else:
+            samples = make_dataset_with_ann(os.path.join(root, ann_file),
+                                            os.path.join(root, img_prefix),
+                                            extensions)
+
+        if len(samples) == 0:
+            raise (RuntimeError('Found 0 files in subfolders of: ' + root +
+                                '\n' + 'Supported extensions are: ' +
+                                ','.join(extensions)))
+
+        self.root = root
+        self.loader = loader
+        self.extensions = extensions
+
+        self.samples = samples
+        self.labels = [y_1k for _, y_1k in samples]
+        self.classes = list(set(self.labels))
+
+        self.transform = transform
+        self.target_transform = target_transform
+
+        self.cache_mode = cache_mode
+        if self.cache_mode != 'no':
+            self.init_cache()
+
+    def init_cache(self):
+        assert self.cache_mode in ['part', 'full']
+        n_sample = len(self.samples)
+        global_rank = dist.get_rank()
+        world_size = dist.get_world_size()
+
+        samples_bytes = [None for _ in range(n_sample)]
+        start_time = time.time()
+        for index in range(n_sample):
+            if index % (n_sample // 10) == 0:
+                t = time.time() - start_time
+                print(
+                    f'global_rank {dist.get_rank()} cached {index}/{n_sample} takes {t:.2f}s per block'
+                )
+                start_time = time.time()
+            path, target = self.samples[index]
+            if self.cache_mode == 'full':
+                samples_bytes[index] = (ZipReader.read(path), target)
+            elif self.cache_mode == 'part' and index % world_size == global_rank:
+                samples_bytes[index] = (ZipReader.read(path), target)
+            else:
+                samples_bytes[index] = (path, target)
+        self.samples = samples_bytes
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+        Returns:
+            tuple: (sample, target) where target is class_index of the target class.
+        """
+        path, target = self.samples[index]
+        sample = self.loader(path)
+        if self.transform is not None:
+            sample = self.transform(sample)
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return sample, target
+
+    def __len__(self):
+        return len(self.samples)
+
+    def __repr__(self):
+        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
+        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
+        fmt_str += '    Root Location: {}\n'.format(self.root)
+        tmp = '    Transforms (if any): '
+        fmt_str += '{0}{1}\n'.format(
+            tmp,
+            self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
+        tmp = '    Target Transforms (if any): '
+        fmt_str += '{0}{1}'.format(
+            tmp,
+            self.target_transform.__repr__().replace('\n',
+                                                     '\n' + ' ' * len(tmp)))
+
+        return fmt_str
+
+
+IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
+
+
+def pil_loader(path):
+    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
+    if isinstance(path, bytes):
+        img = Image.open(io.BytesIO(path))
+    elif is_zip_path(path):
+        data = ZipReader.read(path)
+        img = Image.open(io.BytesIO(data))
+    else:
+        with open(path, 'rb') as f:
+            img = Image.open(f)
+            return img.convert('RGB')
+
+    return img.convert('RGB')
+
+
+def accimage_loader(path):
+    import accimage
+    try:
+        return accimage.Image(path)
+    except IOError:
+        # Potentially a decoding problem, fall back to PIL.Image
+        return pil_loader(path)
+
+
+def default_img_loader(path):
+    from torchvision import get_image_backend
+    if get_image_backend() == 'accimage':
+        return accimage_loader(path)
+    else:
+        return pil_loader(path)
+
+
+class CachedImageFolder(DatasetFolder):
+    """A generic data loader where the images are arranged in this way: ::
+
+        root/dog/xxx.png
+        root/dog/xxy.png
+        root/dog/xxz.png
+        root/cat/123.png
+        root/cat/nsdf3.png
+        root/cat/asd932_.png
+    Args:
+        root (string): Root directory path.
+        transform (callable, optional): A function/transform that takes in an PIL image
+            and returns a transformed version. E.g, ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+        loader (callable, optional): A function to load an image given its path.
+    Attributes:
+        imgs (list): List of (image path, class_index) tuples
+    """
+
+    def __init__(self,
+                 root,
+                 ann_file='',
+                 img_prefix='',
+                 transform=None,
+                 target_transform=None,
+                 loader=default_img_loader,
+                 cache_mode='no'):
+        super(CachedImageFolder,
+              self).__init__(root,
+                             loader,
+                             IMG_EXTENSIONS,
+                             ann_file=ann_file,
+                             img_prefix=img_prefix,
+                             transform=transform,
+                             target_transform=target_transform,
+                             cache_mode=cache_mode)
+        self.imgs = self.samples
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+        Returns:
+            tuple: (image, target) where target is class_index of the target class.
+        """
+        path, target = self.samples[index]
+        image = self.loader(path)
+        if self.transform is not None:
+            img = self.transform(image)
+        else:
+            img = image
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return img, target
+
+
+class ImageCephDataset(data.Dataset):
+
+    def __init__(self,
+                 root,
+                 split,
+                 parser=None,
+                 transform=None,
+                 target_transform=None,
+                 on_memory=False):
+        if '22k' in root:
+            # Imagenet 22k
+            annotation_root = 'meta_data/'
+        else:
+            # Imagenet
+            annotation_root = 'meta_data/'
+        if parser is None or isinstance(parser, str):
+            parser = ParserCephImage(root=root,
+                                     split=split,
+                                     annotation_root=annotation_root,
+                                     on_memory=on_memory)
+        self.parser = parser
+        self.transform = transform
+        self.target_transform = target_transform
+        self._consecutive_errors = 0
+
+    def __getitem__(self, index):
+        img, target = self.parser[index]
+        self._consecutive_errors = 0
+        if self.transform is not None:
+            img = self.transform(img)
+        if target is None:
+            target = -1
+        elif self.target_transform is not None:
+            target = self.target_transform(target)
+        return img, target
+
+    def __len__(self):
+        return len(self.parser)
+
+    def filename(self, index, basename=False, absolute=False):
+        return self.parser.filename(index, basename, absolute)
+
+    def filenames(self, basename=False, absolute=False):
+        return self.parser.filenames(basename, absolute)
+
+
+class Parser:
+
+    def __init__(self):
+        pass
+
+    @abstractmethod
+    def _filename(self, index, basename=False, absolute=False):
+        pass
+
+    def filename(self, index, basename=False, absolute=False):
+        return self._filename(index, basename=basename, absolute=absolute)
+
+    def filenames(self, basename=False, absolute=False):
+        return [
+            self._filename(index, basename=basename, absolute=absolute)
+            for index in range(len(self))
+        ]
+
+
+class ParserCephImage(Parser):
+
+    def __init__(self,
+                 root,
+                 split,
+                 annotation_root,
+                 on_memory=False,
+                 **kwargs):
+        super().__init__()
+
+        self.file_client = None
+        self.kwargs = kwargs
+
+        self.root = root  # dataset:s3://imagenet22k
+        if '22k' in root:
+            self.io_backend = 'petrel'
+            with open(osp.join(annotation_root, '22k_class_to_idx.json'),
+                      'r') as f:
+                self.class_to_idx = json.loads(f.read())
+            with open(osp.join(annotation_root, '22k_label.txt'), 'r') as f:
+                self.samples = f.read().splitlines()
+        else:
+            self.io_backend = 'disk'
+            self.class_to_idx = None
+            with open(osp.join(annotation_root, f'{split}.txt'), 'r') as f:
+                self.samples = f.read().splitlines()
+        local_rank = None
+        local_size = None
+        self._consecutive_errors = 0
+        self.on_memory = on_memory
+        if on_memory:
+            self.holder = {}
+            if local_rank is None:
+                local_rank = int(os.environ.get('LOCAL_RANK', 0))
+            if local_size is None:
+                local_size = int(os.environ.get('LOCAL_SIZE', 1))
+            self.local_rank = local_rank
+            self.local_size = local_size
+            self.rank = int(os.environ['RANK'])
+            self.world_size = int(os.environ['WORLD_SIZE'])
+            self.num_replicas = int(os.environ['WORLD_SIZE'])
+            self.num_parts = local_size
+            self.num_samples = int(
+                math.ceil(len(self.samples) * 1.0 / self.num_replicas))
+            self.total_size = self.num_samples * self.num_replicas
+            self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts
+            self.load_onto_memory_v2()
+
+    def load_onto_memory(self):
+        print('Loading images onto memory...', self.local_rank,
+              self.local_size)
+        if self.file_client is None:
+            self.file_client = FileClient(self.io_backend, **self.kwargs)
+        for index in trange(len(self.samples)):
+            if index % self.local_size != self.local_rank:
+                continue
+            path, _ = self.samples[index].split(' ')
+            path = osp.join(self.root, path)
+            img_bytes = self.file_client.get(path)
+            self.holder[path] = img_bytes
+
+        print('Loading complete!')
+
+    def load_onto_memory_v2(self):
+        # print("Loading images onto memory...", self.local_rank, self.local_size)
+        t = torch.Generator()
+        t.manual_seed(0)
+        indices = torch.randperm(len(self.samples), generator=t).tolist()
+        # indices = range(len(self.samples))
+        indices = [i for i in indices if i % self.num_parts == self.local_rank]
+        # add extra samples to make it evenly divisible
+        indices += indices[:(self.total_size_parts - len(indices))]
+        assert len(indices) == self.total_size_parts
+
+        # subsample
+        indices = indices[self.rank // self.num_parts:self.
+                          total_size_parts:self.num_replicas // self.num_parts]
+        assert len(indices) == self.num_samples
+
+        if self.file_client is None:
+            self.file_client = FileClient(self.io_backend, **self.kwargs)
+        for index in tqdm(indices):
+            if index % self.local_size != self.local_rank:
+                continue
+            path, _ = self.samples[index].split(' ')
+            path = osp.join(self.root, path)
+            img_bytes = self.file_client.get(path)
+
+            self.holder[path] = img_bytes
+
+        print('Loading complete!')
+
+    def __getitem__(self, index):
+        if self.file_client is None:
+            self.file_client = FileClient(self.io_backend, **self.kwargs)
+
+        filepath, target = self.samples[index].split(' ')
+        filepath = osp.join(self.root, filepath)
+
+        try:
+            if self.on_memory:
+                img_bytes = self.holder[filepath]
+            else:
+                # pass
+                img_bytes = self.file_client.get(filepath)
+            img = mmcv.imfrombytes(img_bytes)[:, :, ::-1]
+        except Exception as e:
+            _logger.warning(
+                f'Skipped sample (index {index}, file {filepath}). {str(e)}')
+            self._consecutive_errors += 1
+            if self._consecutive_errors < _ERROR_RETRY:
+                return self.__getitem__((index + 1) % len(self))
+            else:
+                raise e
+        self._consecutive_errors = 0
+
+        img = Image.fromarray(img)
+        try:
+            if self.class_to_idx is not None:
+                target = self.class_to_idx[target]
+            else:
+                target = int(target)
+        except:
+            print(filepath, target)
+            exit()
+
+        return img, target
+
+    def __len__(self):
+        return len(self.samples)
+
+    def _filename(self, index, basename=False, absolute=False):
+        filename, _ = self.samples[index].split(' ')
+        filename = osp.join(self.root, filename)
+
+        return filename
+
+
+def get_temporal_info(date, miss_hour=False):
+    try:
+        if date:
+            if miss_hour:
+                pattern = re.compile(r'(\d*)-(\d*)-(\d*)', re.I)
+            else:
+                pattern = re.compile(r'(\d*)-(\d*)-(\d*) (\d*):(\d*):(\d*)',
+                                     re.I)
+            m = pattern.match(date.strip())
+
+            if m:
+                year = int(m.group(1))
+                month = int(m.group(2))
+                day = int(m.group(3))
+                x_month = math.sin(2 * math.pi * month / 12)
+                y_month = math.cos(2 * math.pi * month / 12)
+                if miss_hour:
+                    x_hour = 0
+                    y_hour = 0
+                else:
+                    hour = int(m.group(4))
+                    x_hour = math.sin(2 * math.pi * hour / 24)
+                    y_hour = math.cos(2 * math.pi * hour / 24)
+                return [x_month, y_month, x_hour, y_hour]
+            else:
+                return [0, 0, 0, 0]
+        else:
+            return [0, 0, 0, 0]
+    except:
+        return [0, 0, 0, 0]
+
+
+def get_spatial_info(latitude, longitude):
+    if latitude and longitude:
+        latitude = math.radians(latitude)
+        longitude = math.radians(longitude)
+        x = math.cos(latitude) * math.cos(longitude)
+        y = math.cos(latitude) * math.sin(longitude)
+        z = math.sin(latitude)
+        return [x, y, z]
+    else:
+        return [0, 0, 0]
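The two helpers at the bottom of this file embed optional metadata continuously: `get_temporal_info` puts month and hour on unit circles so cyclic neighbors (December/January, 23:00/00:00) stay close, and `get_spatial_info` maps latitude/longitude onto the unit sphere. A quick check of the geometry, using the functions as defined above; note that the truthiness test means an exact 0 coordinate is treated as missing and yields `[0, 0, 0]`.

```python
from dataset.cached_image_folder import get_spatial_info, get_temporal_info

# (30N, 60E) -> unit-sphere coordinates: [cos30*cos60, cos30*sin60, sin30]
print(get_spatial_info(30, 60))   # [0.433..., 0.75, 0.5]

# December and January land next to each other on the month circle
print(get_temporal_info('2023-12-01 00:00:00')[:2])  # ~[0.0, 1.0]
print(get_temporal_info('2023-01-01 00:00:00')[:2])  # [0.5, ~0.87]
```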
InternVL/classification/dataset/imagenet_a_r_indices.py
ADDED
@@ -0,0 +1,295 @@
+"""Code from https://github.com/baaivision/EVA/blob/master/EVA-02/asuka/imagenet_a_r_indices.py
+Thanks to the authors of EVA."""
+
+all_wnids = [
+    'n01440764', 'n01443537', 'n01484850', 'n01491361', 'n01494475',
+    'n01496331', 'n01498041', 'n01514668', 'n01514859', 'n01518878',
+    'n01530575', 'n01531178', 'n01532829', 'n01534433', 'n01537544',
+    'n01558993', 'n01560419', 'n01580077', 'n01582220', 'n01592084',
+    'n01601694', 'n01608432', 'n01614925', 'n01616318', 'n01622779',
+    'n01629819', 'n01630670', 'n01631663', 'n01632458', 'n01632777',
+    'n01641577', 'n01644373', 'n01644900', 'n01664065', 'n01665541',
+    'n01667114', 'n01667778', 'n01669191', 'n01675722', 'n01677366',
+    'n01682714', 'n01685808', 'n01687978', 'n01688243', 'n01689811',
+    'n01692333', 'n01693334', 'n01694178', 'n01695060', 'n01697457',
+    'n01698640', 'n01704323', 'n01728572', 'n01728920', 'n01729322',
+    'n01729977', 'n01734418', 'n01735189', 'n01737021', 'n01739381',
+    'n01740131', 'n01742172', 'n01744401', 'n01748264', 'n01749939',
+    'n01751748', 'n01753488', 'n01755581', 'n01756291', 'n01768244',
+    'n01770081', 'n01770393', 'n01773157', 'n01773549', 'n01773797',
+    'n01774384', 'n01774750', 'n01775062', 'n01776313', 'n01784675',
+    'n01795545', 'n01796340', 'n01797886', 'n01798484', 'n01806143',
+    'n01806567', 'n01807496', 'n01817953', 'n01818515', 'n01819313',
+    'n01820546', 'n01824575', 'n01828970', 'n01829413', 'n01833805',
+    'n01843065', 'n01843383', 'n01847000', 'n01855032', 'n01855672',
+    'n01860187', 'n01871265', 'n01872401', 'n01873310', 'n01877812',
+    'n01882714', 'n01883070', 'n01910747', 'n01914609', 'n01917289',
+    'n01924916', 'n01930112', 'n01943899', 'n01944390', 'n01945685',
+    'n01950731', 'n01955084', 'n01968897', 'n01978287', 'n01978455',
+    'n01980166', 'n01981276', 'n01983481', 'n01984695', 'n01985128',
+    'n01986214', 'n01990800', 'n02002556', 'n02002724', 'n02006656',
+    'n02007558', 'n02009229', 'n02009912', 'n02011460', 'n02012849',
+    'n02013706', 'n02017213', 'n02018207', 'n02018795', 'n02025239',
+    'n02027492', 'n02028035', 'n02033041', 'n02037110', 'n02051845',
+    'n02056570', 'n02058221', 'n02066245', 'n02071294', 'n02074367',
+    'n02077923', 'n02085620', 'n02085782', 'n02085936', 'n02086079',
+    'n02086240', 'n02086646', 'n02086910', 'n02087046', 'n02087394',
+    'n02088094', 'n02088238', 'n02088364', 'n02088466', 'n02088632',
+    'n02089078', 'n02089867', 'n02089973', 'n02090379', 'n02090622',
+    'n02090721', 'n02091032', 'n02091134', 'n02091244', 'n02091467',
+    'n02091635', 'n02091831', 'n02092002', 'n02092339', 'n02093256',
+    'n02093428', 'n02093647', 'n02093754', 'n02093859', 'n02093991',
+    'n02094114', 'n02094258', 'n02094433', 'n02095314', 'n02095570',
+    'n02095889', 'n02096051', 'n02096177', 'n02096294', 'n02096437',
+    'n02096585', 'n02097047', 'n02097130', 'n02097209', 'n02097298',
+    'n02097474', 'n02097658', 'n02098105', 'n02098286', 'n02098413',
+    'n02099267', 'n02099429', 'n02099601', 'n02099712', 'n02099849',
+    'n02100236', 'n02100583', 'n02100735', 'n02100877', 'n02101006',
+    'n02101388', 'n02101556', 'n02102040', 'n02102177', 'n02102318',
+    'n02102480', 'n02102973', 'n02104029', 'n02104365', 'n02105056',
+    'n02105162', 'n02105251', 'n02105412', 'n02105505', 'n02105641',
+    'n02105855', 'n02106030', 'n02106166', 'n02106382', 'n02106550',
+    'n02106662', 'n02107142', 'n02107312', 'n02107574', 'n02107683',
+    'n02107908', 'n02108000', 'n02108089', 'n02108422', 'n02108551',
+    'n02108915', 'n02109047', 'n02109525', 'n02109961', 'n02110063',
+    'n02110185', 'n02110341', 'n02110627', 'n02110806', 'n02110958',
+    'n02111129', 'n02111277', 'n02111500', 'n02111889', 'n02112018',
+    'n02112137', 'n02112350', 'n02112706', 'n02113023', 'n02113186',
+    'n02113624', 'n02113712', 'n02113799', 'n02113978', 'n02114367',
+    'n02114548', 'n02114712', 'n02114855', 'n02115641', 'n02115913',
+    'n02116738', 'n02117135', 'n02119022', 'n02119789', 'n02120079',
+    'n02120505', 'n02123045', 'n02123159', 'n02123394', 'n02123597',
+    'n02124075', 'n02125311', 'n02127052', 'n02128385', 'n02128757',
+    'n02128925', 'n02129165', 'n02129604', 'n02130308', 'n02132136',
+    'n02133161', 'n02134084', 'n02134418', 'n02137549', 'n02138441',
+    'n02165105', 'n02165456', 'n02167151', 'n02168699', 'n02169497',
+    'n02172182', 'n02174001', 'n02177972', 'n02190166', 'n02206856',
+    'n02219486', 'n02226429', 'n02229544', 'n02231487', 'n02233338',
+    'n02236044', 'n02256656', 'n02259212', 'n02264363', 'n02268443',
+    'n02268853', 'n02276258', 'n02277742', 'n02279972', 'n02280649',
+    'n02281406', 'n02281787', 'n02317335', 'n02319095', 'n02321529',
+    'n02325366', 'n02326432', 'n02328150', 'n02342885', 'n02346627',
+    'n02356798', 'n02361337', 'n02363005', 'n02364673', 'n02389026',
+    'n02391049', 'n02395406', 'n02396427', 'n02397096', 'n02398521',
+    'n02403003', 'n02408429', 'n02410509', 'n02412080', 'n02415577',
+    'n02417914', 'n02422106', 'n02422699', 'n02423022', 'n02437312',
+    'n02437616', 'n02441942', 'n02442845', 'n02443114', 'n02443484',
+    'n02444819', 'n02445715', 'n02447366', 'n02454379', 'n02457408',
+    'n02480495', 'n02480855', 'n02481823', 'n02483362', 'n02483708',
+    'n02484975', 'n02486261', 'n02486410', 'n02487347', 'n02488291',
+    'n02488702', 'n02489166', 'n02490219', 'n02492035', 'n02492660',
+    'n02493509', 'n02493793', 'n02494079', 'n02497673', 'n02500267',
+    'n02504013', 'n02504458', 'n02509815', 'n02510455', 'n02514041',
+    'n02526121', 'n02536864', 'n02606052', 'n02607072', 'n02640242',
+    'n02641379', 'n02643566', 'n02655020', 'n02666196', 'n02667093',
+    'n02669723', 'n02672831', 'n02676566', 'n02687172', 'n02690373',
+    'n02692877', 'n02699494', 'n02701002', 'n02704792', 'n02708093',
+    'n02727426', 'n02730930', 'n02747177', 'n02749479', 'n02769748',
+    'n02776631', 'n02777292', 'n02782093', 'n02783161', 'n02786058',
+    'n02787622', 'n02788148', 'n02790996', 'n02791124', 'n02791270',
+    'n02793495', 'n02794156', 'n02795169', 'n02797295', 'n02799071',
+    'n02802426', 'n02804414', 'n02804610', 'n02807133', 'n02808304',
|
| 92 |
+
'n02808440', 'n02814533', 'n02814860', 'n02815834', 'n02817516',
|
| 93 |
+
'n02823428', 'n02823750', 'n02825657', 'n02834397', 'n02835271',
|
| 94 |
+
'n02837789', 'n02840245', 'n02841315', 'n02843684', 'n02859443',
|
| 95 |
+
'n02860847', 'n02865351', 'n02869837', 'n02870880', 'n02871525',
|
| 96 |
+
'n02877765', 'n02879718', 'n02883205', 'n02892201', 'n02892767',
|
| 97 |
+
'n02894605', 'n02895154', 'n02906734', 'n02909870', 'n02910353',
|
| 98 |
+
'n02916936', 'n02917067', 'n02927161', 'n02930766', 'n02939185',
|
| 99 |
+
'n02948072', 'n02950826', 'n02951358', 'n02951585', 'n02963159',
|
| 100 |
+
'n02965783', 'n02966193', 'n02966687', 'n02971356', 'n02974003',
|
| 101 |
+
'n02977058', 'n02978881', 'n02979186', 'n02980441', 'n02981792',
|
| 102 |
+
'n02988304', 'n02992211', 'n02992529', 'n02999410', 'n03000134',
|
| 103 |
+
'n03000247', 'n03000684', 'n03014705', 'n03016953', 'n03017168',
|
| 104 |
+
'n03018349', 'n03026506', 'n03028079', 'n03032252', 'n03041632',
|
| 105 |
+
'n03042490', 'n03045698', 'n03047690', 'n03062245', 'n03063599',
|
| 106 |
+
'n03063689', 'n03065424', 'n03075370', 'n03085013', 'n03089624',
|
| 107 |
+
'n03095699', 'n03100240', 'n03109150', 'n03110669', 'n03124043',
|
| 108 |
+
'n03124170', 'n03125729', 'n03126707', 'n03127747', 'n03127925',
|
| 109 |
+
'n03131574', 'n03133878', 'n03134739', 'n03141823', 'n03146219',
|
| 110 |
+
'n03160309', 'n03179701', 'n03180011', 'n03187595', 'n03188531',
|
| 111 |
+
'n03196217', 'n03197337', 'n03201208', 'n03207743', 'n03207941',
|
| 112 |
+
'n03208938', 'n03216828', 'n03218198', 'n03220513', 'n03223299',
|
| 113 |
+
'n03240683', 'n03249569', 'n03250847', 'n03255030', 'n03259280',
|
| 114 |
+
'n03271574', 'n03272010', 'n03272562', 'n03290653', 'n03291819',
|
| 115 |
+
'n03297495', 'n03314780', 'n03325584', 'n03337140', 'n03344393',
|
| 116 |
+
'n03345487', 'n03347037', 'n03355925', 'n03372029', 'n03376595',
|
| 117 |
+
'n03379051', 'n03384352', 'n03388043', 'n03388183', 'n03388549',
|
| 118 |
+
'n03393912', 'n03394916', 'n03400231', 'n03404251', 'n03417042',
|
| 119 |
+
'n03424325', 'n03425413', 'n03443371', 'n03444034', 'n03445777',
|
| 120 |
+
'n03445924', 'n03447447', 'n03447721', 'n03450230', 'n03452741',
|
| 121 |
+
'n03457902', 'n03459775', 'n03461385', 'n03467068', 'n03476684',
|
| 122 |
+
'n03476991', 'n03478589', 'n03481172', 'n03482405', 'n03483316',
|
| 123 |
+
'n03485407', 'n03485794', 'n03492542', 'n03494278', 'n03495258',
|
| 124 |
+
'n03496892', 'n03498962', 'n03527444', 'n03529860', 'n03530642',
|
| 125 |
+
'n03532672', 'n03534580', 'n03535780', 'n03538406', 'n03544143',
|
| 126 |
+
'n03584254', 'n03584829', 'n03590841', 'n03594734', 'n03594945',
|
| 127 |
+
'n03595614', 'n03598930', 'n03599486', 'n03602883', 'n03617480',
|
| 128 |
+
'n03623198', 'n03627232', 'n03630383', 'n03633091', 'n03637318',
|
| 129 |
+
'n03642806', 'n03649909', 'n03657121', 'n03658185', 'n03661043',
|
| 130 |
+
'n03662601', 'n03666591', 'n03670208', 'n03673027', 'n03676483',
|
| 131 |
+
'n03680355', 'n03690938', 'n03691459', 'n03692522', 'n03697007',
|
| 132 |
+
'n03706229', 'n03709823', 'n03710193', 'n03710637', 'n03710721',
|
| 133 |
+
'n03717622', 'n03720891', 'n03721384', 'n03724870', 'n03729826',
|
| 134 |
+
'n03733131', 'n03733281', 'n03733805', 'n03742115', 'n03743016',
|
| 135 |
+
'n03759954', 'n03761084', 'n03763968', 'n03764736', 'n03769881',
|
| 136 |
+
'n03770439', 'n03770679', 'n03773504', 'n03775071', 'n03775546',
|
| 137 |
+
'n03776460', 'n03777568', 'n03777754', 'n03781244', 'n03782006',
|
| 138 |
+
'n03785016', 'n03786901', 'n03787032', 'n03788195', 'n03788365',
|
| 139 |
+
'n03791053', 'n03792782', 'n03792972', 'n03793489', 'n03794056',
|
| 140 |
+
'n03796401', 'n03803284', 'n03804744', 'n03814639', 'n03814906',
|
| 141 |
+
'n03825788', 'n03832673', 'n03837869', 'n03838899', 'n03840681',
|
| 142 |
+
'n03841143', 'n03843555', 'n03854065', 'n03857828', 'n03866082',
|
| 143 |
+
'n03868242', 'n03868863', 'n03871628', 'n03873416', 'n03874293',
|
| 144 |
+
'n03874599', 'n03876231', 'n03877472', 'n03877845', 'n03884397',
|
| 145 |
+
'n03887697', 'n03888257', 'n03888605', 'n03891251', 'n03891332',
|
| 146 |
+
'n03895866', 'n03899768', 'n03902125', 'n03903868', 'n03908618',
|
| 147 |
+
'n03908714', 'n03916031', 'n03920288', 'n03924679', 'n03929660',
|
| 148 |
+
'n03929855', 'n03930313', 'n03930630', 'n03933933', 'n03935335',
|
| 149 |
+
'n03937543', 'n03938244', 'n03942813', 'n03944341', 'n03947888',
|
| 150 |
+
'n03950228', 'n03954731', 'n03956157', 'n03958227', 'n03961711',
|
| 151 |
+
'n03967562', 'n03970156', 'n03976467', 'n03976657', 'n03977966',
|
| 152 |
+
'n03980874', 'n03982430', 'n03983396', 'n03991062', 'n03992509',
|
| 153 |
+
'n03995372', 'n03998194', 'n04004767', 'n04005630', 'n04008634',
|
| 154 |
+
'n04009552', 'n04019541', 'n04023962', 'n04026417', 'n04033901',
|
| 155 |
+
'n04033995', 'n04037443', 'n04039381', 'n04040759', 'n04041544',
|
| 156 |
+
'n04044716', 'n04049303', 'n04065272', 'n04067472', 'n04069434',
|
| 157 |
+
'n04070727', 'n04074963', 'n04081281', 'n04086273', 'n04090263',
|
| 158 |
+
'n04099969', 'n04111531', 'n04116512', 'n04118538', 'n04118776',
|
| 159 |
+
'n04120489', 'n04125021', 'n04127249', 'n04131690', 'n04133789',
|
| 160 |
+
'n04136333', 'n04141076', 'n04141327', 'n04141975', 'n04146614',
|
| 161 |
+
'n04147183', 'n04149813', 'n04152593', 'n04153751', 'n04154565',
|
| 162 |
+
'n04162706', 'n04179913', 'n04192698', 'n04200800', 'n04201297',
|
| 163 |
+
'n04204238', 'n04204347', 'n04208210', 'n04209133', 'n04209239',
|
| 164 |
+
'n04228054', 'n04229816', 'n04235860', 'n04238763', 'n04239074',
|
| 165 |
+
'n04243546', 'n04251144', 'n04252077', 'n04252225', 'n04254120',
|
| 166 |
+
'n04254680', 'n04254777', 'n04258138', 'n04259630', 'n04263257',
|
| 167 |
+
'n04264628', 'n04265275', 'n04266014', 'n04270147', 'n04273569',
|
| 168 |
+
'n04275548', 'n04277352', 'n04285008', 'n04286575', 'n04296562',
|
| 169 |
+
'n04310018', 'n04311004', 'n04311174', 'n04317175', 'n04325704',
|
| 170 |
+
'n04326547', 'n04328186', 'n04330267', 'n04332243', 'n04335435',
|
| 171 |
+
'n04336792', 'n04344873', 'n04346328', 'n04347754', 'n04350905',
|
| 172 |
+
'n04355338', 'n04355933', 'n04356056', 'n04357314', 'n04366367',
|
| 173 |
+
'n04367480', 'n04370456', 'n04371430', 'n04371774', 'n04372370',
|
| 174 |
+
'n04376876', 'n04380533', 'n04389033', 'n04392985', 'n04398044',
|
| 175 |
+
'n04399382', 'n04404412', 'n04409515', 'n04417672', 'n04418357',
|
| 176 |
+
'n04423845', 'n04428191', 'n04429376', 'n04435653', 'n04442312',
|
| 177 |
+
'n04443257', 'n04447861', 'n04456115', 'n04458633', 'n04461696',
|
| 178 |
+
'n04462240', 'n04465501', 'n04467665', 'n04476259', 'n04479046',
|
| 179 |
+
'n04482393', 'n04483307', 'n04485082', 'n04486054', 'n04487081',
|
| 180 |
+
'n04487394', 'n04493381', 'n04501370', 'n04505470', 'n04507155',
|
| 181 |
+
'n04509417', 'n04515003', 'n04517823', 'n04522168', 'n04523525',
|
| 182 |
+
'n04525038', 'n04525305', 'n04532106', 'n04532670', 'n04536866',
|
| 183 |
+
'n04540053', 'n04542943', 'n04548280', 'n04548362', 'n04550184',
|
| 184 |
+
'n04552348', 'n04553703', 'n04554684', 'n04557648', 'n04560804',
|
| 185 |
+
'n04562935', 'n04579145', 'n04579432', 'n04584207', 'n04589890',
|
| 186 |
+
'n04590129', 'n04591157', 'n04591713', 'n04592741', 'n04596742',
|
| 187 |
+
'n04597913', 'n04599235', 'n04604644', 'n04606251', 'n04612504',
|
| 188 |
+
'n04613696', 'n06359193', 'n06596364', 'n06785654', 'n06794110',
|
| 189 |
+
'n06874185', 'n07248320', 'n07565083', 'n07579787', 'n07583066',
|
| 190 |
+
'n07584110', 'n07590611', 'n07613480', 'n07614500', 'n07615774',
|
| 191 |
+
'n07684084', 'n07693725', 'n07695742', 'n07697313', 'n07697537',
|
| 192 |
+
'n07711569', 'n07714571', 'n07714990', 'n07715103', 'n07716358',
|
| 193 |
+
'n07716906', 'n07717410', 'n07717556', 'n07718472', 'n07718747',
|
| 194 |
+
'n07720875', 'n07730033', 'n07734744', 'n07742313', 'n07745940',
|
| 195 |
+
'n07747607', 'n07749582', 'n07753113', 'n07753275', 'n07753592',
|
| 196 |
+
'n07754684', 'n07760859', 'n07768694', 'n07802026', 'n07831146',
|
| 197 |
+
'n07836838', 'n07860988', 'n07871810', 'n07873807', 'n07875152',
|
| 198 |
+
'n07880968', 'n07892512', 'n07920052', 'n07930864', 'n07932039',
|
| 199 |
+
'n09193705', 'n09229709', 'n09246464', 'n09256479', 'n09288635',
|
| 200 |
+
'n09332890', 'n09399592', 'n09421951', 'n09428293', 'n09468604',
|
| 201 |
+
'n09472597', 'n09835506', 'n10148035', 'n10565667', 'n11879895',
|
| 202 |
+
'n11939491', 'n12057211', 'n12144580', 'n12267677', 'n12620546',
|
| 203 |
+
'n12768682', 'n12985857', 'n12998815', 'n13037406', 'n13040303',
|
| 204 |
+
'n13044778', 'n13052670', 'n13054560', 'n13133613', 'n15075141'
|
| 205 |
+
]
|
| 206 |
+
|
| 207 |
+
imagenet_a_wnids = [
|
| 208 |
+
'n01498041', 'n01531178', 'n01534433', 'n01558993', 'n01580077',
|
| 209 |
+
'n01614925', 'n01616318', 'n01631663', 'n01641577', 'n01669191',
|
| 210 |
+
'n01677366', 'n01687978', 'n01694178', 'n01698640', 'n01735189',
|
| 211 |
+
'n01770081', 'n01770393', 'n01774750', 'n01784675', 'n01819313',
|
| 212 |
+
'n01820546', 'n01833805', 'n01843383', 'n01847000', 'n01855672',
|
| 213 |
+
'n01882714', 'n01910747', 'n01914609', 'n01924916', 'n01944390',
|
| 214 |
+
'n01985128', 'n01986214', 'n02007558', 'n02009912', 'n02037110',
|
| 215 |
+
'n02051845', 'n02077923', 'n02085620', 'n02099601', 'n02106550',
|
| 216 |
+
'n02106662', 'n02110958', 'n02119022', 'n02123394', 'n02127052',
|
| 217 |
+
'n02129165', 'n02133161', 'n02137549', 'n02165456', 'n02174001',
|
| 218 |
+
'n02177972', 'n02190166', 'n02206856', 'n02219486', 'n02226429',
|
| 219 |
+
'n02231487', 'n02233338', 'n02236044', 'n02259212', 'n02268443',
|
| 220 |
+
'n02279972', 'n02280649', 'n02281787', 'n02317335', 'n02325366',
|
| 221 |
+
'n02346627', 'n02356798', 'n02361337', 'n02410509', 'n02445715',
|
| 222 |
+
'n02454379', 'n02486410', 'n02492035', 'n02504458', 'n02655020',
|
| 223 |
+
'n02669723', 'n02672831', 'n02676566', 'n02690373', 'n02701002',
|
| 224 |
+
'n02730930', 'n02777292', 'n02782093', 'n02787622', 'n02793495',
|
| 225 |
+
'n02797295', 'n02802426', 'n02814860', 'n02815834', 'n02837789',
|
| 226 |
+
'n02879718', 'n02883205', 'n02895154', 'n02906734', 'n02948072',
|
| 227 |
+
'n02951358', 'n02980441', 'n02992211', 'n02999410', 'n03014705',
|
| 228 |
+
'n03026506', 'n03124043', 'n03125729', 'n03187595', 'n03196217',
|
| 229 |
+
'n03223299', 'n03250847', 'n03255030', 'n03291819', 'n03325584',
|
| 230 |
+
'n03355925', 'n03384352', 'n03388043', 'n03417042', 'n03443371',
|
| 231 |
+
'n03444034', 'n03445924', 'n03452741', 'n03483316', 'n03584829',
|
| 232 |
+
'n03590841', 'n03594945', 'n03617480', 'n03666591', 'n03670208',
|
| 233 |
+
'n03717622', 'n03720891', 'n03721384', 'n03724870', 'n03775071',
|
| 234 |
+
'n03788195', 'n03804744', 'n03837869', 'n03840681', 'n03854065',
|
| 235 |
+
'n03888257', 'n03891332', 'n03935335', 'n03982430', 'n04019541',
|
| 236 |
+
'n04033901', 'n04039381', 'n04067472', 'n04086273', 'n04099969',
|
| 237 |
+
'n04118538', 'n04131690', 'n04133789', 'n04141076', 'n04146614',
|
| 238 |
+
'n04147183', 'n04179913', 'n04208210', 'n04235860', 'n04252077',
|
| 239 |
+
'n04252225', 'n04254120', 'n04270147', 'n04275548', 'n04310018',
|
| 240 |
+
'n04317175', 'n04344873', 'n04347754', 'n04355338', 'n04366367',
|
| 241 |
+
'n04376876', 'n04389033', 'n04399382', 'n04442312', 'n04456115',
|
| 242 |
+
'n04482393', 'n04507155', 'n04509417', 'n04532670', 'n04540053',
|
| 243 |
+
'n04554684', 'n04562935', 'n04591713', 'n04606251', 'n07583066',
|
| 244 |
+
'n07695742', 'n07697313', 'n07697537', 'n07714990', 'n07718472',
|
| 245 |
+
'n07720875', 'n07734744', 'n07749582', 'n07753592', 'n07760859',
|
| 246 |
+
'n07768694', 'n07831146', 'n09229709', 'n09246464', 'n09472597',
|
| 247 |
+
'n09835506', 'n11879895', 'n12057211', 'n12144580', 'n12267677'
|
| 248 |
+
]
|
| 249 |
+
|
| 250 |
+
imagenet_a_mask = [wnid in set(imagenet_a_wnids) for wnid in all_wnids]
|
| 251 |
+
|
| 252 |
+
imagenet_r_wnids = {
|
| 253 |
+
'n01443537', 'n01484850', 'n01494475', 'n01498041', 'n01514859',
|
| 254 |
+
'n01518878', 'n01531178', 'n01534433', 'n01614925', 'n01616318',
|
| 255 |
+
'n01630670', 'n01632777', 'n01644373', 'n01677366', 'n01694178',
|
| 256 |
+
'n01748264', 'n01770393', 'n01774750', 'n01784675', 'n01806143',
|
| 257 |
+
'n01820546', 'n01833805', 'n01843383', 'n01847000', 'n01855672',
|
| 258 |
+
'n01860187', 'n01882714', 'n01910747', 'n01944390', 'n01983481',
|
| 259 |
+
'n01986214', 'n02007558', 'n02009912', 'n02051845', 'n02056570',
|
| 260 |
+
'n02066245', 'n02071294', 'n02077923', 'n02085620', 'n02086240',
|
| 261 |
+
'n02088094', 'n02088238', 'n02088364', 'n02088466', 'n02091032',
|
| 262 |
+
'n02091134', 'n02092339', 'n02094433', 'n02096585', 'n02097298',
|
| 263 |
+
'n02098286', 'n02099601', 'n02099712', 'n02102318', 'n02106030',
|
| 264 |
+
'n02106166', 'n02106550', 'n02106662', 'n02108089', 'n02108915',
|
| 265 |
+
'n02109525', 'n02110185', 'n02110341', 'n02110958', 'n02112018',
|
| 266 |
+
'n02112137', 'n02113023', 'n02113624', 'n02113799', 'n02114367',
|
| 267 |
+
'n02117135', 'n02119022', 'n02123045', 'n02128385', 'n02128757',
|
| 268 |
+
'n02129165', 'n02129604', 'n02130308', 'n02134084', 'n02138441',
|
| 269 |
+
'n02165456', 'n02190166', 'n02206856', 'n02219486', 'n02226429',
|
| 270 |
+
'n02233338', 'n02236044', 'n02268443', 'n02279972', 'n02317335',
|
| 271 |
+
'n02325366', 'n02346627', 'n02356798', 'n02363005', 'n02364673',
|
| 272 |
+
'n02391049', 'n02395406', 'n02398521', 'n02410509', 'n02423022',
|
| 273 |
+
'n02437616', 'n02445715', 'n02447366', 'n02480495', 'n02480855',
|
| 274 |
+
'n02481823', 'n02483362', 'n02486410', 'n02510455', 'n02526121',
|
| 275 |
+
'n02607072', 'n02655020', 'n02672831', 'n02701002', 'n02749479',
|
| 276 |
+
'n02769748', 'n02793495', 'n02797295', 'n02802426', 'n02808440',
|
| 277 |
+
'n02814860', 'n02823750', 'n02841315', 'n02843684', 'n02883205',
|
| 278 |
+
'n02906734', 'n02909870', 'n02939185', 'n02948072', 'n02950826',
|
| 279 |
+
'n02951358', 'n02966193', 'n02980441', 'n02992529', 'n03124170',
|
| 280 |
+
'n03272010', 'n03345487', 'n03372029', 'n03424325', 'n03452741',
|
| 281 |
+
'n03467068', 'n03481172', 'n03494278', 'n03495258', 'n03498962',
|
| 282 |
+
'n03594945', 'n03602883', 'n03630383', 'n03649909', 'n03676483',
|
| 283 |
+
'n03710193', 'n03773504', 'n03775071', 'n03888257', 'n03930630',
|
| 284 |
+
'n03947888', 'n04086273', 'n04118538', 'n04133789', 'n04141076',
|
| 285 |
+
'n04146614', 'n04147183', 'n04192698', 'n04254680', 'n04266014',
|
| 286 |
+
'n04275548', 'n04310018', 'n04325704', 'n04347754', 'n04389033',
|
| 287 |
+
'n04409515', 'n04465501', 'n04487394', 'n04522168', 'n04536866',
|
| 288 |
+
'n04552348', 'n04591713', 'n07614500', 'n07693725', 'n07695742',
|
| 289 |
+
'n07697313', 'n07697537', 'n07714571', 'n07714990', 'n07718472',
|
| 290 |
+
'n07720875', 'n07734744', 'n07742313', 'n07745940', 'n07749582',
|
| 291 |
+
'n07753275', 'n07753592', 'n07768694', 'n07873807', 'n07880968',
|
| 292 |
+
'n07920052', 'n09472597', 'n09835506', 'n10565667', 'n12267677'
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
imagenet_r_mask = [wnid in imagenet_r_wnids for wnid in all_wnids]
|
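Usage note (illustrative, not part of the repository): the two boolean masks select, from the 1000 ImageNet-1k classes, the 200 classes that appear in ImageNet-A or ImageNet-R, so a standard 1000-way classifier can be scored on those test sets. A minimal sketch, assuming the import path is relative to InternVL/classification and that `model`/`images` are hypothetical placeholders for a classifier returning ImageNet-1k logits and an input batch:

    import torch

    from dataset.imagenet_a_r_indices import imagenet_a_mask  # imagenet_r_mask works the same way

    mask = torch.tensor(imagenet_a_mask)     # (1000,) boolean with 200 True entries
    logits = model(images)                   # (batch, 1000) ImageNet-1k logits
    logits_a = logits[:, mask]               # (batch, 200) restricted to ImageNet-A classes
    preds = logits_a.argmax(dim=1)           # predictions in the 200-class label space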
InternVL/classification/dataset/imagenet_real.py
ADDED
@@ -0,0 +1,50 @@
# --------------------------------------------------------
# EVA: Exploring the Limits of Masked Visual Representation Learning at Scale (https://arxiv.org/abs/2211.07636)
# Github source: https://github.com/baaivision/EVA
# Copyright (c) 2022 Beijing Academy of Artificial Intelligence (BAAI)
# Licensed under The MIT License [see LICENSE for details]
# By Yuxin Fang
# Based on timm, DINO, DeiT and BEiT codebases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------

import json
import os

import numpy as np


class RealLabelsImagenet:

    def __init__(self, filenames, real_json='real.json', topk=(1, 5)):
        with open(real_json) as real_labels:
            real_labels = json.load(real_labels)
            real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)}
        self.real_labels = real_labels
        self.filenames = filenames
        assert len(self.filenames) == len(self.real_labels)
        self.topk = topk
        self.is_correct = {k: [] for k in topk}
        self.sample_idx = 0

    def add_result(self, output):
        # Record, for each sample in the batch, whether any top-k prediction
        # matches one of its ReaL labels; samples with no ReaL labels are skipped.
        maxk = max(self.topk)
        _, pred_batch = output.topk(maxk, 1, True, True)
        pred_batch = pred_batch.cpu().numpy()
        for pred in pred_batch:
            filename = self.filenames[self.sample_idx]
            filename = os.path.basename(filename)
            if self.real_labels[filename]:
                for k in self.topk:
                    self.is_correct[k].append(
                        any([p in self.real_labels[filename] for p in pred[:k]]))
            self.sample_idx += 1

    def get_accuracy(self, k=None):
        # Report accuracy (in %) for one top-k, or a dict covering every
        # requested top-k when k is None.
        if k is None:
            return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk}
        else:
            return float(np.mean(self.is_correct[k])) * 100
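Usage note (illustrative, not part of the repository): RealLabelsImagenet accumulates batch predictions in dataset order and scores them against the ReaL multi-label annotations shipped as meta_data/real.json. A minimal sketch, assuming `model` and `data_loader` are placeholders for a trained classifier and a validation loader that preserves file order, and that `filenames` lists the 50,000 validation files in that same order:

    import torch

    real_labels = RealLabelsImagenet(filenames, real_json='meta_data/real.json', topk=(1, 5))
    model.eval()
    with torch.no_grad():
        for images, _ in data_loader:          # loader must not shuffle
            output = model(images)             # (batch, 1000) logits
            real_labels.add_result(output)
    print('ReaL top-1:', real_labels.get_accuracy(k=1))
    print('ReaL top-5:', real_labels.get_accuracy(k=5))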
InternVL/classification/dataset/imagenetv2.py
ADDED
@@ -0,0 +1,59 @@
"""Code from https://github.com/mlfoundations/wise-ft/blob/master/src/datasets/imagenetv2.py
Thanks to the authors of wise-ft."""
import pathlib
import shutil
import tarfile

import requests
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm

URLS = {'matched-frequency': 'https://imagenetv2public.s3-us-west-2.amazonaws.com/imagenetv2-matched-frequency.tar.gz',
        'threshold-0.7': 'https://imagenetv2public.s3-us-west-2.amazonaws.com/imagenetv2-threshold0.7.tar.gz',
        'top-images': 'https://imagenetv2public.s3-us-west-2.amazonaws.com/imagenetv2-top-images.tar.gz',
        'val': 'https://imagenetv2public.s3-us-west-2.amazonaws.com/imagenet_validation.tar.gz'}

FNAMES = {'matched-frequency': 'imagenetv2-matched-frequency-format-val',
          'threshold-0.7': 'imagenetv2-threshold0.7-format-val',
          'top-images': 'imagenetv2-top-images-format-val',
          'val': 'imagenet_validation'}

V2_DATASET_SIZE = 10000
VAL_DATASET_SIZE = 50000


class ImageNetV2Dataset(Dataset):
    def __init__(self, variant='matched-frequency', transform=None, location='.'):
        self.dataset_root = pathlib.Path(f'{location}/ImageNetV2-{variant}/')
        self.tar_root = pathlib.Path(f'{location}/ImageNetV2-{variant}.tar.gz')
        self.fnames = list(self.dataset_root.glob('**/*.jpeg'))
        self.transform = transform
        assert variant in URLS, f'unknown V2 Variant: {variant}'
        if not self.dataset_root.exists() or len(self.fnames) != V2_DATASET_SIZE:
            if not self.tar_root.exists():
                print(f'Dataset {variant} not found on disk, downloading....')
                response = requests.get(URLS[variant], stream=True)
                total_size_in_bytes = int(response.headers.get('content-length', 0))
                block_size = 1024  # 1 Kibibyte
                progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
                with open(self.tar_root, 'wb') as f:
                    for data in response.iter_content(block_size):
                        progress_bar.update(len(data))
                        f.write(data)
                progress_bar.close()
                if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
                    assert False, f'Downloading from {URLS[variant]} failed'
            print('Extracting....')
            tarfile.open(self.tar_root).extractall(f'{location}')
            shutil.move(f'{location}/{FNAMES[variant]}', self.dataset_root)
            self.fnames = list(self.dataset_root.glob('**/*.jpeg'))

    def __len__(self):
        return len(self.fnames)

    def __getitem__(self, i):
        img, label = Image.open(self.fnames[i]), int(self.fnames[i].parent.name)
        if self.transform is not None:
            img = self.transform(img)
        return img, label
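Usage note (illustrative, not part of the repository): the dataset downloads and unpacks the requested ImageNetV2 variant on first use, then behaves like any map-style dataset (labels come from the numeric folder names). A minimal sketch; `./data` is an arbitrary cache directory chosen for the example:

    from torch.utils.data import DataLoader
    from torchvision import transforms

    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    dataset = ImageNetV2Dataset(variant='matched-frequency', transform=transform, location='./data')
    loader = DataLoader(dataset, batch_size=64, num_workers=4)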
InternVL/classification/dataset/samplers.py
ADDED
@@ -0,0 +1,116 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import math
import os

import numpy as np
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler


class SubsetRandomSampler(torch.utils.data.Sampler):
    """Samples elements randomly from a given list of indices, without
    replacement.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.epoch = 0
        self.indices = indices

    def __iter__(self):
        return (self.indices[i] for i in torch.randperm(len(self.indices)))

    def __len__(self):
        return len(self.indices)

    def set_epoch(self, epoch):
        self.epoch = epoch


class NodeDistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.
    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.
    .. note::
        Dataset is assumed to be of constant size.
    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 local_rank=None,
                 local_size=None):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError(
                    'Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError(
                    'Requires distributed package to be available')
            rank = dist.get_rank()
        if local_rank is None:
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        if local_size is None:
            local_size = int(os.environ.get('LOCAL_SIZE', 1))
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.num_parts = local_size
        self.rank = rank
        self.local_rank = local_rank
        self.epoch = 0
        self.num_samples = int(
            math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

        self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        t = torch.Generator()
        t.manual_seed(0)

        indices = torch.randperm(len(self.dataset), generator=t).tolist()
        # indices = range(len(self.dataset))
        indices = [i for i in indices if i % self.num_parts == self.local_rank]

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size_parts - len(indices))]
        assert len(indices) == self.total_size_parts

        # subsample: every (num_replicas // num_parts)-th index, offset by this
        # process's node id (rank // num_parts)
        indices = indices[self.rank // self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts]

        index = torch.randperm(len(indices), generator=g).tolist()
        indices = list(np.array(indices)[index])

        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
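Usage note (illustrative, not part of the repository): NodeDistributedSampler first shards the fixed global permutation by node-local rank (i % local_size == local_rank), then strides across nodes, so each of the num_replicas processes receives a disjoint, padded shard; set_epoch only re-seeds the within-shard shuffle. A minimal sketch, assuming torch.distributed has already been initialized (e.g. via torchrun) and `dataset`/`num_epochs` are placeholders:

    from torch.utils.data import DataLoader

    sampler = NodeDistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=64, sampler=sampler,
                        num_workers=4, pin_memory=True)

    for epoch in range(num_epochs):
        sampler.set_epoch(epoch)   # different within-shard order each epoch
        for images, targets in loader:
            ...                    # forward/backward as usual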
InternVL/classification/dataset/zipreader.py
ADDED
@@ -0,0 +1,102 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import io
import os
import zipfile

import numpy as np
from PIL import Image, ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True


def is_zip_path(img_or_path):
    """Judge if this is a zip path."""
    return '.zip@' in img_or_path


class ZipReader(object):
    """A class to read zipped files."""
    zip_bank = dict()

    def __init__(self):
        super(ZipReader, self).__init__()

    @staticmethod
    def get_zipfile(path):
        # Cache open ZipFile handles so repeated reads skip reopening the archive.
        zip_bank = ZipReader.zip_bank
        if path not in zip_bank:
            zfile = zipfile.ZipFile(path, 'r')
            zip_bank[path] = zfile
        return zip_bank[path]

    @staticmethod
    def split_zip_style_path(path):
        # find returns -1 when '@' is absent, so the assert below can report
        # a clear error instead of an unhandled ValueError.
        pos_at = path.find('@')
        assert pos_at != -1, "character '@' is not found from the given path '%s'" % path

        zip_path = path[0:pos_at]
        folder_path = path[pos_at + 1:]
        folder_path = str.strip(folder_path, '/')
        return zip_path, folder_path

    @staticmethod
    def list_folder(path):
        zip_path, folder_path = ZipReader.split_zip_style_path(path)

        zfile = ZipReader.get_zipfile(zip_path)
        folder_list = []
        for file_folder_name in zfile.namelist():
            file_folder_name = str.strip(file_folder_name, '/')
            if file_folder_name.startswith(folder_path) and \
               len(os.path.splitext(file_folder_name)[-1]) == 0 and \
               file_folder_name != folder_path:
                if len(folder_path) == 0:
                    folder_list.append(file_folder_name)
                else:
                    folder_list.append(file_folder_name[len(folder_path) + 1:])

        return folder_list

    @staticmethod
    def list_files(path, extension=None):
        if extension is None:
            extension = ['.*']
        zip_path, folder_path = ZipReader.split_zip_style_path(path)

        zfile = ZipReader.get_zipfile(zip_path)
        file_lists = []
        for file_folder_name in zfile.namelist():
            file_folder_name = str.strip(file_folder_name, '/')
            if file_folder_name.startswith(folder_path) and \
               str.lower(os.path.splitext(file_folder_name)[-1]) in extension:
                if len(folder_path) == 0:
                    file_lists.append(file_folder_name)
                else:
                    file_lists.append(file_folder_name[len(folder_path) + 1:])

        return file_lists

    @staticmethod
    def read(path):
        zip_path, path_img = ZipReader.split_zip_style_path(path)
        zfile = ZipReader.get_zipfile(zip_path)
        data = zfile.read(path_img)
        return data

    @staticmethod
    def imread(path):
        zip_path, path_img = ZipReader.split_zip_style_path(path)
        zfile = ZipReader.get_zipfile(zip_path)
        data = zfile.read(path_img)
        try:
            im = Image.open(io.BytesIO(data))
        except Exception:
            # Fall back to a random image so one corrupt file does not abort
            # training; the offending member is logged for inspection.
            print('ERROR IMG LOADED: ', path_img)
            random_img = np.random.rand(224, 224, 3) * 255
            im = Image.fromarray(np.uint8(random_img))
        return im
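Usage note (illustrative, not part of the repository): ZipReader addresses a member of a zip archive with a single string of the form '<archive>.zip@<member path>', which lets a dataset read individual images without unpacking the archive. A minimal sketch; the archive and member names below are hypothetical:

    # '<archive>.zip@<member>' selects the member inside the archive
    path = 'train.zip@train/n01440764/example.JPEG'
    if is_zip_path(path):
        img = ZipReader.imread(path).convert('RGB')   # decoded PIL image
        raw = ZipReader.read(path)                    # raw bytes of the member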
InternVL/classification/meta_data/22k_class_to_idx.json
ADDED
The diff for this file is too large to render. See raw diff.
InternVL/classification/meta_data/imagenet_classes.json
ADDED
@@ -0,0 +1,1002 @@
{
    "n01440764": 0,
    "n01443537": 1,
    "n01484850": 2,
    "n01491361": 3,
    "n01494475": 4,
    "n01496331": 5,
    "n01498041": 6,
    "n01514668": 7,
    "n01514859": 8,
    "n01518878": 9,
    "n01530575": 10,
    "n01531178": 11,
    "n01532829": 12,
    "n01534433": 13,
    "n01537544": 14,
    "n01558993": 15,
    "n01560419": 16,
    "n01580077": 17,
    "n01582220": 18,
    "n01592084": 19,
    "n01601694": 20,
    "n01608432": 21,
    "n01614925": 22,
    "n01616318": 23,
    "n01622779": 24,
    "n01629819": 25,
    "n01630670": 26,
    "n01631663": 27,
    "n01632458": 28,
    "n01632777": 29,
    "n01641577": 30,
    "n01644373": 31,
    "n01644900": 32,
    "n01664065": 33,
    "n01665541": 34,
    "n01667114": 35,
    "n01667778": 36,
    "n01669191": 37,
    "n01675722": 38,
    "n01677366": 39,
    "n01682714": 40,
    "n01685808": 41,
    "n01687978": 42,
    "n01688243": 43,
    "n01689811": 44,
    "n01692333": 45,
    "n01693334": 46,
    "n01694178": 47,
    "n01695060": 48,
    "n01697457": 49,
    "n01698640": 50,
    "n01704323": 51,
    "n01728572": 52,
    "n01728920": 53,
    "n01729322": 54,
    "n01729977": 55,
    "n01734418": 56,
    "n01735189": 57,
    "n01737021": 58,
    "n01739381": 59,
    "n01740131": 60,
    "n01742172": 61,
    "n01744401": 62,
    "n01748264": 63,
    "n01749939": 64,
    "n01751748": 65,
    "n01753488": 66,
    "n01755581": 67,
    "n01756291": 68,
    "n01768244": 69,
    "n01770081": 70,
    "n01770393": 71,
    "n01773157": 72,
    "n01773549": 73,
    "n01773797": 74,
    "n01774384": 75,
    "n01774750": 76,
    "n01775062": 77,
    "n01776313": 78,
    "n01784675": 79,
    "n01795545": 80,
    "n01796340": 81,
    "n01797886": 82,
    "n01798484": 83,
    "n01806143": 84,
    "n01806567": 85,
    "n01807496": 86,
    "n01817953": 87,
    "n01818515": 88,
    "n01819313": 89,
    "n01820546": 90,
    "n01824575": 91,
    "n01828970": 92,
    "n01829413": 93,
    "n01833805": 94,
    "n01843065": 95,
    "n01843383": 96,
    "n01847000": 97,
    "n01855032": 98,
    "n01855672": 99,
    "n01860187": 100,
    "n01871265": 101,
    "n01872401": 102,
    "n01873310": 103,
    "n01877812": 104,
    "n01882714": 105,
    "n01883070": 106,
    "n01910747": 107,
    "n01914609": 108,
    "n01917289": 109,
    "n01924916": 110,
    "n01930112": 111,
    "n01943899": 112,
    "n01944390": 113,
    "n01945685": 114,
    "n01950731": 115,
    "n01955084": 116,
    "n01968897": 117,
    "n01978287": 118,
    "n01978455": 119,
    "n01980166": 120,
    "n01981276": 121,
    "n01983481": 122,
    "n01984695": 123,
    "n01985128": 124,
    "n01986214": 125,
    "n01990800": 126,
    "n02002556": 127,
    "n02002724": 128,
    "n02006656": 129,
    "n02007558": 130,
    "n02009229": 131,
    "n02009912": 132,
    "n02011460": 133,
    "n02012849": 134,
    "n02013706": 135,
    "n02017213": 136,
    "n02018207": 137,
    "n02018795": 138,
    "n02025239": 139,
    "n02027492": 140,
    "n02028035": 141,
    "n02033041": 142,
    "n02037110": 143,
    "n02051845": 144,
    "n02056570": 145,
    "n02058221": 146,
    "n02066245": 147,
    "n02071294": 148,
    "n02074367": 149,
    "n02077923": 150,
    "n02085620": 151,
    "n02085782": 152,
    "n02085936": 153,
    "n02086079": 154,
    "n02086240": 155,
    "n02086646": 156,
    "n02086910": 157,
    "n02087046": 158,
    "n02087394": 159,
    "n02088094": 160,
    "n02088238": 161,
    "n02088364": 162,
    "n02088466": 163,
    "n02088632": 164,
    "n02089078": 165,
    "n02089867": 166,
    "n02089973": 167,
    "n02090379": 168,
    "n02090622": 169,
    "n02090721": 170,
    "n02091032": 171,
    "n02091134": 172,
    "n02091244": 173,
    "n02091467": 174,
    "n02091635": 175,
    "n02091831": 176,
    "n02092002": 177,
    "n02092339": 178,
    "n02093256": 179,
    "n02093428": 180,
    "n02093647": 181,
    "n02093754": 182,
    "n02093859": 183,
    "n02093991": 184,
    "n02094114": 185,
    "n02094258": 186,
    "n02094433": 187,
    "n02095314": 188,
    "n02095570": 189,
    "n02095889": 190,
    "n02096051": 191,
    "n02096177": 192,
    "n02096294": 193,
    "n02096437": 194,
    "n02096585": 195,
    "n02097047": 196,
    "n02097130": 197,
    "n02097209": 198,
    "n02097298": 199,
    "n02097474": 200,
    "n02097658": 201,
    "n02098105": 202,
    "n02098286": 203,
    "n02098413": 204,
    "n02099267": 205,
    "n02099429": 206,
    "n02099601": 207,
    "n02099712": 208,
    "n02099849": 209,
    "n02100236": 210,
    "n02100583": 211,
    "n02100735": 212,
    "n02100877": 213,
    "n02101006": 214,
    "n02101388": 215,
    "n02101556": 216,
    "n02102040": 217,
    "n02102177": 218,
    "n02102318": 219,
    "n02102480": 220,
    "n02102973": 221,
    "n02104029": 222,
    "n02104365": 223,
    "n02105056": 224,
    "n02105162": 225,
    "n02105251": 226,
    "n02105412": 227,
    "n02105505": 228,
    "n02105641": 229,
    "n02105855": 230,
    "n02106030": 231,
    "n02106166": 232,
    "n02106382": 233,
    "n02106550": 234,
    "n02106662": 235,
    "n02107142": 236,
    "n02107312": 237,
    "n02107574": 238,
    "n02107683": 239,
    "n02107908": 240,
    "n02108000": 241,
    "n02108089": 242,
    "n02108422": 243,
    "n02108551": 244,
    "n02108915": 245,
    "n02109047": 246,
    "n02109525": 247,
    "n02109961": 248,
    "n02110063": 249,
    "n02110185": 250,
    "n02110341": 251,
    "n02110627": 252,
    "n02110806": 253,
    "n02110958": 254,
    "n02111129": 255,
    "n02111277": 256,
    "n02111500": 257,
    "n02111889": 258,
    "n02112018": 259,
    "n02112137": 260,
    "n02112350": 261,
    "n02112706": 262,
    "n02113023": 263,
    "n02113186": 264,
    "n02113624": 265,
    "n02113712": 266,
    "n02113799": 267,
    "n02113978": 268,
    "n02114367": 269,
    "n02114548": 270,
    "n02114712": 271,
    "n02114855": 272,
    "n02115641": 273,
    "n02115913": 274,
    "n02116738": 275,
    "n02117135": 276,
    "n02119022": 277,
    "n02119789": 278,
    "n02120079": 279,
    "n02120505": 280,
    "n02123045": 281,
    "n02123159": 282,
    "n02123394": 283,
    "n02123597": 284,
    "n02124075": 285,
    "n02125311": 286,
    "n02127052": 287,
    "n02128385": 288,
    "n02128757": 289,
    "n02128925": 290,
    "n02129165": 291,
    "n02129604": 292,
    "n02130308": 293,
    "n02132136": 294,
    "n02133161": 295,
    "n02134084": 296,
    "n02134418": 297,
    "n02137549": 298,
    "n02138441": 299,
    "n02165105": 300,
    "n02165456": 301,
    "n02167151": 302,
    "n02168699": 303,
    "n02169497": 304,
    "n02172182": 305,
    "n02174001": 306,
    "n02177972": 307,
    "n02190166": 308,
    "n02206856": 309,
    "n02219486": 310,
    "n02226429": 311,
    "n02229544": 312,
    "n02231487": 313,
    "n02233338": 314,
    "n02236044": 315,
    "n02256656": 316,
    "n02259212": 317,
    "n02264363": 318,
    "n02268443": 319,
    "n02268853": 320,
    "n02276258": 321,
    "n02277742": 322,
    "n02279972": 323,
    "n02280649": 324,
    "n02281406": 325,
    "n02281787": 326,
    "n02317335": 327,
    "n02319095": 328,
    "n02321529": 329,
    "n02325366": 330,
    "n02326432": 331,
    "n02328150": 332,
    "n02342885": 333,
    "n02346627": 334,
    "n02356798": 335,
    "n02361337": 336,
    "n02363005": 337,
    "n02364673": 338,
    "n02389026": 339,
    "n02391049": 340,
    "n02395406": 341,
    "n02396427": 342,
    "n02397096": 343,
    "n02398521": 344,
    "n02403003": 345,
    "n02408429": 346,
    "n02410509": 347,
    "n02412080": 348,
    "n02415577": 349,
    "n02417914": 350,
    "n02422106": 351,
    "n02422699": 352,
    "n02423022": 353,
    "n02437312": 354,
    "n02437616": 355,
    "n02441942": 356,
    "n02442845": 357,
    "n02443114": 358,
    "n02443484": 359,
    "n02444819": 360,
    "n02445715": 361,
    "n02447366": 362,
    "n02454379": 363,
    "n02457408": 364,
    "n02480495": 365,
    "n02480855": 366,
    "n02481823": 367,
    "n02483362": 368,
    "n02483708": 369,
    "n02484975": 370,
    "n02486261": 371,
    "n02486410": 372,
    "n02487347": 373,
    "n02488291": 374,
    "n02488702": 375,
    "n02489166": 376,
    "n02490219": 377,
    "n02492035": 378,
    "n02492660": 379,
    "n02493509": 380,
    "n02493793": 381,
    "n02494079": 382,
    "n02497673": 383,
    "n02500267": 384,
    "n02504013": 385,
    "n02504458": 386,
    "n02509815": 387,
    "n02510455": 388,
    "n02514041": 389,
    "n02526121": 390,
    "n02536864": 391,
    "n02606052": 392,
    "n02607072": 393,
    "n02640242": 394,
    "n02641379": 395,
    "n02643566": 396,
    "n02655020": 397,
    "n02666196": 398,
    "n02667093": 399,
    "n02669723": 400,
    "n02672831": 401,
    "n02676566": 402,
    "n02687172": 403,
    "n02690373": 404,
    "n02692877": 405,
    "n02699494": 406,
    "n02701002": 407,
    "n02704792": 408,
    "n02708093": 409,
    "n02727426": 410,
    "n02730930": 411,
    "n02747177": 412,
    "n02749479": 413,
    "n02769748": 414,
    "n02776631": 415,
    "n02777292": 416,
    "n02782093": 417,
    "n02783161": 418,
    "n02786058": 419,
    "n02787622": 420,
    "n02788148": 421,
    "n02790996": 422,
    "n02791124": 423,
    "n02791270": 424,
    "n02793495": 425,
    "n02794156": 426,
    "n02795169": 427,
    "n02797295": 428,
    "n02799071": 429,
    "n02802426": 430,
    "n02804414": 431,
    "n02804610": 432,
    "n02807133": 433,
    "n02808304": 434,
    "n02808440": 435,
    "n02814533": 436,
    "n02814860": 437,
    "n02815834": 438,
    "n02817516": 439,
    "n02823428": 440,
    "n02823750": 441,
    "n02825657": 442,
    "n02834397": 443,
    "n02835271": 444,
    "n02837789": 445,
    "n02840245": 446,
    "n02841315": 447,
    "n02843684": 448,
    "n02859443": 449,
    "n02860847": 450,
    "n02865351": 451,
    "n02869837": 452,
    "n02870880": 453,
    "n02871525": 454,
    "n02877765": 455,
    "n02879718": 456,
    "n02883205": 457,
    "n02892201": 458,
    "n02892767": 459,
    "n02894605": 460,
    "n02895154": 461,
    "n02906734": 462,
    "n02909870": 463,
    "n02910353": 464,
    "n02916936": 465,
    "n02917067": 466,
    "n02927161": 467,
    "n02930766": 468,
    "n02939185": 469,
    "n02948072": 470,
    "n02950826": 471,
    "n02951358": 472,
    "n02951585": 473,
    "n02963159": 474,
    "n02965783": 475,
    "n02966193": 476,
    "n02966687": 477,
    "n02971356": 478,
    "n02974003": 479,
    "n02977058": 480,
    "n02978881": 481,
    "n02979186": 482,
    "n02980441": 483,
    "n02981792": 484,
    "n02988304": 485,
    "n02992211": 486,
    "n02992529": 487,
    "n02999410": 488,
    "n03000134": 489,
    "n03000247": 490,
    "n03000684": 491,
    "n03014705": 492,
    "n03016953": 493,
    "n03017168": 494,
    "n03018349": 495,
    "n03026506": 496,
    "n03028079": 497,
    "n03032252": 498,
    "n03041632": 499,
    "n03042490": 500,
    "n03045698": 501,
    "n03047690": 502,
    "n03062245": 503,
    "n03063599": 504,
    "n03063689": 505,
    "n03065424": 506,
    "n03075370": 507,
    "n03085013": 508,
    "n03089624": 509,
    "n03095699": 510,
    "n03100240": 511,
    "n03109150": 512,
    "n03110669": 513,
    "n03124043": 514,
    "n03124170": 515,
    "n03125729": 516,
    "n03126707": 517,
    "n03127747": 518,
    "n03127925": 519,
    "n03131574": 520,
    "n03133878": 521,
    "n03134739": 522,
    "n03141823": 523,
    "n03146219": 524,
    "n03160309": 525,
    "n03179701": 526,
    "n03180011": 527,
    "n03187595": 528,
    "n03188531": 529,
    "n03196217": 530,
    "n03197337": 531,
    "n03201208": 532,
    "n03207743": 533,
    "n03207941": 534,
    "n03208938": 535,
    "n03216828": 536,
    "n03218198": 537,
    "n03220513": 538,
    "n03223299": 539,
    "n03240683": 540,
    "n03249569": 541,
    "n03250847": 542,
    "n03255030": 543,
    "n03259280": 544,
    "n03271574": 545,
    "n03272010": 546,
    "n03272562": 547,
    "n03290653": 548,
    "n03291819": 549,
    "n03297495": 550,
    "n03314780": 551,
    "n03325584": 552,
    "n03337140": 553,
    "n03344393": 554,
    "n03345487": 555,
    "n03347037": 556,
    "n03355925": 557,
    "n03372029": 558,
    "n03376595": 559,
    "n03379051": 560,
    "n03384352": 561,
    "n03388043": 562,
    "n03388183": 563,
    "n03388549": 564,
    "n03393912": 565,
    "n03394916": 566,
    "n03400231": 567,
    "n03404251": 568,
    "n03417042": 569,
    "n03424325": 570,
    "n03425413": 571,
    "n03443371": 572,
    "n03444034": 573,
    "n03445777": 574,
    "n03445924": 575,
    "n03447447": 576,
    "n03447721": 577,
    "n03450230": 578,
    "n03452741": 579,
    "n03457902": 580,
    "n03459775": 581,
    "n03461385": 582,
    "n03467068": 583,
    "n03476684": 584,
    "n03476991": 585,
    "n03478589": 586,
    "n03481172": 587,
    "n03482405": 588,
    "n03483316": 589,
    "n03485407": 590,
    "n03485794": 591,
    "n03492542": 592,
    "n03494278": 593,
    "n03495258": 594,
    "n03496892": 595,
    "n03498962": 596,
    "n03527444": 597,
    "n03529860": 598,
    "n03530642": 599,
    "n03532672": 600,
    "n03534580": 601,
    "n03535780": 602,
    "n03538406": 603,
    "n03544143": 604,
    "n03584254": 605,
    "n03584829": 606,
    "n03590841": 607,
    "n03594734": 608,
    "n03594945": 609,
    "n03595614": 610,
    "n03598930": 611,
    "n03599486": 612,
    "n03602883": 613,
    "n03617480": 614,
    "n03623198": 615,
    "n03627232": 616,
    "n03630383": 617,
    "n03633091": 618,
    "n03637318": 619,
    "n03642806": 620,
    "n03649909": 621,
    "n03657121": 622,
    "n03658185": 623,
    "n03661043": 624,
    "n03662601": 625,
    "n03666591": 626,
    "n03670208": 627,
    "n03673027": 628,
    "n03676483": 629,
    "n03680355": 630,
    "n03690938": 631,
    "n03691459": 632,
    "n03692522": 633,
    "n03697007": 634,
    "n03706229": 635,
    "n03709823": 636,
    "n03710193": 637,
    "n03710637": 638,
    "n03710721": 639,
    "n03717622": 640,
    "n03720891": 641,
    "n03721384": 642,
    "n03724870": 643,
    "n03729826": 644,
    "n03733131": 645,
    "n03733281": 646,
    "n03733805": 647,
    "n03742115": 648,
    "n03743016": 649,
    "n03759954": 650,
    "n03761084": 651,
    "n03763968": 652,
    "n03764736": 653,
    "n03769881": 654,
    "n03770439": 655,
    "n03770679": 656,
    "n03773504": 657,
    "n03775071": 658,
    "n03775546": 659,
    "n03776460": 660,
    "n03777568": 661,
    "n03777754": 662,
|
| 665 |
+
"n03781244": 663,
|
| 666 |
+
"n03782006": 664,
|
| 667 |
+
"n03785016": 665,
|
| 668 |
+
"n03786901": 666,
|
| 669 |
+
"n03787032": 667,
|
| 670 |
+
"n03788195": 668,
|
| 671 |
+
"n03788365": 669,
|
| 672 |
+
"n03791053": 670,
|
| 673 |
+
"n03792782": 671,
|
| 674 |
+
"n03792972": 672,
|
| 675 |
+
"n03793489": 673,
|
| 676 |
+
"n03794056": 674,
|
| 677 |
+
"n03796401": 675,
|
| 678 |
+
"n03803284": 676,
|
| 679 |
+
"n03804744": 677,
|
| 680 |
+
"n03814639": 678,
|
| 681 |
+
"n03814906": 679,
|
| 682 |
+
"n03825788": 680,
|
| 683 |
+
"n03832673": 681,
|
| 684 |
+
"n03837869": 682,
|
| 685 |
+
"n03838899": 683,
|
| 686 |
+
"n03840681": 684,
|
| 687 |
+
"n03841143": 685,
|
| 688 |
+
"n03843555": 686,
|
| 689 |
+
"n03854065": 687,
|
| 690 |
+
"n03857828": 688,
|
| 691 |
+
"n03866082": 689,
|
| 692 |
+
"n03868242": 690,
|
| 693 |
+
"n03868863": 691,
|
| 694 |
+
"n03871628": 692,
|
| 695 |
+
"n03873416": 693,
|
| 696 |
+
"n03874293": 694,
|
| 697 |
+
"n03874599": 695,
|
| 698 |
+
"n03876231": 696,
|
| 699 |
+
"n03877472": 697,
|
| 700 |
+
"n03877845": 698,
|
| 701 |
+
"n03884397": 699,
|
| 702 |
+
"n03887697": 700,
|
| 703 |
+
"n03888257": 701,
|
| 704 |
+
"n03888605": 702,
|
| 705 |
+
"n03891251": 703,
|
| 706 |
+
"n03891332": 704,
|
| 707 |
+
"n03895866": 705,
|
| 708 |
+
"n03899768": 706,
|
| 709 |
+
"n03902125": 707,
|
| 710 |
+
"n03903868": 708,
|
| 711 |
+
"n03908618": 709,
|
| 712 |
+
"n03908714": 710,
|
| 713 |
+
"n03916031": 711,
|
| 714 |
+
"n03920288": 712,
|
| 715 |
+
"n03924679": 713,
|
| 716 |
+
"n03929660": 714,
|
| 717 |
+
"n03929855": 715,
|
| 718 |
+
"n03930313": 716,
|
| 719 |
+
"n03930630": 717,
|
| 720 |
+
"n03933933": 718,
|
| 721 |
+
"n03935335": 719,
|
| 722 |
+
"n03937543": 720,
|
| 723 |
+
"n03938244": 721,
|
| 724 |
+
"n03942813": 722,
|
| 725 |
+
"n03944341": 723,
|
| 726 |
+
"n03947888": 724,
|
| 727 |
+
"n03950228": 725,
|
| 728 |
+
"n03954731": 726,
|
| 729 |
+
"n03956157": 727,
|
| 730 |
+
"n03958227": 728,
|
| 731 |
+
"n03961711": 729,
|
| 732 |
+
"n03967562": 730,
|
| 733 |
+
"n03970156": 731,
|
| 734 |
+
"n03976467": 732,
|
| 735 |
+
"n03976657": 733,
|
| 736 |
+
"n03977966": 734,
|
| 737 |
+
"n03980874": 735,
|
| 738 |
+
"n03982430": 736,
|
| 739 |
+
"n03983396": 737,
|
| 740 |
+
"n03991062": 738,
|
| 741 |
+
"n03992509": 739,
|
| 742 |
+
"n03995372": 740,
|
| 743 |
+
"n03998194": 741,
|
| 744 |
+
"n04004767": 742,
|
| 745 |
+
"n04005630": 743,
|
| 746 |
+
"n04008634": 744,
|
| 747 |
+
"n04009552": 745,
|
| 748 |
+
"n04019541": 746,
|
| 749 |
+
"n04023962": 747,
|
| 750 |
+
"n04026417": 748,
|
| 751 |
+
"n04033901": 749,
|
| 752 |
+
"n04033995": 750,
|
| 753 |
+
"n04037443": 751,
|
| 754 |
+
"n04039381": 752,
|
| 755 |
+
"n04040759": 753,
|
| 756 |
+
"n04041544": 754,
|
| 757 |
+
"n04044716": 755,
|
| 758 |
+
"n04049303": 756,
|
| 759 |
+
"n04065272": 757,
|
| 760 |
+
"n04067472": 758,
|
| 761 |
+
"n04069434": 759,
|
| 762 |
+
"n04070727": 760,
|
| 763 |
+
"n04074963": 761,
|
| 764 |
+
"n04081281": 762,
|
| 765 |
+
"n04086273": 763,
|
| 766 |
+
"n04090263": 764,
|
| 767 |
+
"n04099969": 765,
|
| 768 |
+
"n04111531": 766,
|
| 769 |
+
"n04116512": 767,
|
| 770 |
+
"n04118538": 768,
|
| 771 |
+
"n04118776": 769,
|
| 772 |
+
"n04120489": 770,
|
| 773 |
+
"n04125021": 771,
|
| 774 |
+
"n04127249": 772,
|
| 775 |
+
"n04131690": 773,
|
| 776 |
+
"n04133789": 774,
|
| 777 |
+
"n04136333": 775,
|
| 778 |
+
"n04141076": 776,
|
| 779 |
+
"n04141327": 777,
|
| 780 |
+
"n04141975": 778,
|
| 781 |
+
"n04146614": 779,
|
| 782 |
+
"n04147183": 780,
|
| 783 |
+
"n04149813": 781,
|
| 784 |
+
"n04152593": 782,
|
| 785 |
+
"n04153751": 783,
|
| 786 |
+
"n04154565": 784,
|
| 787 |
+
"n04162706": 785,
|
| 788 |
+
"n04179913": 786,
|
| 789 |
+
"n04192698": 787,
|
| 790 |
+
"n04200800": 788,
|
| 791 |
+
"n04201297": 789,
|
| 792 |
+
"n04204238": 790,
|
| 793 |
+
"n04204347": 791,
|
| 794 |
+
"n04208210": 792,
|
| 795 |
+
"n04209133": 793,
|
| 796 |
+
"n04209239": 794,
|
| 797 |
+
"n04228054": 795,
|
| 798 |
+
"n04229816": 796,
|
| 799 |
+
"n04235860": 797,
|
| 800 |
+
"n04238763": 798,
|
| 801 |
+
"n04239074": 799,
|
| 802 |
+
"n04243546": 800,
|
| 803 |
+
"n04251144": 801,
|
| 804 |
+
"n04252077": 802,
|
| 805 |
+
"n04252225": 803,
|
| 806 |
+
"n04254120": 804,
|
| 807 |
+
"n04254680": 805,
|
| 808 |
+
"n04254777": 806,
|
| 809 |
+
"n04258138": 807,
|
| 810 |
+
"n04259630": 808,
|
| 811 |
+
"n04263257": 809,
|
| 812 |
+
"n04264628": 810,
|
| 813 |
+
"n04265275": 811,
|
| 814 |
+
"n04266014": 812,
|
| 815 |
+
"n04270147": 813,
|
| 816 |
+
"n04273569": 814,
|
| 817 |
+
"n04275548": 815,
|
| 818 |
+
"n04277352": 816,
|
| 819 |
+
"n04285008": 817,
|
| 820 |
+
"n04286575": 818,
|
| 821 |
+
"n04296562": 819,
|
| 822 |
+
"n04310018": 820,
|
| 823 |
+
"n04311004": 821,
|
| 824 |
+
"n04311174": 822,
|
| 825 |
+
"n04317175": 823,
|
| 826 |
+
"n04325704": 824,
|
| 827 |
+
"n04326547": 825,
|
| 828 |
+
"n04328186": 826,
|
| 829 |
+
"n04330267": 827,
|
| 830 |
+
"n04332243": 828,
|
| 831 |
+
"n04335435": 829,
|
| 832 |
+
"n04336792": 830,
|
| 833 |
+
"n04344873": 831,
|
| 834 |
+
"n04346328": 832,
|
| 835 |
+
"n04347754": 833,
|
| 836 |
+
"n04350905": 834,
|
| 837 |
+
"n04355338": 835,
|
| 838 |
+
"n04355933": 836,
|
| 839 |
+
"n04356056": 837,
|
| 840 |
+
"n04357314": 838,
|
| 841 |
+
"n04366367": 839,
|
| 842 |
+
"n04367480": 840,
|
| 843 |
+
"n04370456": 841,
|
| 844 |
+
"n04371430": 842,
|
| 845 |
+
"n04371774": 843,
|
| 846 |
+
"n04372370": 844,
|
| 847 |
+
"n04376876": 845,
|
| 848 |
+
"n04380533": 846,
|
| 849 |
+
"n04389033": 847,
|
| 850 |
+
"n04392985": 848,
|
| 851 |
+
"n04398044": 849,
|
| 852 |
+
"n04399382": 850,
|
| 853 |
+
"n04404412": 851,
|
| 854 |
+
"n04409515": 852,
|
| 855 |
+
"n04417672": 853,
|
| 856 |
+
"n04418357": 854,
|
| 857 |
+
"n04423845": 855,
|
| 858 |
+
"n04428191": 856,
|
| 859 |
+
"n04429376": 857,
|
| 860 |
+
"n04435653": 858,
|
| 861 |
+
"n04442312": 859,
|
| 862 |
+
"n04443257": 860,
|
| 863 |
+
"n04447861": 861,
|
| 864 |
+
"n04456115": 862,
|
| 865 |
+
"n04458633": 863,
|
| 866 |
+
"n04461696": 864,
|
| 867 |
+
"n04462240": 865,
|
| 868 |
+
"n04465501": 866,
|
| 869 |
+
"n04467665": 867,
|
| 870 |
+
"n04476259": 868,
|
| 871 |
+
"n04479046": 869,
|
| 872 |
+
"n04482393": 870,
|
| 873 |
+
"n04483307": 871,
|
| 874 |
+
"n04485082": 872,
|
| 875 |
+
"n04486054": 873,
|
| 876 |
+
"n04487081": 874,
|
| 877 |
+
"n04487394": 875,
|
| 878 |
+
"n04493381": 876,
|
| 879 |
+
"n04501370": 877,
|
| 880 |
+
"n04505470": 878,
|
| 881 |
+
"n04507155": 879,
|
| 882 |
+
"n04509417": 880,
|
| 883 |
+
"n04515003": 881,
|
| 884 |
+
"n04517823": 882,
|
| 885 |
+
"n04522168": 883,
|
| 886 |
+
"n04523525": 884,
|
| 887 |
+
"n04525038": 885,
|
| 888 |
+
"n04525305": 886,
|
| 889 |
+
"n04532106": 887,
|
| 890 |
+
"n04532670": 888,
|
| 891 |
+
"n04536866": 889,
|
| 892 |
+
"n04540053": 890,
|
| 893 |
+
"n04542943": 891,
|
| 894 |
+
"n04548280": 892,
|
| 895 |
+
"n04548362": 893,
|
| 896 |
+
"n04550184": 894,
|
| 897 |
+
"n04552348": 895,
|
| 898 |
+
"n04553703": 896,
|
| 899 |
+
"n04554684": 897,
|
| 900 |
+
"n04557648": 898,
|
| 901 |
+
"n04560804": 899,
|
| 902 |
+
"n04562935": 900,
|
| 903 |
+
"n04579145": 901,
|
| 904 |
+
"n04579432": 902,
|
| 905 |
+
"n04584207": 903,
|
| 906 |
+
"n04589890": 904,
|
| 907 |
+
"n04590129": 905,
|
| 908 |
+
"n04591157": 906,
|
| 909 |
+
"n04591713": 907,
|
| 910 |
+
"n04592741": 908,
|
| 911 |
+
"n04596742": 909,
|
| 912 |
+
"n04597913": 910,
|
| 913 |
+
"n04599235": 911,
|
| 914 |
+
"n04604644": 912,
|
| 915 |
+
"n04606251": 913,
|
| 916 |
+
"n04612504": 914,
|
| 917 |
+
"n04613696": 915,
|
| 918 |
+
"n06359193": 916,
|
| 919 |
+
"n06596364": 917,
|
| 920 |
+
"n06785654": 918,
|
| 921 |
+
"n06794110": 919,
|
| 922 |
+
"n06874185": 920,
|
| 923 |
+
"n07248320": 921,
|
| 924 |
+
"n07565083": 922,
|
| 925 |
+
"n07579787": 923,
|
| 926 |
+
"n07583066": 924,
|
| 927 |
+
"n07584110": 925,
|
| 928 |
+
"n07590611": 926,
|
| 929 |
+
"n07613480": 927,
|
| 930 |
+
"n07614500": 928,
|
| 931 |
+
"n07615774": 929,
|
| 932 |
+
"n07684084": 930,
|
| 933 |
+
"n07693725": 931,
|
| 934 |
+
"n07695742": 932,
|
| 935 |
+
"n07697313": 933,
|
| 936 |
+
"n07697537": 934,
|
| 937 |
+
"n07711569": 935,
|
| 938 |
+
"n07714571": 936,
|
| 939 |
+
"n07714990": 937,
|
| 940 |
+
"n07715103": 938,
|
| 941 |
+
"n07716358": 939,
|
| 942 |
+
"n07716906": 940,
|
| 943 |
+
"n07717410": 941,
|
| 944 |
+
"n07717556": 942,
|
| 945 |
+
"n07718472": 943,
|
| 946 |
+
"n07718747": 944,
|
| 947 |
+
"n07720875": 945,
|
| 948 |
+
"n07730033": 946,
|
| 949 |
+
"n07734744": 947,
|
| 950 |
+
"n07742313": 948,
|
| 951 |
+
"n07745940": 949,
|
| 952 |
+
"n07747607": 950,
|
| 953 |
+
"n07749582": 951,
|
| 954 |
+
"n07753113": 952,
|
| 955 |
+
"n07753275": 953,
|
| 956 |
+
"n07753592": 954,
|
| 957 |
+
"n07754684": 955,
|
| 958 |
+
"n07760859": 956,
|
| 959 |
+
"n07768694": 957,
|
| 960 |
+
"n07802026": 958,
|
| 961 |
+
"n07831146": 959,
|
| 962 |
+
"n07836838": 960,
|
| 963 |
+
"n07860988": 961,
|
| 964 |
+
"n07871810": 962,
|
| 965 |
+
"n07873807": 963,
|
| 966 |
+
"n07875152": 964,
|
| 967 |
+
"n07880968": 965,
|
| 968 |
+
"n07892512": 966,
|
| 969 |
+
"n07920052": 967,
|
| 970 |
+
"n07930864": 968,
|
| 971 |
+
"n07932039": 969,
|
| 972 |
+
"n09193705": 970,
|
| 973 |
+
"n09229709": 971,
|
| 974 |
+
"n09246464": 972,
|
| 975 |
+
"n09256479": 973,
|
| 976 |
+
"n09288635": 974,
|
| 977 |
+
"n09332890": 975,
|
| 978 |
+
"n09399592": 976,
|
| 979 |
+
"n09421951": 977,
|
| 980 |
+
"n09428293": 978,
|
| 981 |
+
"n09468604": 979,
|
| 982 |
+
"n09472597": 980,
|
| 983 |
+
"n09835506": 981,
|
| 984 |
+
"n10148035": 982,
|
| 985 |
+
"n10565667": 983,
|
| 986 |
+
"n11879895": 984,
|
| 987 |
+
"n11939491": 985,
|
| 988 |
+
"n12057211": 986,
|
| 989 |
+
"n12144580": 987,
|
| 990 |
+
"n12267677": 988,
|
| 991 |
+
"n12620546": 989,
|
| 992 |
+
"n12768682": 990,
|
| 993 |
+
"n12985857": 991,
|
| 994 |
+
"n12998815": 992,
|
| 995 |
+
"n13037406": 993,
|
| 996 |
+
"n13040303": 994,
|
| 997 |
+
"n13044778": 995,
|
| 998 |
+
"n13052670": 996,
|
| 999 |
+
"n13054560": 997,
|
| 1000 |
+
"n13133613": 998,
|
| 1001 |
+
"n15075141": 999
|
| 1002 |
+
}
|
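The block above is the tail of imagenet_classes.json, which maps WordNet synset IDs to the 1000 contiguous ImageNet-1k class indices. A minimal usage sketch, not part of the diff — the path follows this repo's layout, and the "n01440764": 0 entry is assumed from the standard ImageNet ordering:

# Hypothetical sketch: resolve ImageNet-style class-folder names
# (WordNet synset IDs) to integer training targets via the map above.
import json

with open('InternVL/classification/meta_data/imagenet_classes.json') as f:
    class_to_idx = json.load(f)  # {"n01440764": 0, ..., "n15075141": 999}

def folder_to_target(folder_name: str) -> int:
    # Class folders in an ImageNet-style tree are named by synset ID.
    return class_to_idx[folder_name]

assert folder_to_target('n15075141') == 999  # last entry listed above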
InternVL/classification/meta_data/map22kto1k.txt
ADDED
@@ -0,0 +1,1000 @@
359
368
460
475
486
492
496
514
516
525
547
548
556
563
575
641
648
723
733
765
801
826
852
858
878
896
900
905
908
910
935
946
947
994
999
1003
1005
1010
1027
1029
1048
1055
1064
1065
1069
1075
1079
1081
1085
1088
1093
1106
1143
1144
1145
1147
1168
1171
1178
1187
1190
1197
1205
1216
1223
1230
1236
1241
1245
1257
1259
1260
1267
1268
1269
1271
1272
1273
1277
1303
1344
1349
1355
1357
1384
1388
1391
1427
1429
1432
1437
1450
1461
1462
1474
1502
1503
1512
1552
1555
1577
1584
1587
1589
1599
1615
1616
1681
1692
1701
1716
1729
1757
1759
1764
1777
1786
1822
1841
1842
1848
1850
1856
1860
1861
1864
1876
1897
1898
1910
1913
1918
1922
1928
1932
1935
1947
1951
1953
1970
1977
1979
2001
2017
2067
2081
2087
2112
2128
2135
2147
2174
2175
2176
2177
2178
2181
2183
2184
2187
2189
2190
2191
2192
2193
2197
2202
2203
2206
2208
2209
2211
2212
2213
2214
2215
2216
2217
2219
2222
2223
2224
2225
2226
2227
2228
2229
2230
2236
2238
2240
2241
2242
2243
2244
2245
2247
2248
2249
2250
2251
2252
2255
2256
2257
2262
2263
2264
2265
2266
2268
2270
2271
2272
2273
2275
2276
2279
2280
2281
2282
2285
2289
2292
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2309
2310
2312
2313
2314
2315
2316
2318
2319
2321
2322
2326
2329
2330
2331
2332
2334
2335
2336
2337
2338
2339
2341
2342
2343
2344
2346
2348
2349
2351
2352
2353
2355
2357
2358
2359
2360
2364
2365
2368
2369
2377
2382
2383
2385
2397
2398
2400
2402
2405
2412
2421
2428
2431
2432
2433
2436
2441
2445
2450
2453
2454
2465
2469
2532
2533
2538
2544
2547
2557
2565
2578
2612
2658
2702
2722
2731
2738
2741
2747
2810
2818
2833
2844
2845
2867
2874
2882
2884
2888
2889
3008
3012
3019
3029
3033
3042
3091
3106
3138
3159
3164
3169
3280
3296
3311
3318
3320
3324
3330
3366
3375
3381
3406
3419
3432
3434
3435
3493
3495
3503
3509
3511
3513
3517
3521
3526
3546
3554
3600
3601
3606
3612
3613
3616
3622
3623
3627
3632
3634
3636
3638
3644
3646
3649
3650
3651
3656
3663
3673
3674
3689
3690
3702
3733
3769
3971
3974
4065
4068
4073
4102
4136
4140
4151
4159
4165
4207
4219
4226
4249
4256
4263
4270
4313
4321
4378
4386
4478
4508
4512
4536
4542
4550
4560
4562
4570
4571
4572
4583
4588
4594
4604
4608
4623
4634
4636
4646
4651
4652
4686
4688
4691
4699
4724
4727
4737
4770
4774
4789
4802
4807
4819
4880
4886
4908
4927
4931
4936
4964
4976
4993
5028
5033
5043
5046
5096
5111
5114
5131
5132
5183
5199
5235
5275
5291
5293
5294
5343
5360
5362
5364
5390
5402
5418
5428
5430
5437
5443
5473
5484
5486
5505
5507
5508
5510
5567
5578
5580
5584
5606
5613
5629
5672
5676
5692
5701
5760
5769
5770
5779
5814
5850
5871
5893
5911
5949
5954
6005
6006
6012
6017
6023
6024
6040
6050
6054
6087
6105
6157
6235
6237
6256
6259
6286
6291
6306
6339
6341
6343
6379
6383
6393
6405
6479
6511
6517
6541
6561
6608
6611
6615
6678
6682
6707
6752
6798
6850
6880
6885
6890
6920
6981
7000
7009
7038
7049
7050
7052
7073
7078
7098
7111
7165
7198
7204
7280
7283
7286
7287
7293
7294
7305
7318
7341
7346
7354
7382
7427
7428
7435
7445
7450
7455
7467
7469
7497
7502
7506
7514
7523
7651
7661
7664
7672
7679
7685
7696
7730
7871
7873
7895
7914
7915
7920
7934
7935
7949
8009
8036
8051
8065
8074
8090
8112
8140
8164
8168
8178
8182
8198
8212
8216
8230
8242
8288
8289
8295
8318
8352
8368
8371
8375
8376
8401
8416
8419
8436
8460
8477
8478
8482
8498
8500
8539
8543
8552
8555
8580
8584
8586
8594
8598
8601
8606
8610
8611
8622
8627
8639
8649
8650
8653
8654
8667
8672
8673
8674
8676
8684
8720
8723
8750
8753
8801
8815
8831
8835
8842
8845
8858
8897
8916
8951
8954
8959
8970
8976
8981
8983
8989
8991
8993
9019
9039
9042
9043
9056
9057
9070
9087
9098
9106
9130
9131
9155
9171
9183
9198
9199
9201
9204
9211
9220
9224
9228
9249
9259
9270
9278
9294
9299
9309
9321
9344
9351
9375
9376
9381
9391
9400
9404
9440
9448
9463
9474
9501
9504
9513
9514
9544
9566
9575
9607
9608
9623
9632
9638
9642
9655
9673
9739
9751
9759
9766
9777
9801
9819
9838
9878
9923
9955
9960
9962
9969
9996
10009
10030
10039
10051
10072
10074
10077
10093
10096
10108
10117
10120
10123
10157
10225
10275
10303
10306
10313
10314
10331
10336
10337
10412
10422
10450
10462
10464
10486
10518
10521
10522
10531
10533
10534
10550
10558
10573
10582
10585
10588
10611
10625
10634
10637
10676
10682
10725
10775
10781
10782
10806
10836
10839
10847
10858
10870
10880
10883
10907
10913
10920
10935
10946
10950
10951
10956
10998
11002
11017
11022
11024
11026
11044
11054
11094
11109
11136
11136
11167
11185
11220
11222
11241
11254
11258
11278
11305
11310
11330
11366
11376
11388
11391
11400
11406
11436
11448
11465
11468
11472
11477
11482
11483
11506
11535
11557
11565
11574
11583
11593
11610
11611
11618
11620
11639
11642
11663
11673
11688
11708
11709
11715
11720
11725
11728
11742
11759
11770
11836
11838
11855
11875
11877
11883
11888
11895
11916
11922
11929
11943
11951
11979
11983
12213
12228
12238
12240
12241
12246
12282
12348
12368
12372
12421
12559
12565
12574
12687
12754
12767
12777
12779
12811
12831
12834
12835
12842
12846
12848
12849
12855
12857
12872
12937
12970
13016
13037
13045
13058
13084
13085
13087
13093
13133
13181
13229
13405
13443
13613
13689
13697
13708
13748
13803
13981
14050
14058
14218
14245
14255
14263
14293
14323
14366
14388
14393
14437
14441
14964
15730
16742
18035
18203
18533
18790
19100
20017
20460
21024
21043
21161
21169
21179
21194
21198
21367
21815
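map22kto1k.txt above lists, for each of the 1000 ImageNet-1k classes in order, one index into the larger ImageNet-22k label space. A hedged sketch of how such a map is commonly consumed — the 21841-way head width is an assumption about the 22k label space, not something this diff states:

# Hypothetical sketch, not the repo's code: evaluate a 22k-way classifier
# on ImageNet-1k by selecting the 1000 mapped logit columns.
import torch

with open('InternVL/classification/meta_data/map22kto1k.txt') as f:
    map22kto1k = [int(line) for line in f if line.strip()]  # 1000 indices

logits_22k = torch.randn(8, 21841)     # fake batch; assumed 22k head width
logits_1k = logits_22k[:, map22kto1k]  # (8, 1000), in ImageNet-1k class order
pred = logits_1k.argmax(dim=1)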
InternVL/classification/meta_data/real.json
ADDED
The diff for this file is too large to render.
See raw diff
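real.json is not rendered in this view. Given the companion dataset/imagenet_real.py and the configs that test on ImageNet-ReaL, a plausible format is one list of acceptable class indices per validation image; a hedged sketch under that assumption:

# Hypothetical sketch, assuming real.json stores ImageNet-ReaL labels as a
# list (one entry per val image, in file order) of lists of valid class ids.
import json

with open('InternVL/classification/meta_data/real.json') as f:
    real_labels = json.load(f)

def real_accuracy(preds):
    # preds: iterable of top-1 predictions per validation image, in file order;
    # images whose ReaL label set is empty are excluded from the score.
    scored = [int(p) in labels for p, labels in zip(preds, real_labels) if labels]
    return sum(scored) / max(len(scored), 1)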
InternVL/classification/models/__init__.py
ADDED
@@ -0,0 +1,7 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

from .build import build_model
InternVL/classification/models/build.py
ADDED
@@ -0,0 +1,36 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

from .intern_vit_6b import InternViT6B


def build_model(config):
    model_type = config.MODEL.TYPE
    if model_type == 'intern_vit_6b':
        model = InternViT6B(
            num_classes=config.MODEL.NUM_CLASSES,
            patch_size=config.MODEL.INTERN_VIT_6B.PATCH_SIZE,
            img_size=config.DATA.IMG_SIZE,
            pretrain_size=config.MODEL.INTERN_VIT_6B.PRETRAIN_SIZE,
            qkv_bias=config.MODEL.INTERN_VIT_6B.QKV_BIAS,
            drop_path_rate=config.MODEL.DROP_PATH_RATE,
            embed_dim=config.MODEL.INTERN_VIT_6B.EMBED_DIM,
            num_heads=config.MODEL.INTERN_VIT_6B.NUM_HEADS,
            mlp_ratio=config.MODEL.INTERN_VIT_6B.MLP_RATIO,
            init_values=config.MODEL.INTERN_VIT_6B.INIT_VALUES,
            qk_normalization=config.MODEL.INTERN_VIT_6B.QK_NORMALIZATION,
            depth=config.MODEL.INTERN_VIT_6B.DEPTH,
            use_flash_attn=config.MODEL.INTERN_VIT_6B.USE_FLASH_ATTN,
            with_cp=config.TRAIN.USE_CHECKPOINT,
            freeze_vit=config.MODEL.INTERN_VIT_6B.FREEZE_VIT,
            pretrained=config.MODEL.INTERN_VIT_6B.PRETRAINED,
            cls_target=config.MODEL.INTERN_VIT_6B.CLS_TARGET,
            head_norm_type=config.MODEL.INTERN_VIT_6B.HEAD_NORM_TYPE,
        )
    else:
        raise NotImplementedError(f'Unknown model: {model_type}')

    return model
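A hedged usage sketch for build_model: the nested attribute access above implies a yacs-style CfgNode config (an assumption; the repo's own config module is not shown in this view). The keys mirror the call in build.py; several values match the InternViT6B defaults visible in intern_vit_6b.py below, while NUM_CLASSES, CLS_TARGET and HEAD_NORM_TYPE are illustrative:

# Hypothetical sketch: assembling a minimal config and building the model.
from yacs.config import CfgNode as CN

from models import build_model  # run from InternVL/classification/

config = CN()
config.DATA = CN()
config.DATA.IMG_SIZE = 224
config.TRAIN = CN()
config.TRAIN.USE_CHECKPOINT = True
config.MODEL = CN()
config.MODEL.TYPE = 'intern_vit_6b'
config.MODEL.NUM_CLASSES = 1000        # illustrative
config.MODEL.DROP_PATH_RATE = 0.0
vit = config.MODEL.INTERN_VIT_6B = CN()
vit.PATCH_SIZE = 14
vit.PRETRAIN_SIZE = 224
vit.QKV_BIAS = False
vit.EMBED_DIM = 3200
vit.NUM_HEADS = 25
vit.MLP_RATIO = 4
vit.INIT_VALUES = 0.1
vit.QK_NORMALIZATION = True
vit.DEPTH = 48
vit.USE_FLASH_ATTN = True
vit.FREEZE_VIT = True
vit.PRETRAINED = None
vit.CLS_TARGET = 'attention_pooling'   # assumption
vit.HEAD_NORM_TYPE = 'bn'              # assumption

model = build_model(config)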
InternVL/classification/models/flash_attention.py
ADDED
@@ -0,0 +1,75 @@
import torch
import torch.nn as nn
from einops import rearrange

try:  # v1
    from flash_attn.flash_attn_interface import \
        flash_attn_unpadded_qkvpacked_func
except ImportError:  # v2
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

from flash_attn.bert_padding import pad_input, unpad_input


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                 if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None
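A hedged smoke test for the FlashAttention wrapper above — per its docstring it takes a packed qkv tensor of shape (B, S, 3, H, D) in fp16/bf16 on a CUDA device and, on the dense fast path, returns output of shape (B, S, H, D); the sizes below are illustrative:

# Hypothetical sketch: drive FlashAttention with a packed qkv tensor.
import torch

attn = FlashAttention(attention_dropout=0.0)
B, S, H, D = 2, 256, 25, 128  # batch, seq len, heads, head dim (illustrative)
qkv = torch.randn(B, S, 3, H, D, device='cuda', dtype=torch.float16)
out, _ = attn(qkv)            # no key_padding_mask -> dense fast path
assert out.shape == (B, S, H, D)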
InternVL/classification/models/intern_vit_6b.py
ADDED
@@ -0,0 +1,473 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple

try:
    from .flash_attention import FlashAttention
    has_flash_attn = True
except ImportError:
    print('FlashAttention is not installed.')
    has_flash_attn = False


def _freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False


class CrossAttention(nn.Module):
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., attn_head_dim=None, out_dim=None):
        super().__init__()
        if out_dim is None:
            out_dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5
        assert all_head_dim == dim

        self.q = nn.Linear(dim, all_head_dim, bias=False)
        self.k = nn.Linear(dim, all_head_dim, bias=False)
        self.v = nn.Linear(dim, all_head_dim, bias=False)

        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.k_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.k_bias = None
            self.v_bias = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, out_dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, k=None, v=None):
        B, N, C = x.shape
        N_k = k.shape[1]
        N_v = v.shape[1]

        q_bias, k_bias, v_bias = None, None, None
        if self.q_bias is not None:
            q_bias = self.q_bias
            k_bias = self.k_bias
            v_bias = self.v_bias

        q = F.linear(input=x, weight=self.q.weight, bias=q_bias)
        q = q.reshape(B, N, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0)  # (B, N_head, N_q, dim)

        k = F.linear(input=k, weight=self.k.weight, bias=k_bias)
        k = k.reshape(B, N_k, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0)

        v = F.linear(input=v, weight=self.v.weight, bias=v_bias)
        v = v.reshape(B, N_v, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))  # (B, N_head, N_q, N_k)

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x


class AttentiveBlock(nn.Module):

    def __init__(self, dim, num_heads, qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, attn_head_dim=None, out_dim=None):
        super().__init__()

        self.norm1_q = norm_layer(dim)
        self.norm1_k = norm_layer(dim)
        self.norm1_v = norm_layer(dim)
        self.cross_attn = CrossAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
            proj_drop=drop, attn_head_dim=attn_head_dim, out_dim=out_dim)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x_q, x_kv, pos_q, pos_k, bool_masked_pos, rel_pos_bias=None):
        x_q = self.norm1_q(x_q + pos_q)
        x_k = self.norm1_k(x_kv + pos_k)
        x_v = self.norm1_v(x_kv)
        x = self.cross_attn(x_q, k=x_k, v=x_v)

        return x


class AttentionPoolingBlock(AttentiveBlock):

    def forward(self, x):
        x_q = x.mean(1, keepdim=True)
        x_kv, pos_q, pos_k = x, 0, 0
        x = super().forward(x_q, x_kv, pos_q, pos_k, bool_masked_pos=None, rel_pos_bias=None)
        x = x.squeeze(1)
        return x


class RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    RMSNorm = FusedRMSNorm  # noqa

    print('Discovered apex.normalization.FusedRMSNorm - will use it instead of RMSNorm')
except ImportError:
    # using the normal RMSNorm
    pass
except Exception:
    print('discovered apex but it failed to load, falling back to RMSNorm')
    pass


class LayerScale(nn.Module):
    def __init__(self, dim, init_values=1e-5, inplace=False, force_fp32=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))
        self.force_fp32 = force_fp32

    @torch.cuda.amp.autocast(enabled=False)
    def forward(self, x):
        if self.force_fp32:
            output_type = x.dtype
            out = x.float().mul_(self.gamma.float()) if self.inplace else x.float() * self.gamma.float()
            return out.to(dtype=output_type)
        else:
            out = x.mul_(self.gamma) if self.inplace else x * self.gamma
            return out


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., use_flash_attn=False,
                 causal=False, norm_layer=nn.LayerNorm, qk_normalization=False):
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.use_flash_attn = use_flash_attn
        if use_flash_attn:
            self.causal = causal
            self.inner_attn = FlashAttention(attention_dropout=attn_drop)

        self.qk_normalization = qk_normalization
        self.q_norm = norm_layer(dim) if qk_normalization else nn.Identity()
        self.k_norm = norm_layer(dim) if qk_normalization else nn.Identity()

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, x):
        x = self._naive_attn(x) if not self.use_flash_attn else self._flash_attn(x)
        return x


class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 bias=True, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        bias = to_2tuple(bias)
        drop_probs = to_2tuple(drop)

        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_probs[0])
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
        self.drop2 = nn.Dropout(drop_probs[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x


class Block(nn.Module):

    def __init__(
            self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None,
            drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_flash_attn=False, with_cp=False,
            qk_normalization=False, layerscale_force_fp32=False):
        super().__init__()

        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
                              use_flash_attn=use_flash_attn, causal=False, norm_layer=norm_layer,
                              qk_normalization=qk_normalization)
        self.ls1 = LayerScale(dim, init_values=init_values,
                              force_fp32=layerscale_force_fp32) if init_values else nn.Identity()
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.ls2 = LayerScale(dim, init_values=init_values,
                              force_fp32=layerscale_force_fp32) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.with_cp = with_cp

    def forward(self, x):

        def _inner_forward(x):
            x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
            x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
            return x

        if self.with_cp:
            return checkpoint.checkpoint(_inner_forward, x)
        else:
            return _inner_forward(x)


class PatchEmbed(nn.Module):
    """ 2D Image to Patch Embedding
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.flatten = flatten

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x, **kwargs):
        x = self.proj(x)
        _, _, H, W = x.shape
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x, H, W


class InternViT6B(nn.Module):

    def __init__(self, in_chans=3, patch_size=14, img_size=224, pretrain_size=224, qkv_bias=False, drop_path_rate=0.0,
                 embed_dim=3200, num_heads=25, mlp_ratio=4, init_values=0.1, qk_normalization=True, depth=48,
                 use_flash_attn=True, with_cp=True, layerscale_force_fp32=False, freeze_vit=True,
+
v_bias = self.v_bias
|
| 70 |
+
|
| 71 |
+
q = F.linear(input=x, weight=self.q.weight, bias=q_bias)
|
| 72 |
+
q = q.reshape(B, N, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0) # (B, N_head, N_q, dim)
|
| 73 |
+
|
| 74 |
+
k = F.linear(input=k, weight=self.k.weight, bias=k_bias)
|
| 75 |
+
k = k.reshape(B, N_k, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0)
|
| 76 |
+
|
| 77 |
+
v = F.linear(input=v, weight=self.v.weight, bias=v_bias)
|
| 78 |
+
v = v.reshape(B, N_v, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0)
|
| 79 |
+
|
| 80 |
+
q = q * self.scale
|
| 81 |
+
attn = (q @ k.transpose(-2, -1)) # (B, N_head, N_q, N_k)
|
| 82 |
+
|
| 83 |
+
attn = attn.softmax(dim=-1)
|
| 84 |
+
attn = self.attn_drop(attn)
|
| 85 |
+
|
| 86 |
+
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
|
| 87 |
+
x = self.proj(x)
|
| 88 |
+
x = self.proj_drop(x)
|
| 89 |
+
|
| 90 |
+
return x
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class AttentiveBlock(nn.Module):
|
| 94 |
+
|
| 95 |
+
def __init__(self, dim, num_heads, qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
|
| 96 |
+
drop_path=0., norm_layer=nn.LayerNorm, attn_head_dim=None, out_dim=None):
|
| 97 |
+
super().__init__()
|
| 98 |
+
|
| 99 |
+
self.norm1_q = norm_layer(dim)
|
| 100 |
+
self.norm1_k = norm_layer(dim)
|
| 101 |
+
self.norm1_v = norm_layer(dim)
|
| 102 |
+
self.cross_attn = CrossAttention(
|
| 103 |
+
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
|
| 104 |
+
proj_drop=drop, attn_head_dim=attn_head_dim, out_dim=out_dim)
|
| 105 |
+
|
| 106 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
| 107 |
+
|
| 108 |
+
def forward(self, x_q, x_kv, pos_q, pos_k, bool_masked_pos, rel_pos_bias=None):
|
| 109 |
+
x_q = self.norm1_q(x_q + pos_q)
|
| 110 |
+
x_k = self.norm1_k(x_kv + pos_k)
|
| 111 |
+
x_v = self.norm1_v(x_kv)
|
| 112 |
+
x = self.cross_attn(x_q, k=x_k, v=x_v)
|
| 113 |
+
|
| 114 |
+
return x
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class AttentionPoolingBlock(AttentiveBlock):
|
| 118 |
+
|
| 119 |
+
def forward(self, x):
|
| 120 |
+
x_q = x.mean(1, keepdim=True)
|
| 121 |
+
x_kv, pos_q, pos_k = x, 0, 0
|
| 122 |
+
x = super().forward(x_q, x_kv, pos_q, pos_k, bool_masked_pos=None, rel_pos_bias=None)
|
| 123 |
+
x = x.squeeze(1)
|
| 124 |
+
return x
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class RMSNorm(nn.Module):
|
| 128 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 129 |
+
super().__init__()
|
| 130 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 131 |
+
self.variance_epsilon = eps
|
| 132 |
+
|
| 133 |
+
def forward(self, hidden_states):
|
| 134 |
+
input_dtype = hidden_states.dtype
|
| 135 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 136 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 137 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 138 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
try:
|
| 142 |
+
from apex.normalization import FusedRMSNorm
|
| 143 |
+
|
| 144 |
+
RMSNorm = FusedRMSNorm # noqa
|
| 145 |
+
|
| 146 |
+
print('Discovered apex.normalization.FusedRMSNorm - will use it instead of RMSNorm')
|
| 147 |
+
except ImportError:
|
| 148 |
+
# using the normal RMSNorm
|
| 149 |
+
pass
|
| 150 |
+
except Exception:
|
| 151 |
+
print('discovered apex but it failed to load, falling back to RMSNorm')
|
| 152 |
+
pass
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class LayerScale(nn.Module):
|
| 156 |
+
def __init__(self, dim, init_values=1e-5, inplace=False, force_fp32=False):
|
| 157 |
+
super().__init__()
|
| 158 |
+
self.inplace = inplace
|
| 159 |
+
self.gamma = nn.Parameter(init_values * torch.ones(dim))
|
| 160 |
+
self.force_fp32 = force_fp32
|
| 161 |
+
|
| 162 |
+
@torch.cuda.amp.autocast(enabled=False)
|
| 163 |
+
def forward(self, x):
|
| 164 |
+
if self.force_fp32:
|
| 165 |
+
output_type = x.dtype
|
| 166 |
+
out = x.float().mul_(self.gamma.float()) if self.inplace else x.float() * self.gamma.float()
|
| 167 |
+
return out.to(dtype=output_type)
|
| 168 |
+
else:
|
| 169 |
+
out = x.mul_(self.gamma) if self.inplace else x * self.gamma
|
| 170 |
+
return out
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class Attention(nn.Module):
|
| 174 |
+
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., use_flash_attn=False,
|
| 175 |
+
causal=False, norm_layer=nn.LayerNorm, qk_normalization=False):
|
| 176 |
+
super().__init__()
|
| 177 |
+
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.use_flash_attn = use_flash_attn
        if use_flash_attn:
            self.causal = causal
            self.inner_attn = FlashAttention(attention_dropout=attn_drop)

        self.qk_normalization = qk_normalization
        self.q_norm = norm_layer(dim) if qk_normalization else nn.Identity()
        self.k_norm = norm_layer(dim) if qk_normalization else nn.Identity()

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, x):
        x = self._naive_attn(x) if not self.use_flash_attn else self._flash_attn(x)
        return x


class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 bias=True, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        bias = to_2tuple(bias)
        drop_probs = to_2tuple(drop)

        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_probs[0])
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
        self.drop2 = nn.Dropout(drop_probs[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x


class Block(nn.Module):

    def __init__(
            self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None,
            drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_flash_attn=False, with_cp=False,
            qk_normalization=False, layerscale_force_fp32=False):
        super().__init__()

        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
                              use_flash_attn=use_flash_attn, causal=False, norm_layer=norm_layer,
                              qk_normalization=qk_normalization)
        self.ls1 = LayerScale(dim, init_values=init_values,
                              force_fp32=layerscale_force_fp32) if init_values else nn.Identity()
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.ls2 = LayerScale(dim, init_values=init_values,
                              force_fp32=layerscale_force_fp32) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.with_cp = with_cp

    def forward(self, x):

        def _inner_forward(x):
            x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
            x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
            return x

        if self.with_cp:
            return checkpoint.checkpoint(_inner_forward, x)
        else:
            return _inner_forward(x)


class PatchEmbed(nn.Module):
    """ 2D Image to Patch Embedding
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.flatten = flatten

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x, **kwargs):
        x = self.proj(x)
        _, _, H, W = x.shape
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x, H, W


class InternViT6B(nn.Module):

    def __init__(self, in_chans=3, patch_size=14, img_size=224, pretrain_size=224, qkv_bias=False, drop_path_rate=0.0,
                 embed_dim=3200, num_heads=25, mlp_ratio=4, init_values=0.1, qk_normalization=True, depth=48,
                 use_flash_attn=True, with_cp=True, layerscale_force_fp32=False, freeze_vit=True,
                 cls_target='cls_patch_concat', num_classes=1000, attn_pool_num_heads=16, clip_embed_dim=768,
                 head_norm_type='bn', pretrained=None):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models

        self.pretrain_size = pretrain_size
        self.drop_path_rate = drop_path_rate
        self.img_size = img_size
        self.patch_size = patch_size
        self.cls_target = cls_target
        self.depth = depth

        # warn before overwriting the flag, otherwise the warning can never fire
        if use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        use_flash_attn = use_flash_attn and has_flash_attn
        use_flash_attn = [use_flash_attn] * depth if not isinstance(use_flash_attn, list) else use_flash_attn

        norm_layer_for_blocks = partial(RMSNorm, eps=1e-6)
        self.norm_layer_for_blocks = norm_layer_for_blocks
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches
        self.num_patches = num_patches
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Identity()
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]

        self.blocks = nn.ModuleList([
            Block(embed_dim, num_heads, mlp_ratio, qkv_bias=qkv_bias,
                  norm_layer=norm_layer_for_blocks,
                  drop_path=dpr[i], init_values=init_values, attn_drop=0.,
                  use_flash_attn=use_flash_attn[i],
                  with_cp=with_cp,
                  qk_normalization=qk_normalization,
                  layerscale_force_fp32=layerscale_force_fp32)
            for i in range(depth)])

        if cls_target == 'clip_projector':
            self.clip_projector = AttentionPoolingBlock(
                dim=embed_dim, num_heads=attn_pool_num_heads, qkv_bias=True, qk_scale=None,
                drop=0., attn_drop=0., norm_layer=partial(nn.LayerNorm, eps=1e-5), out_dim=clip_embed_dim)

        self.init_weights(pretrained)

        if freeze_vit:
            _freeze_params(self)

        if cls_target == 'cls_patch_concat':
            if head_norm_type == 'bn':
                self.norm = nn.SyncBatchNorm(embed_dim * 2, eps=1e-6)
            else:
                self.norm = nn.LayerNorm(embed_dim * 2, eps=1e-6)
            self.head = nn.Linear(embed_dim * 2, num_classes) if num_classes > 0 else nn.Identity()
        elif cls_target == 'clip_projector':
            if head_norm_type == 'bn':
                self.norm = nn.SyncBatchNorm(clip_embed_dim, eps=1e-6)
            else:
                self.norm = nn.LayerNorm(clip_embed_dim, eps=1e-6)
            self.head = nn.Linear(clip_embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        else:
            raise NotImplementedError

        if type(self.head) != nn.Identity:
            self.head.weight.data.normal_(mean=0.0, std=0.01)
            self.head.bias.data.zero_()

    def init_weights(self, pretrained=None):
        print(f'pretrained: {pretrained}')

        def resize_pos_embed(pos_embed, H, W):
            cls = pos_embed[:, :1, :]
            pos_embed = pos_embed[:, 1:, :].reshape(
                1, self.pretrain_size // 14, self.pretrain_size // 14, -1).permute(0, 3, 1, 2)
            pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
                reshape(1, -1, H * W).permute(0, 2, 1)
            pos_embed = torch.cat([cls, pos_embed], dim=1)
            return pos_embed

        if isinstance(pretrained, str):
            checkpoint = torch.load(pretrained, map_location='cpu')
            if 'module' in checkpoint:
                checkpoint = checkpoint['module']

            # resize pos_embed
            pos_embed = checkpoint['pos_embed']
            checkpoint['pos_embed'] = resize_pos_embed(
                pos_embed, self.img_size // self.patch_size, self.img_size // self.patch_size)
            # resize patch_embed
            patch_embed = checkpoint['patch_embed.proj.weight']
            checkpoint['patch_embed.proj.weight'] = F.interpolate(
                patch_embed, size=(self.patch_size, self.patch_size),
                mode='bicubic', align_corners=False)
            message = self.load_state_dict(checkpoint, strict=False)
            print(message)

    @property
    def dtype(self):
        return self.patch_embed.proj.weight.dtype

    def forward_features(self, x):
        x, _, _ = self.patch_embed(x.type(self.dtype))
        batch_size, seq_len, _ = x.size()
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed

        for idx, blk in enumerate(self.blocks):
            x = blk(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        if self.cls_target == 'cls_patch_concat':
            x = torch.cat((x[:, 0, :], x[:, 1:, :].mean(dim=1)), dim=-1)
        elif self.cls_target == 'clip_projector':
            x = self.clip_projector(x)
        else:
            raise NotImplementedError
        x = self.norm(x)
        x = self.head(x)
        return x

    @torch.jit.ignore
    def lr_decay_keywords(self, decay_ratio=0.95):
        lr_ratios = {}

        # blocks
        for idx in range(self.depth):
            tag = 'blocks.{}.'.format(idx)
            decay = 1.0 * (decay_ratio ** (self.depth - idx))
            lr_ratios[tag] = decay

        # patch_embed
        lr_ratios['patch_embed'] = 1.0 * (decay_ratio ** (self.depth + 1))
        lr_ratios['pos_embed'] = 1.0 * (decay_ratio ** (self.depth + 1))
        lr_ratios['cls_token'] = 1.0 * (decay_ratio ** (self.depth + 1))

        return lr_ratios
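For readers skimming this diff, a minimal smoke test of the classifier defined above might look like the sketch below. It assumes the definitions in intern_vit_6b.py are importable; the tiny embed_dim/depth/num_heads values are illustrative assumptions so the forward pass runs on CPU (the real InternViT-6B defaults are embed_dim=3200, depth=48, num_heads=25, as in __init__ above).

import torch

# Hypothetical toy configuration, NOT the real 6B model; flash attention and
# activation checkpointing are disabled, and head_norm_type='ln' avoids
# SyncBatchNorm (which needs an initialized process group).
model = InternViT6B(
    img_size=224, patch_size=14,
    embed_dim=64, num_heads=4, depth=2,
    use_flash_attn=False, with_cp=False, freeze_vit=False,
    cls_target='cls_patch_concat', num_classes=10,
    head_norm_type='ln', pretrained=None)

x = torch.randn(2, 3, 224, 224)   # (batch, channels, height, width)
logits = model(x)                 # CLS token + mean-pooled patches -> linear head
print(logits.shape)               # torch.Size([2, 10])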
InternVL/classification/work_dirs/intern_vit_6b_1k_224/log_rank0.txt
ADDED
The diff for this file is too large to render. See raw diff.
InternVL/internvl_chat/examples/image1.jpg
ADDED

InternVL/internvl_chat/internvl/conversation.py
ADDED
@@ -0,0 +1,406 @@
"""
Conversation prompt templates.

We kindly request that you import fastchat instead of copying this file if you wish to use it.
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
"""

import dataclasses
from enum import IntEnum, auto
from typing import Any, Dict, List, Tuple, Union


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()
    INTERNVL_ZH = auto()
    MPT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = '{system_message}'
    # The system message
    system_message: str = ''
    # The names of two roles
    roles: Tuple[str] = ('USER', 'ASSISTANT')
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = '\n'
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ': '  # must end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = '' if system_prompt == '' else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ': '
                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
                    )
                    ret += '\n\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = '[INST] '
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + ' '
                    else:
                        ret += tag + ' ' + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == 'chatglm2' else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ''

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'

                if message:
                    ret += f'{role}:{message}{self.sep}'
                else:
                    ret += f'{role}:'
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep + '\n'
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ''
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + ' ' + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                # if i % 2 == 0:
                #     ret += "<s>"
                if message:
                    ret += role + ':' + message + seps[i % 2] + '\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ':\n' + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += '\n\n'
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + '<s>' + message + '</s>'
                else:
                    ret += role + ': ' + '<s>'
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ':\n' + message + self.sep
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ''
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'

            return ret
        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
            seps = [self.sep2, self.sep]
            ret = self.system_message + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{'role': 'system', 'content': self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append({'role': 'user', 'content': msg})
            else:
                if msg is not None:
                    ret.append({'role': 'assistant', 'content': msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            'template_name': self.name,
            'system_message': self.system_message,
            'roles': self.roles,
            'messages': self.messages,
            'offset': self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f'{template.name} has been registered.'

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# InternVL-Chat-V1-1 template
register_conv_template(
    Conversation(
        name='internvl_zh',
        system_template='',
        roles=('<human>', '<bot>'),
        sep_style=SeparatorStyle.INTERNVL_ZH,
        sep='</s>',
        sep2=' ',
    )
)


# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
# is that during training, the preprocessing function for the Hermes-2 template doesn't add
# <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
# Therefore, they are completely equivalent during inference.
register_conv_template(
    Conversation(
        name='Hermes-2',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
        stop_token_ids=[
            2,
            6,
            7,
            8,
        ],
        stop_str='<|endoftext|>',
    )
)


register_conv_template(
    Conversation(
        name='internlm2-chat',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
        stop_token_ids=[
            2,
            92543,
            92542
        ]
    )
)


register_conv_template(
    Conversation(
        name='phi3-chat',
        system_template='<|system|>\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|user|>\n', '<|assistant|>\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|end|>',
        stop_token_ids=[
            2,
            32000,
            32007
        ]
    )
)
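As a quick illustration of the machinery above, here is how a prompt is assembled with the internlm2-chat template registered at the end of the file; the user message is, of course, just an example.

conv = get_conv_template('internlm2-chat')
conv.append_message(conv.roles[0], 'Describe the image in detail.')
conv.append_message(conv.roles[1], None)   # None leaves an open slot for the model's reply
prompt = conv.get_prompt()
# With SeparatorStyle.MPT this yields:
# <|im_start|>system\n{system_message}<|im_end|><|im_start|>user\nDescribe the image in detail.<|im_end|><|im_start|>assistant\n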
InternVL/internvl_chat/internvl/dist_utils.py
ADDED
@@ -0,0 +1,104 @@
import os
import socket
import subprocess
from datetime import timedelta

import deepspeed
import torch
import torch.multiprocessing as mp
from torch import distributed as dist

timeout = timedelta(minutes=60)


def _find_free_port():
    # Copied from https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py # noqa: E501
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Binding to port 0 will cause the OS to find an available port for us
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    # NOTE: there is still a chance the port could be taken by other processes.
    return port


def _is_free_port(port):
    ips = socket.gethostbyname_ex(socket.gethostname())[-1]
    ips.append('localhost')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return all(s.connect_ex((ip, port)) != 0 for ip in ips)


def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError(f'Invalid launcher type: {launcher}')


def _init_dist_pytorch(backend, **kwargs):
    # TODO: use local_rank instead of rank % num_gpus
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    # dist.init_process_group(backend=backend, **kwargs)
    deepspeed.init_distributed(dist_backend=backend)


def _init_dist_mpi(backend, **kwargs):
    local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    if 'MASTER_PORT' not in os.environ:
        # 29500 is torch.distributed default port
        os.environ['MASTER_PORT'] = '29500'
    if 'MASTER_ADDR' not in os.environ:
        raise KeyError('The environment variable MASTER_ADDR is not set')
    os.environ['WORLD_SIZE'] = os.environ['OMPI_COMM_WORLD_SIZE']
    os.environ['RANK'] = os.environ['OMPI_COMM_WORLD_RANK']
    dist.init_process_group(backend=backend, **kwargs)


def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.

    If the argument ``port`` is not specified, the master port is read from the
    system environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not set
    either, the default port ``29500`` is used.

    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass  # use MASTER_PORT in the environment variable
    else:
        # if the torch.distributed default port (29500) is available,
        # use it; otherwise find a free port
        if _is_free_port(29500):
            os.environ['MASTER_PORT'] = '29500'
        else:
            os.environ['MASTER_PORT'] = str(_find_free_port())
    # use MASTER_ADDR in the environment variable if it already exists
    if 'MASTER_ADDR' not in os.environ:
        os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    # dist.init_process_group(backend=backend, timeout=timeout)
    deepspeed.init_distributed(dist_backend=backend)
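A typical call site for init_dist is the top of a training entry point, before any model is built. The sketch below is illustrative (the --launcher flag name is an assumption, not part of this file):

import argparse
from internvl.dist_utils import init_dist

parser = argparse.ArgumentParser()
parser.add_argument('--launcher', choices=['pytorch', 'slurm', 'mpi'], default='slurm')
args = parser.parse_args()

# Sets MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE/LOCAL_RANK as needed,
# then hands off to deepspeed.init_distributed.
init_dist(args.launcher, backend='nccl')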
InternVL/internvl_chat/tools/convert_parquet.py
ADDED
@@ -0,0 +1,83 @@
import argparse
import io
import json
import os
import random

import pandas as pd
from PIL import Image
from tqdm import tqdm

parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, default='')
parser.add_argument('output', type=str, default='')

args = parser.parse_args()

if not os.path.exists(args.output):
    os.makedirs(args.output)

image_root = os.path.join(args.output, 'images')
if not os.path.exists(image_root):
    os.makedirs(image_root)

prompts = [
    'Please recognize the text in the image.',
    'Please extract the text from the image.',
    'Kindly identify and transcribe the text present in the image.',
    'Could you please perform optical character recognition (OCR) on the image to retrieve the text?',
    'Please use text recognition techniques to decipher the text within the image.',
    'Could you extract any readable text contained in the image?',
    'I need the text within the image recognized and converted into machine-readable format, please.',
    'Please employ OCR technology to recognize and extract the text from the image.',
    'Kindly process the image to identify and retrieve any textual content it contains.',
    'Please analyze the image and retrieve any textual information that is discernible.',
    'Could you transcribe any visible text from the image, please?',
    '请从图像中提取文本',
    '请识别图像中的文本。',
    '能否使用光学字符识别(OCR)技术在图像上提取文本?',
    '请使用文本识别技术解读图像中的文字。',
    '能提取图像中的任何可读文本吗?',
    '请将图像中的文本识别并转换为机器可读格式。',
    '请使用OCR技术识别并提取图像中的文本。',
    '请处理图像以识别并提取其中包含的任何文本内容。',
    '请分析图像并提取其中可以辨认的任何文本信息。',
    '你能够将图像中可见的文本转录出来吗?',
]

cnt = 0
data = []


def process(filename):
    df_parquet = pd.read_parquet(os.path.join(args.input, filename))
    for index, row in tqdm(df_parquet.iterrows()):
        # process each row of the parquet file here
        image = row['image']['bytes']
        ground_truth = row['ground_truth']
        ground_truth = json.loads(ground_truth)['gt_parse']['text_sequence']
        image = Image.open(io.BytesIO(image))
        global cnt, data
        image_out_path = os.path.join(args.output, 'images/%08d.jpg' % cnt)
        image.save(image_out_path)
        data_item = {'id': cnt, 'image': 'images/%08d.jpg' % cnt}
        conversations = []
        conversations.append({'from': 'human', 'value': '<image>\n' + random.choice(prompts)})
        conversations.append({'from': 'gpt', 'value': ground_truth})
        data_item['conversations'] = conversations
        data.append(data_item)
        cnt += 1


process('train-00000-of-00084-26dbc51f3d0903b9.parquet')
process('train-00001-of-00084-3efa94914043c815.parquet')
process('train-00002-of-00084-65600b4a95c96e85.parquet')
process('train-00003-of-00084-45e260eca2cd125f.parquet')
process('train-00004-of-00084-b1d684c57dc6e3da.parquet')

writer = open(os.path.join(args.output, 'synthdog_en.jsonl'), 'w')
for item in data:
    writer.write(json.dumps(item) + '\n')
writer.close()

print('done')
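Each line of the resulting synthdog_en.jsonl is one OCR training sample. An illustrative record (the image path follows the script's %08d pattern, but the OCR text here is made up):

# One parsed line of synthdog_en.jsonl, shown as a Python dict:
{
    'id': 0,
    'image': 'images/00000000.jpg',
    'conversations': [
        {'from': 'human', 'value': '<image>\nPlease extract the text from the image.'},
        {'from': 'gpt', 'value': 'an example text_sequence from gt_parse'},
    ],
}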
InternVL/internvl_chat/tools/convert_to_int8.py
ADDED
@@ -0,0 +1,16 @@
import torch
from transformers import AutoModel, AutoTokenizer

path = 'OpenGVLab/InternVL-Chat-V1-5'
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    load_in_8bit=True).eval()

tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

model.save_pretrained('release/InternVL-Chat-V1-5-Int8')
tokenizer.save_pretrained('release/InternVL-Chat-V1-5-Int8')
print('finished')
InternVL/internvl_chat/tools/extract_mlp.py
ADDED
@@ -0,0 +1,19 @@
import argparse
import os.path

import torch
from internvl.model.internvl_chat import InternVLChatModel

parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, default='')
parser.add_argument('output_path', type=str, default='')

args = parser.parse_args()

model = InternVLChatModel.from_pretrained(args.model_path, torch_dtype=torch.bfloat16)
model = model.mlp1.to(torch.bfloat16)

ckpt = model.state_dict()
output_path = os.path.join(args.output_path, 'mlp_projector.pth')
torch.save(ckpt, output_path)
print('finished')
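The exported file can be inspected without loading the full chat model; a small sketch (the work_dirs path is illustrative, matching whatever output_path was passed above):

import torch

ckpt = torch.load('work_dirs/extracted/mlp_projector.pth', map_location='cpu')
for name, tensor in ckpt.items():
    print(name, tuple(tensor.shape), tensor.dtype)   # expect torch.bfloat16 tensors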
InternVL/internvl_chat/tools/extract_video_frames.py
ADDED
@@ -0,0 +1,121 @@
import concurrent.futures
import json
import os

import av
import numpy as np
import torch
from decord import VideoReader, cpu
from PIL import Image
from tqdm.auto import tqdm

num_segments = 1

# root directory of evaluation dimension 10
dimension10_dir = './videos/20bn-something-something-v2'
# root directory of evaluation dimension 11
dimension11_dir = './videos/EPIC-KITCHENS'
# root directory of evaluation dimension 12
dimension12_dir = './videos/BreakfastII_15fps_qvga_sync'


def transform_video(buffer):
    try:
        buffer = buffer.numpy()
    except AttributeError:
        try:
            buffer = buffer.asnumpy()
        except AttributeError:
            print('Both buffer.numpy() and buffer.asnumpy() failed.')
            buffer = None
    images_group = list()
    for fid in range(len(buffer)):
        images_group.append(Image.fromarray(buffer[fid]))
    return images_group


def get_index(num_frames, num_segments):
    if num_segments > num_frames:
        offsets = np.array([
            idx for idx in range(num_frames)
        ])
    else:
        # uniform sampling
        seg_size = float(num_frames - 1) / num_segments
        start = int(seg_size / 2)
        offsets = np.array([
            start + int(np.round(seg_size * idx)) for idx in range(num_segments)
        ])
    return offsets


def fetch_images(qa_item):
    use_pyav = False
    segment = None
    if qa_item['question_type_id'] == 10:
        data_path = os.path.join(dimension10_dir, qa_item['data_id'])
        start = 0.0
        end = 0.0
    elif qa_item['question_type_id'] == 11:
        data_path = os.path.join(dimension11_dir, qa_item['data_id'].split('/')[-1])
        segment = qa_item['segment']
        start, end = segment[0], segment[1]
    elif qa_item['question_type_id'] == 12:
        data_path = os.path.join(dimension12_dir, qa_item['data_id'])
        segment = qa_item['segment']
        start, end = segment[0], segment[1]
        use_pyav = True

    if use_pyav:
        # using pyav for decoding videos in evaluation dimension 12
        reader = av.open(data_path)
        frames = [torch.from_numpy(f.to_rgb().to_ndarray()) for f in reader.decode(video=0)]
        video_len = len(frames)
        start_frame, end_frame = start, end
        end_frame = min(end_frame, video_len)
        offset = get_index(end_frame - start_frame, num_segments)
        frame_indices = offset + start_frame
        buffer = torch.stack([frames[idx] for idx in frame_indices])
    else:
        # using decord for decoding videos in evaluation dimension 10-11
        vr = VideoReader(data_path, num_threads=1, ctx=cpu(0))
        video_len = len(vr)
        fps = vr.get_avg_fps()
        if segment is not None:
            # obtain start and end frame for the video segment in evaluation dimension 11
            start_frame = int(min(max(start * fps, 0), video_len - 1))
            end_frame = int(min(max(end * fps, 0), video_len - 1))
            tot_frames = int(end_frame - start_frame)
            offset = get_index(tot_frames, num_segments)
            frame_indices = offset + start_frame
        else:
            # sample frames of the video in evaluation dimension 10
            frame_indices = get_index(video_len - 1, num_segments)
        vr.seek(0)
        buffer = vr.get_batch(frame_indices)
    return transform_video(buffer)


def fetch_images_parallel(qa_item):
    return qa_item, fetch_images(qa_item)


if __name__ == '__main__':
    data = json.load(open('SEED-Bench.json'))
    video_img_dir = 'SEED-Bench-video-image'
    os.makedirs(video_img_dir, exist_ok=True)  # the extracted frames are saved here
    ques_type_id_to_name = {qid: name for name, qid in data['question_type'].items()}

    video_data = [x for x in data['questions'] if x['data_type'] == 'video']

    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_images = {executor.submit(fetch_images_parallel, qa_item): qa_item for qa_item in video_data}
        for future in tqdm(concurrent.futures.as_completed(future_to_images), total=len(future_to_images)):
            qa_item = future_to_images[future]
            try:
                qa_item, images = future.result()
            except Exception as exc:
                print(f'{qa_item} generated an exception: {exc}')
            else:
                img_file = f"{qa_item['question_type_id']}_{qa_item['question_id']}.png"
                images[0].save(os.path.join(video_img_dir, img_file))
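The uniform sampler in get_index places one index near the centre of each of num_segments equal-length spans. A quick worked check:

# For a 100-frame clip and 8 segments:
#   seg_size = (100 - 1) / 8 = 12.375, start = int(12.375 / 2) = 6
#   offsets  = 6 + round(12.375 * idx) for idx = 0..7
print(get_index(100, 8))   # -> [ 6 18 31 43 56 68 80 93]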
InternVL/internvl_chat/tools/extract_vit.py
ADDED
@@ -0,0 +1,16 @@
import argparse

import torch
from internvl.model.internvl_chat import InternVLChatModel

parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, default='')
parser.add_argument('output_path', type=str, default='')

args = parser.parse_args()

model = InternVLChatModel.from_pretrained(args.model_path, torch_dtype=torch.bfloat16)
model = model.vision_model.to(torch.bfloat16)

model.save_pretrained(args.output_path)
print('finished')
InternVL/internvl_chat/tools/json2jsonl.py
ADDED
@@ -0,0 +1,21 @@
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument('path', type=str)

args = parser.parse_args()

assert args.path.endswith('.json')

data = json.load(open(args.path))
writer = open(args.path.replace('.json', '.jsonl'), 'w')
for idx, item in enumerate(data):
    conversations = item['conversations']
    if conversations[0]['from'] == 'system':
        item['conversations'] = item['conversations'][1:]
    item['id'] = idx
    writer.write(json.dumps(item, ensure_ascii=False) + '\n')

writer.close()
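A before/after illustration of what the loop above does to one item (the message values are made up):

item = {'conversations': [{'from': 'system', 'value': '...'},
                          {'from': 'human', 'value': 'Hi'},
                          {'from': 'gpt', 'value': 'Hello!'}]}
if item['conversations'][0]['from'] == 'system':
    item['conversations'] = item['conversations'][1:]   # leading system turn dropped
item['id'] = 0                                           # sequential id attached
# -> {'conversations': [{'from': 'human', ...}, {'from': 'gpt', ...}], 'id': 0}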
InternVL/internvl_chat/tools/jsonl2jsonl.py
ADDED
@@ -0,0 +1,23 @@
import argparse
import json
import os

parser = argparse.ArgumentParser()
parser.add_argument('path', type=str)

args = parser.parse_args()

assert args.path.endswith('.jsonl')

f = open(args.path)
data = [json.loads(line) for line in f.readlines()]
writer = open(args.path.replace('.jsonl', '_new.jsonl'), 'w')
for idx, item in enumerate(data):
    item['id'] = idx
    # item['image'] = os.path.join('train/documents/', item['image'])
    conversations = item['conversations']
    if conversations[0]['from'] == 'system':
        item['conversations'] = item['conversations'][1:]
    writer.write(json.dumps(item, ensure_ascii=False) + '\n')

writer.close()
InternVL/internvl_chat/tools/merge_lora.py
ADDED
@@ -0,0 +1,31 @@
import argparse

import torch
from internvl.model.internvl_chat import InternVLChatModel
from transformers import AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument('input_path', type=str, help='Path to the input model')
parser.add_argument('output_path', type=str, help='Path to the output model')
args = parser.parse_args()

print('Loading model...')
model = InternVLChatModel.from_pretrained(
    args.input_path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16).eval()
print('Loading tokenizer...')
tokenizer = AutoTokenizer.from_pretrained(args.input_path, trust_remote_code=True)

if model.config.use_backbone_lora:
    model.vision_model.merge_and_unload()
    model.vision_model = model.vision_model.model
    model.config.use_backbone_lora = 0
if model.config.use_llm_lora:
    model.language_model.merge_and_unload()
    model.language_model = model.language_model.model
    model.config.use_llm_lora = 0

print('Saving model...')
model.save_pretrained(args.output_path)
print('Saving tokenizer...')
tokenizer.save_pretrained(args.output_path)
print('Done!')
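After merging, the output directory loads like any ordinary (LoRA-free) checkpoint. A quick sanity check one might run afterwards; the path is whatever was passed as output_path, and the 'lora_' parameter-name pattern is the usual PEFT convention, assumed here rather than guaranteed by this script:

import torch
from internvl.model.internvl_chat import InternVLChatModel

merged = InternVLChatModel.from_pretrained('path/to/output_path', torch_dtype=torch.bfloat16)
assert not any('lora_' in name for name, _ in merged.named_parameters())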
InternVL/internvl_chat/tools/replace_llm.py
ADDED
@@ -0,0 +1,29 @@
import argparse

import torch
from internvl.model.internvl_chat import InternVLChatModel
from transformers import AutoModel, AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, default='')
parser.add_argument('llm_path', type=str, default='')

args = parser.parse_args()

if args.model_path[-1] == '/':
    args.model_path = args.model_path[:-1]

model = InternVLChatModel.from_pretrained(args.model_path, torch_dtype=torch.bfloat16)

llm = AutoModel.from_pretrained(
    args.llm_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(
    args.llm_path, trust_remote_code=True)
model.language_model = llm
model.config.llm_config = llm.config
model.to(torch.bfloat16)

output_path = args.model_path + '_replace_llm'
model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)
print('finished')
InternVL/internvl_chat/tools/resize_pos_embed.py
ADDED
@@ -0,0 +1,25 @@
import argparse

import torch
from internvl.model.internvl_chat import InternVLChatModel
from transformers import AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, default='')
parser.add_argument('output_path', type=str, default='')
parser.add_argument('force_image_size', type=int, default=448)

args = parser.parse_args()

model = InternVLChatModel.from_pretrained(args.model_path, torch_dtype=torch.bfloat16)
model.vision_model.resize_pos_embeddings(old_size=model.config.vision_config.image_size,
                                         new_size=args.force_image_size,
                                         patch_size=14)
model.config.vision_config.image_size = args.force_image_size
model.config.force_image_size = args.force_image_size

model.save_pretrained(args.output_path)

tokenizer = AutoTokenizer.from_pretrained(args.model_path)
tokenizer.save_pretrained(args.output_path)
print('finished')
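resize_pos_embeddings performs the same bicubic trick as init_weights in the classification model earlier in this diff: split off the [CLS] slot, reshape the grid part of the embedding to 2-D, interpolate, and flatten back. A self-contained sketch of the shape bookkeeping for 224 -> 448 at patch size 14 (the real method lives inside the vision model; this only mirrors the arithmetic):

import torch
import torch.nn.functional as F

D = 3200                                    # embedding dim of InternViT-6B
old_grid, new_grid = 224 // 14, 448 // 14   # 16 -> 32 patches per side

pos_embed = torch.randn(1, old_grid * old_grid + 1, D)       # [CLS] + 16x16 grid
cls_tok, grid = pos_embed[:, :1], pos_embed[:, 1:]

grid = grid.reshape(1, old_grid, old_grid, D).permute(0, 3, 1, 2)    # 1 x D x 16 x 16
grid = F.interpolate(grid, size=(new_grid, new_grid), mode='bicubic', align_corners=False)
grid = grid.reshape(1, D, new_grid * new_grid).permute(0, 2, 1)      # 1 x 1024 x D

pos_embed = torch.cat([cls_tok, grid], dim=1)
print(pos_embed.shape)    # torch.Size([1, 1025, 3200])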
InternVL/internvl_chat_llava/llava/model/language_model/llava_llama.py
ADDED
@@ -0,0 +1,140 @@
| 1 |
+
# Copyright 2023 Haotian Liu
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
from torch.nn import CrossEntropyLoss
|
| 21 |
+
|
| 22 |
+
from transformers import AutoConfig, AutoModelForCausalLM, \
|
| 23 |
+
LlamaConfig, LlamaModel, LlamaForCausalLM
|
| 24 |
+
|
| 25 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
| 26 |
+
|
| 27 |
+
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class LlavaConfig(LlamaConfig):
|
| 31 |
+
model_type = "llava_llama"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
|
| 35 |
+
config_class = LlavaConfig
|
| 36 |
+
|
| 37 |
+
def __init__(self, config: LlamaConfig):
|
| 38 |
+
super(LlavaLlamaModel, self).__init__(config)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
|
| 42 |
+
config_class = LlavaConfig
|
| 43 |
+
|
| 44 |
+
def __init__(self, config):
|
| 45 |
+
super(LlamaForCausalLM, self).__init__(config)
|
| 46 |
+
self.model = LlavaLlamaModel(config)
|
| 47 |
+
|
| 48 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 49 |
+
|
| 50 |
+
# Initialize weights and apply final processing
|
| 51 |
+
self.post_init()
|
| 52 |
+
|
| 53 |
+
def get_model(self):
|
| 54 |
+
return self.model
|
| 55 |
+
|
| 56 |
+
def forward(
|
| 57 |
+
self,
|
| 58 |
+
input_ids: torch.LongTensor = None,
|
| 59 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 60 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 61 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 62 |
+
labels: Optional[torch.LongTensor] = None,
|
| 63 |
+
use_cache: Optional[bool] = None,
|
| 64 |
+
output_attentions: Optional[bool] = None,
|
| 65 |
+
output_hidden_states: Optional[bool] = None,
|
| 66 |
+
images: Optional[torch.FloatTensor] = None,
|
| 67 |
+
return_dict: Optional[bool] = None,
|
| 68 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 69 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 70 |
+
output_hidden_states = (
|
| 71 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 72 |
+
)
|
| 73 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 74 |
+
|
| 75 |
+
input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
|
| 76 |
+
|
| 77 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 78 |
+
outputs = self.model(
|
| 79 |
+
input_ids=input_ids,
|
| 80 |
+
attention_mask=attention_mask,
|
| 81 |
+
past_key_values=past_key_values,
|
| 82 |
+
inputs_embeds=inputs_embeds,
|
| 83 |
+
use_cache=use_cache,
|
| 84 |
+
output_attentions=output_attentions,
|
| 85 |
+
output_hidden_states=output_hidden_states,
|
| 86 |
+
return_dict=return_dict
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
hidden_states = outputs[0]
|
| 90 |
+
logits = self.lm_head(hidden_states)
|
| 91 |
+
|
| 92 |
+
loss = None
|
| 93 |
+
if labels is not None:
|
| 94 |
+
# Shift so that tokens < n predict n
|
| 95 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 96 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 97 |
+
# Flatten the tokens
|
| 98 |
+
loss_fct = CrossEntropyLoss()
|
| 99 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| 100 |
+
shift_labels = shift_labels.view(-1)
|
| 101 |
+
# Enable model/pipeline parallelism
|
| 102 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 103 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 104 |
+
|
| 105 |
+
if not return_dict:
|
| 106 |
+
output = (logits,) + outputs[1:]
|
| 107 |
+
return (loss,) + output if loss is not None else output
|
| 108 |
+
|
| 109 |
+
return CausalLMOutputWithPast(
|
| 110 |
+
loss=loss,
|
| 111 |
+
logits=logits,
|
| 112 |
+
past_key_values=outputs.past_key_values,
|
| 113 |
+
hidden_states=outputs.hidden_states,
|
| 114 |
+
attentions=outputs.attentions,
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
def prepare_inputs_for_generation(
|
| 118 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| 119 |
+
):
|
| 120 |
+
if past_key_values:
|
| 121 |
+
input_ids = input_ids[:, -1:]
|
| 122 |
+
|
| 123 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 124 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 125 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 126 |
+
else:
|
| 127 |
+
model_inputs = {"input_ids": input_ids}
|
| 128 |
+
|
| 129 |
+
model_inputs.update(
|
| 130 |
+
{
|
| 131 |
+
"past_key_values": past_key_values,
|
| 132 |
+
"use_cache": kwargs.get("use_cache"),
|
| 133 |
+
"attention_mask": attention_mask,
|
| 134 |
+
"images": kwargs.get("images", None),
|
| 135 |
+
}
|
| 136 |
+
)
|
| 137 |
+
return model_inputs
|
| 138 |
+
|
| 139 |
+
AutoConfig.register("llava_llama", LlavaConfig)
|
| 140 |
+
AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
|
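A minimal usage sketch (not part of the diff): the two register() calls above make the architecture resolvable through the transformers Auto classes. The tiny config values below are placeholders so the sketch builds a small model without a checkpoint.

    # Sketch, assuming the package above is importable as llava.*
    from transformers import AutoConfig, AutoModelForCausalLM
    import llava.model.language_model.llava_llama  # noqa: F401 -- runs the register() calls

    config = AutoConfig.for_model('llava_llama', hidden_size=64, intermediate_size=128,
                                  num_hidden_layers=2, num_attention_heads=4, vocab_size=128)
    model = AutoModelForCausalLM.from_config(config)
    print(type(model).__name__)  # LlavaLlamaForCausalLM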
InternVL/internvl_chat_llava/llava/model/language_model/llava_mpt.py
ADDED
@@ -0,0 +1,97 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Optional, Tuple
+
+import torch
+
+from transformers import AutoConfig, AutoModelForCausalLM, \
+    MptConfig, MptForCausalLM, MptModel
+from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
+class LlavaMptConfig(MptConfig):
+    model_type = "llava_mpt"
+
+
+class LlavaMptModel(LlavaMetaModel, MptModel):
+    config_class = LlavaMptConfig
+
+    def __init__(self, config: MptConfig):
+        config.hidden_size = config.d_model
+        super(LlavaMptModel, self).__init__(config)
+
+    def embed_tokens(self, x):
+        return self.wte(x)
+
+
+class LlavaMptForCausalLM(MptForCausalLM, LlavaMetaForCausalLM):
+    config_class = LlavaMptConfig
+    supports_gradient_checkpointing = True
+
+    def __init__(self, config):
+        super(MptForCausalLM, self).__init__(config)
+
+        self.transformer = LlavaMptModel(config)
+        self.lm_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_model(self):
+        return self.transformer
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        if isinstance(module, LlavaMptModel):
+            module.gradient_checkpointing = value
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        labels: Optional[torch.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        images=None):
+        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(
+            input_ids, attention_mask, past_key_values, labels, images)
+
+        return super().forward(
+            input_ids,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+            inputs_embeds=inputs_embeds,
+            labels=labels,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+        images = kwargs.pop("images", None)
+        _inputs = super().prepare_inputs_for_generation(
+            input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
+        )
+        _inputs['images'] = images
+        return _inputs
+
+
+AutoConfig.register("llava_mpt", LlavaMptConfig)
+AutoModelForCausalLM.register(LlavaMptConfig, LlavaMptForCausalLM)
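The only multimodal-specific change to generation above is threading `images` through prepare_inputs_for_generation. A self-contained sketch of that pattern, with toy classes standing in for the real ones:

    class Base:
        def prepare_inputs_for_generation(self, input_ids, **kwargs):
            return {'input_ids': input_ids, **kwargs}

    class Multimodal(Base):
        def prepare_inputs_for_generation(self, input_ids, **kwargs):
            images = kwargs.pop('images', None)   # hide the extra kwarg from the parent
            inputs = super().prepare_inputs_for_generation(input_ids, **kwargs)
            inputs['images'] = images             # re-attach so forward() receives it
            return inputs

    print(Multimodal().prepare_inputs_for_generation([1, 2], images='img', use_cache=True))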
InternVL/internvl_chat_llava/llava/model/language_model/mpt/adapt_tokenizer.py
ADDED
@@ -0,0 +1,41 @@
+from typing import Union
+from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
+Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
+NUM_SENTINEL_TOKENS: int = 100
+
+def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
+    """Adds sentinel tokens and padding token (if missing).
+
+    Expands the tokenizer vocabulary to include sentinel tokens
+    used in mixture-of-denoiser tasks as well as a padding token.
+
+    All added tokens are added as special tokens. No tokens are
+    added if sentinel tokens and padding token already exist.
+    """
+    sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
+    tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
+    if tokenizer.pad_token is None:
+        tokenizer.add_tokens('<pad>', special_tokens=True)
+        tokenizer.pad_token = '<pad>'
+        assert tokenizer.pad_token_id is not None
+    sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
+    _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
+    tokenizer.sentinel_token_ids = _sentinel_token_ids
+
+class AutoTokenizerForMOD(AutoTokenizer):
+    """AutoTokenizer + Adaptation for MOD.
+
+    A simple wrapper around AutoTokenizer to make instantiating
+    an MOD-adapted tokenizer a bit easier.
+
+    MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
+    a padding token, and a property to get the token ids of the
+    sentinel tokens.
+    """
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        """See `AutoTokenizer.from_pretrained` docstring."""
+        tokenizer = super().from_pretrained(*args, **kwargs)
+        adapt_tokenizer_for_denoising(tokenizer)
+        return tokenizer
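A usage sketch (needs network access to fetch a base tokenizer; the +101 count assumes a tokenizer such as GPT-2's that starts with no pad token and no sentinel tokens):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained('gpt2')
    before = len(tok)
    adapt_tokenizer_for_denoising(tok)         # function from the module above
    print(len(tok) - before)                   # expected 101: 100 sentinels + <pad>
    print(tok.sentinel_token_ids[:3])          # ids of <extra_id_0>, <extra_id_1>, <extra_id_2>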
InternVL/internvl_chat_llava/llava/model/language_model/mpt/attention.py
ADDED
@@ -0,0 +1,300 @@
+"""Attention layers."""
+import math
+import warnings
+from typing import Optional
+import torch
+import torch.nn as nn
+from einops import rearrange
+from packaging import version
+from torch import nn
+from .norm import LPLayerNorm
+
+def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
+    if original_is_causal and num_query_tokens != num_key_tokens:
+        if num_query_tokens != 1:
+            raise NotImplementedError('MPT does not support query and key with different number of tokens, unless number of query tokens is 1.')
+        else:
+            return False
+    return original_is_causal
+
+def scaled_multihead_dot_product_attention(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
+    q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
+    kv_n_heads = 1 if multiquery else n_heads
+    k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
+    v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
+    if past_key_value is not None:
+        if len(past_key_value) != 0:
+            k = torch.cat([past_key_value[0], k], dim=3)
+            v = torch.cat([past_key_value[1], v], dim=2)
+        past_key_value = (k, v)
+    (b, _, s_q, d) = q.shape
+    s_k = k.size(-1)
+    if softmax_scale is None:
+        softmax_scale = 1 / math.sqrt(d)
+    attn_weight = q.matmul(k) * softmax_scale
+    if attn_bias is not None:
+        _s_q = max(0, attn_bias.size(2) - s_q)
+        _s_k = max(0, attn_bias.size(3) - s_k)
+        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
+        if attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
+            raise RuntimeError(f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.')
+        attn_weight = attn_weight + attn_bias
+    min_val = torch.finfo(q.dtype).min
+    if key_padding_mask is not None:
+        if attn_bias is not None:
+            warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
+        attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
+    if is_causal and (not q.size(2) == 1):
+        s = max(s_q, s_k)
+        causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
+        causal_mask = causal_mask.tril()
+        causal_mask = causal_mask.to(torch.bool)
+        causal_mask = ~causal_mask
+        causal_mask = causal_mask[-s_q:, -s_k:]
+        attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
+    attn_weight = torch.softmax(attn_weight, dim=-1)
+    if dropout_p:
+        attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
+    out = attn_weight.to(v.dtype).matmul(v)
+    out = rearrange(out, 'b h s d -> b s (h d)')
+    if needs_weights:
+        return (out, attn_weight, past_key_value)
+    return (out, None, past_key_value)
+
+def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
+    for tensor in tensors:
+        if tensor.dtype not in valid_dtypes:
+            raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.')
+        if not tensor.is_cuda:
+            raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')
+
+def flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
+    try:
+        from flash_attn import bert_padding, flash_attn_interface
+    except:
+        raise RuntimeError('Please install flash-attn==1.0.3.post0')
+    check_valid_inputs(query, key, value)
+    if past_key_value is not None:
+        if len(past_key_value) != 0:
+            key = torch.cat([past_key_value[0], key], dim=1)
+            value = torch.cat([past_key_value[1], value], dim=1)
+        past_key_value = (key, value)
+    if attn_bias is not None:
+        _s_q = max(0, attn_bias.size(2) - query.size(1))
+        _s_k = max(0, attn_bias.size(3) - key.size(1))
+        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
+    if attn_bias is not None:
+        raise NotImplementedError(f'attn_bias not implemented for flash attn.')
+    (batch_size, seqlen) = query.shape[:2]
+    if key_padding_mask is None:
+        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
+    query_padding_mask = key_padding_mask[:, -query.size(1):]
+    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
+    query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
+    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
+    key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
+    (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
+    value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
+    if multiquery:
+        key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
+        value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
+    dropout_p = dropout_p if training else 0.0
+    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
+    output_unpad = flash_attn_interface.flash_attn_unpadded_func(query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
+    output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
+    return (output, None, past_key_value)
+
+def triton_flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
+    try:
+        from .flash_attn_triton import flash_attn_func
+    except:
+        _installed = False
+        if version.parse(torch.__version__) < version.parse('2.0.0'):
+            _installed = True
+            try:
+                from flash_attn.flash_attn_triton import flash_attn_func
+            except:
+                _installed = False
+        if not _installed:
+            raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed.')
+    check_valid_inputs(query, key, value)
+    if past_key_value is not None:
+        if len(past_key_value) != 0:
+            key = torch.cat([past_key_value[0], key], dim=1)
+            value = torch.cat([past_key_value[1], value], dim=1)
+        past_key_value = (key, value)
+    if attn_bias is not None:
+        _s_q = max(0, attn_bias.size(2) - query.size(1))
+        _s_k = max(0, attn_bias.size(3) - key.size(1))
+        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
+    if dropout_p:
+        raise NotImplementedError(f'Dropout not implemented for attn_impl: triton.')
+    if needs_weights:
+        raise NotImplementedError(f'attn_impl: triton cannot return attn weights.')
+    if key_padding_mask is not None:
+        warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
+        (b_size, s_k) = key_padding_mask.shape[:2]
+        if attn_bias is None:
+            attn_bias = query.new_zeros(b_size, 1, 1, s_k)
+        attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
+    query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
+    key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
+    value = rearrange(value, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
+    if multiquery:
+        key = key.expand(*key.shape[:2], n_heads, key.size(-1))
+        value = value.expand(*value.shape[:2], n_heads, value.size(-1))
+    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
+    attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
+    output = attn_output.view(*attn_output.shape[:2], -1)
+    return (output, None, past_key_value)
+
+class MultiheadAttention(nn.Module):
+    """Multi-head self attention.
+
+    Using torch or triton attention implementation enables user to also use
+    additive bias.
+    """
+
+    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
+        super().__init__()
+        self.attn_impl = attn_impl
+        self.clip_qkv = clip_qkv
+        self.qk_ln = qk_ln
+        self.d_model = d_model
+        self.n_heads = n_heads
+        self.softmax_scale = softmax_scale
+        if self.softmax_scale is None:
+            self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
+        self.attn_dropout_p = attn_pdrop
+        self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
+        fuse_splits = (d_model, 2 * d_model)
+        self.Wqkv._fused = (0, fuse_splits)
+        if self.qk_ln:
+            layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
+            self.q_ln = layernorm_class(self.d_model, device=device)
+            self.k_ln = layernorm_class(self.d_model, device=device)
+        if self.attn_impl == 'flash':
+            self.attn_fn = flash_attn_fn
+        elif self.attn_impl == 'triton':
+            self.attn_fn = triton_flash_attn_fn
+            if verbose:
+                warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
+        elif self.attn_impl == 'torch':
+            self.attn_fn = scaled_multihead_dot_product_attention
+            if torch.cuda.is_available() and verbose:
+                warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.')
+        else:
+            raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
+        self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
+        self.out_proj._is_residual = True
+
+    def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
+        qkv = self.Wqkv(x)
+        if self.clip_qkv:
+            qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
+        (query, key, value) = qkv.chunk(3, dim=2)
+        key_padding_mask = attention_mask
+        if self.qk_ln:
+            dtype = query.dtype
+            query = self.q_ln(query).to(dtype)
+            key = self.k_ln(key).to(dtype)
+        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights)
+        return (self.out_proj(context), attn_weights, past_key_value)
+
+class MultiQueryAttention(nn.Module):
+    """Multi-Query self attention.
+
+    Using torch or triton attention implementation enables user to also use
+    additive bias.
+    """
+
+    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
+        super().__init__()
+        self.attn_impl = attn_impl
+        self.clip_qkv = clip_qkv
+        self.qk_ln = qk_ln
+        self.d_model = d_model
+        self.n_heads = n_heads
+        self.head_dim = d_model // n_heads
+        self.softmax_scale = softmax_scale
+        if self.softmax_scale is None:
+            self.softmax_scale = 1 / math.sqrt(self.head_dim)
+        self.attn_dropout_p = attn_pdrop
+        self.Wqkv = nn.Linear(d_model, d_model + 2 * self.head_dim, device=device)
+        fuse_splits = (d_model, d_model + self.head_dim)
+        self.Wqkv._fused = (0, fuse_splits)
+        if self.qk_ln:
+            layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
+            self.q_ln = layernorm_class(d_model, device=device)
+            self.k_ln = layernorm_class(self.head_dim, device=device)
+        if self.attn_impl == 'flash':
+            self.attn_fn = flash_attn_fn
+        elif self.attn_impl == 'triton':
+            self.attn_fn = triton_flash_attn_fn
+            if verbose:
+                warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
+        elif self.attn_impl == 'torch':
+            self.attn_fn = scaled_multihead_dot_product_attention
+            if torch.cuda.is_available() and verbose:
+                warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.')
+        else:
+            raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
+        self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
+        self.out_proj._is_residual = True
+
+    def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
+        qkv = self.Wqkv(x)
+        if self.clip_qkv:
+            qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
+        (query, key, value) = qkv.split([self.d_model, self.head_dim, self.head_dim], dim=2)
+        key_padding_mask = attention_mask
+        if self.qk_ln:
+            dtype = query.dtype
+            query = self.q_ln(query).to(dtype)
+            key = self.k_ln(key).to(dtype)
+        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, multiquery=True)
+        return (self.out_proj(context), attn_weights, past_key_value)
+
+def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
+    if attn_impl == 'flash':
+        return None
+    elif attn_impl in ['torch', 'triton']:
+        if alibi:
+            if (prefix_lm or not causal) or use_sequence_id:
+                return (1, n_heads, seq_len, seq_len)
+            return (1, n_heads, 1, seq_len)
+        elif prefix_lm or use_sequence_id:
+            return (1, 1, seq_len, seq_len)
+        return None
+    else:
+        raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
+
+def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):
+    if attn_impl == 'flash':
+        return None
+    elif attn_impl in ['torch', 'triton']:
+        if alibi:
+            (device, dtype) = (attn_bias.device, attn_bias.dtype)
+            attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))
+        return attn_bias
+    else:
+        raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
+
+def gen_slopes(n_heads, alibi_bias_max=8, device=None):
+    _n_heads = 2 ** math.ceil(math.log2(n_heads))
+    m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
+    m = m.mul(alibi_bias_max / _n_heads)
+    slopes = 1.0 / torch.pow(2, m)
+    if _n_heads != n_heads:
+        slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
+    return slopes.view(1, n_heads, 1, 1)
+
+def build_alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None):
+    alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len)
+    if full:
+        alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1)
+        alibi_bias = alibi_bias.abs().mul(-1)
+    slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
+    alibi_bias = alibi_bias * slopes
+    return alibi_bias.to(dtype=dtype)
+ATTN_CLASS_REGISTRY = {'multihead_attention': MultiheadAttention, 'multiquery_attention': MultiQueryAttention}
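A CPU smoke-test sketch for the module above: only attn_impl='torch' runs without CUDA, since check_valid_inputs in the flash/triton paths rejects non-cuda tensors.

    import torch
    from llava.model.language_model.mpt.attention import MultiheadAttention

    attn = MultiheadAttention(d_model=64, n_heads=4, attn_impl='torch')
    x = torch.randn(2, 8, 64)                        # (batch, seq, d_model)
    out, weights, _ = attn(x, is_causal=True, needs_weights=True)
    print(out.shape, weights.shape)                  # (2, 8, 64) and (2, 4, 8, 8)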
InternVL/internvl_chat_llava/llava/model/language_model/mpt/blocks.py
ADDED
@@ -0,0 +1,41 @@
+"""GPT Blocks used for the GPT Model."""
+from typing import Dict, Optional, Tuple
+import torch
+import torch.nn as nn
+from .attention import ATTN_CLASS_REGISTRY
+from .norm import NORM_CLASS_REGISTRY
+
+class MPTMLP(nn.Module):
+
+    def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
+        super().__init__()
+        self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
+        self.act = nn.GELU(approximate='none')
+        self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
+        self.down_proj._is_residual = True
+
+    def forward(self, x):
+        return self.down_proj(self.act(self.up_proj(x)))
+
+class MPTBlock(nn.Module):
+
+    def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):
+        del kwargs
+        super().__init__()
+        norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
+        attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
+        self.norm_1 = norm_class(d_model, device=device)
+        self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)
+        self.norm_2 = norm_class(d_model, device=device)
+        self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
+        self.resid_attn_dropout = nn.Dropout(resid_pdrop)
+        self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
+
+    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
+        a = self.norm_1(x)
+        (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
+        x = x + self.resid_attn_dropout(b)
+        m = self.norm_2(x)
+        n = self.ffn(m)
+        x = x + self.resid_ffn_dropout(n)
+        return (x, attn_weights, past_key_value)
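A sketch of a single block on CPU. The default attn_impl in the config dict above is 'triton', so the sketch overrides it to 'torch':

    import torch
    from llava.model.language_model.mpt.blocks import MPTBlock

    attn_config = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'torch',
                   'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False,
                   'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
    block = MPTBlock(d_model=64, n_heads=4, expansion_ratio=4, attn_config=attn_config)
    x = torch.randn(2, 8, 64)
    y, attn_weights, past = block(x)   # pre-norm attention + MLP, both with residual adds
    print(y.shape)                     # torch.Size([2, 8, 64])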
InternVL/internvl_chat_llava/llava/model/language_model/mpt/configuration_mpt.py
ADDED
@@ -0,0 +1,118 @@
+"""A HuggingFace-style model configuration."""
+from typing import Dict, Optional, Union
+from transformers import PretrainedConfig
+attn_config_defaults: Dict = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
+init_config_defaults: Dict = {'name': 'kaiming_normal_', 'fan_mode': 'fan_in', 'init_nonlinearity': 'relu', 'init_div_is_residual': True, 'emb_init_std': None, 'emb_init_uniform_lim': None, 'init_std': None, 'init_gain': 0.0}
+
+class MPTConfig(PretrainedConfig):
+    model_type = 'mpt'
+
+    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):
+        """The MPT configuration class.
+
+        Args:
+            d_model (int): The size of the embedding dimension of the model.
+            n_heads (int): The number of attention heads.
+            n_layers (int): The number of layers in the model.
+            expansion_ratio (int): The ratio of the up/down scale in the MLP.
+            max_seq_len (int): The maximum sequence length of the model.
+            vocab_size (int): The size of the vocabulary.
+            resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
+            emb_pdrop (float): The dropout probability for the embedding layer.
+            learned_pos_emb (bool): Whether to use learned positional embeddings
+            attn_config (Dict): A dictionary used to configure the model's attention module:
+                attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention
+                attn_pdrop (float): The dropout probability for the attention layers.
+                attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
+                qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
+                clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
+                    this value.
+                softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
+                    use the default scale of ``1/sqrt(d_keys)``.
+                prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
+                    extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
+                    can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
+                attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
+                    When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
+                    which sub-sequence each token belongs to.
+                    Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
+                alibi (bool): Whether to use the alibi bias instead of position embeddings.
+                alibi_bias_max (int): The maximum value of the alibi bias.
+            init_device (str): The device to use for parameter initialization.
+            logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
+            no_bias (bool): Whether to use bias in all layers.
+            verbose (int): The verbosity level. 0 is silent.
+            embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
+            norm_type (str): choose type of norm to use
+            multiquery_attention (bool): Whether to use multiquery attention implementation.
+            use_cache (bool): Whether or not the model should return the last key/values attentions
+            init_config (Dict): A dictionary used to configure the model initialization:
+                init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
+                    'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
+                    'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
+                init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
+                emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
+                emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
+                    used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
+                init_std (float): The standard deviation of the normal distribution used to initialize the model,
+                    if using the baseline_ parameter initialization scheme.
+                init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
+                fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
+                init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
+            ---
+            See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
+        """
+        self.d_model = d_model
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.expansion_ratio = expansion_ratio
+        self.max_seq_len = max_seq_len
+        self.vocab_size = vocab_size
+        self.resid_pdrop = resid_pdrop
+        self.emb_pdrop = emb_pdrop
+        self.learned_pos_emb = learned_pos_emb
+        self.attn_config = attn_config
+        self.init_device = init_device
+        self.logit_scale = logit_scale
+        self.no_bias = no_bias
+        self.verbose = verbose
+        self.embedding_fraction = embedding_fraction
+        self.norm_type = norm_type
+        self.use_cache = use_cache
+        self.init_config = init_config
+        if 'name' in kwargs:
+            del kwargs['name']
+        if 'loss_fn' in kwargs:
+            del kwargs['loss_fn']
+        super().__init__(**kwargs)
+        self._validate_config()
+
+    def _set_config_defaults(self, config, config_defaults):
+        for (k, v) in config_defaults.items():
+            if k not in config:
+                config[k] = v
+        return config
+
+    def _validate_config(self):
+        self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)
+        self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)
+        if self.d_model % self.n_heads != 0:
+            raise ValueError('d_model must be divisible by n_heads')
+        if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):
+            raise ValueError("self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1")
+        if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
+            raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
+        if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
+            raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
+        if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
+            raise NotImplementedError('alibi only implemented with torch and triton attention.')
+        if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
+            raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')
+        if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
+            raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
+        if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':
+            raise ValueError(f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
+        if self.init_config.get('name', None) is None:
+            raise ValueError(f"self.init_config={self.init_config!r} 'name' needs to be set.")
+        if not self.learned_pos_emb and (not self.attn_config['alibi']):
+            raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')
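A sketch of the validation path: _validate_config runs from __init__, so inconsistent settings fail at construction time.

    from llava.model.language_model.mpt.configuration_mpt import MPTConfig

    cfg = MPTConfig(d_model=256, n_heads=8, n_layers=4, max_seq_len=512)
    print(cfg.attn_config['attn_impl'])   # 'triton' (the default)
    try:
        MPTConfig(d_model=10, n_heads=3)  # 10 % 3 != 0
    except ValueError as err:
        print(err)                        # d_model must be divisible by n_heads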
InternVL/internvl_chat_llava/llava/model/language_model/mpt/custom_embedding.py
ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+
+class SharedEmbedding(nn.Embedding):
+
+    def forward(self, input: Tensor, unembed: bool=False) -> Tensor:
+        if unembed:
+            return F.linear(input, self.weight)
+        return super().forward(input)
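A sketch of the weight tying this enables: the same matrix embeds token ids and, with unembed=True, projects hidden states back to vocabulary logits.

    import torch
    from llava.model.language_model.mpt.custom_embedding import SharedEmbedding

    emb = SharedEmbedding(10, 4)        # vocab_size=10, d_model=4
    ids = torch.tensor([[1, 2, 3]])
    h = emb(ids)                        # (1, 3, 4): lookup path
    logits = emb(h, unembed=True)       # (1, 3, 10): F.linear against the same weight
    print(h.shape, logits.shape)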
InternVL/internvl_chat_llava/llava/model/language_model/mpt/flash_attn_triton.py
ADDED
@@ -0,0 +1,484 @@
+"""
+Copied from https://github.com/HazyResearch/flash-attention/blob/eff9fe6b8076df59d64d7a3f464696738a3c7c24/flash_attn/flash_attn_triton.py
+update imports to use 'triton_pre_mlir'
+
+*Experimental* implementation of FlashAttention in Triton.
+Tested with triton==2.0.0.dev20221202.
+Triton 2.0 has a new backend (MLIR) but seems like it doesn't yet work for head dimensions
+other than 64:
+https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207
+We'll update this implementation with the new Triton backend once this is fixed.
+
+We use the FlashAttention implementation from Phil Tillet as a starting point.
+https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
+
+Changes:
+- Implement both causal and non-causal attention.
+- Implement both self-attention and cross-attention.
+- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
+- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
+- Support attention bias.
+- Speed up the forward pass a bit, and only store the LSE instead of m and l.
+- Make the backward for d=128 much faster by reducing register spilling.
+- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
+small batch size * nheads.
+
+Caution:
+- This is an *experimental* implementation. The forward pass should be quite robust but
+I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
+- This implementation has only been tested on A100.
+- If you plan to use headdim other than 64 and 128, you should test for race conditions
+(due to the Triton compiler), as done in tests/test_flash_attn.py
+"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
+for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
+that there are none left for other head dimensions.
+
+Differences between this Triton version and the CUDA version:
+- Triton version doesn't support dropout.
+- Triton forward is generally faster than CUDA forward, while Triton backward is
+generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
+than CUDA forward + backward.
+- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
+- Triton version supports attention bias, while CUDA version doesn't.
+"""
+import math
+import torch
+import triton_pre_mlir as triton
+import triton_pre_mlir.language as tl
+
+@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
+@triton.jit
+def _fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
+    start_m = tl.program_id(0)
+    off_hb = tl.program_id(1)
+    off_b = off_hb // nheads
+    off_h = off_hb % nheads
+    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    offs_n = tl.arange(0, BLOCK_N)
+    offs_d = tl.arange(0, BLOCK_HEADDIM)
+    q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
+    k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
+    v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
+    if BIAS_TYPE == 'vector':
+        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
+    elif BIAS_TYPE == 'matrix':
+        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
+    t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
+    lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
+    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
+    acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
+    if EVEN_M & EVEN_N:
+        if EVEN_HEADDIM:
+            q = tl.load(q_ptrs)
+        else:
+            q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
+    elif EVEN_HEADDIM:
+        q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
+    else:
+        q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
+    end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
+    for start_n in range(0, end_n, BLOCK_N):
+        start_n = tl.multiple_of(start_n, BLOCK_N)
+        if EVEN_N & EVEN_M:
+            if EVEN_HEADDIM:
+                k = tl.load(k_ptrs + start_n * stride_kn)
+            else:
+                k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
+        elif EVEN_HEADDIM:
+            k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
+        else:
+            k = tl.load(k_ptrs + start_n * stride_kn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
+        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+        qk += tl.dot(q, k, trans_b=True)
+        if not EVEN_N:
+            qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float('-inf'))
+        if IS_CAUSAL:
+            qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float('-inf'))
+        if BIAS_TYPE != 'none':
+            if BIAS_TYPE == 'vector':
+                if EVEN_N:
+                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
+                else:
+                    bias = tl.load(b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0).to(tl.float32)
+                bias = bias[None, :]
+            elif BIAS_TYPE == 'matrix':
+                if EVEN_M & EVEN_N:
+                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
+                else:
+                    bias = tl.load(b_ptrs + start_n, mask=(offs_m[:, None] < seqlen_q) & ((start_n + offs_n)[None, :] < seqlen_k), other=0.0).to(tl.float32)
+            qk = qk * softmax_scale + bias
+            m_ij = tl.maximum(tl.max(qk, 1), lse_i)
+            p = tl.exp(qk - m_ij[:, None])
+        else:
+            m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
+            p = tl.exp(qk * softmax_scale - m_ij[:, None])
+        l_ij = tl.sum(p, 1)
+        acc_o_scale = tl.exp(m_i - m_ij)
+        tl.store(t_ptrs, acc_o_scale)
+        acc_o_scale = tl.load(t_ptrs)
+        acc_o = acc_o * acc_o_scale[:, None]
+        if EVEN_N & EVEN_M:
+            if EVEN_HEADDIM:
+                v = tl.load(v_ptrs + start_n * stride_vn)
+            else:
+                v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
+        elif EVEN_HEADDIM:
+            v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
+        else:
+            v = tl.load(v_ptrs + start_n * stride_vn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
+        p = p.to(v.dtype)
+        acc_o += tl.dot(p, v)
+        m_i = m_ij
+        l_i_new = tl.exp(lse_i - m_ij) + l_ij
+        lse_i = m_ij + tl.log(l_i_new)
+    o_scale = tl.exp(m_i - lse_i)
+    tl.store(t_ptrs, o_scale)
+    o_scale = tl.load(t_ptrs)
+    acc_o = acc_o * o_scale[:, None]
+    start_m = tl.program_id(0)
+    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
+    tl.store(lse_ptrs, lse_i)
+    offs_d = tl.arange(0, BLOCK_HEADDIM)
+    out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
+    if EVEN_M:
+        if EVEN_HEADDIM:
+            tl.store(out_ptrs, acc_o)
+        else:
+            tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
+    elif EVEN_HEADDIM:
+        tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
+    else:
+        tl.store(out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
+
+@triton.jit
+def _bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, stride_oh, stride_om, stride_dob, stride_doh, stride_dom, nheads, seqlen_q, seqlen_q_rounded, headdim, BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl.constexpr):
+    start_m = tl.program_id(0)
+    off_hb = tl.program_id(1)
+    off_b = off_hb // nheads
+    off_h = off_hb % nheads
+    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    offs_d = tl.arange(0, BLOCK_HEADDIM)
+    o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
+    do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
+    delta = tl.sum(o * do, axis=1)
+    tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
+
+@triton.jit
+def _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr):
+    if EVEN_N & EVEN_M:
+        if EVEN_HEADDIM:
+            tl.store(dv_ptrs, dv)
+            tl.store(dk_ptrs, dk)
+        else:
+            tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
+            tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
+    elif EVEN_HEADDIM:
+        tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
+        tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
+    else:
+        tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
+        tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
+
+@triton.jit
+def _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD: tl.constexpr, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
+    begin_m = 0 if not IS_CAUSAL else start_n * BLOCK_N // BLOCK_M * BLOCK_M
+    offs_qm = begin_m + tl.arange(0, BLOCK_M)
+    offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    offs_m = tl.arange(0, BLOCK_M)
+    offs_d = tl.arange(0, BLOCK_HEADDIM)
+    q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
+    k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
+    v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
+    do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
+    dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
+    if BIAS_TYPE == 'vector':
+        b_ptrs = Bias + offs_n
+    elif BIAS_TYPE == 'matrix':
+        b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
+    dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
+    dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
+    if begin_m >= seqlen_q:
+        dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
+        dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
| 204 |
+
_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
|
| 205 |
+
return
|
| 206 |
+
if EVEN_N & EVEN_M:
|
| 207 |
+
if EVEN_HEADDIM:
|
| 208 |
+
k = tl.load(k_ptrs)
|
| 209 |
+
v = tl.load(v_ptrs)
|
| 210 |
+
else:
|
| 211 |
+
k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
|
| 212 |
+
v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
|
| 213 |
+
elif EVEN_HEADDIM:
|
| 214 |
+
k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
|
| 215 |
+
v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
|
| 216 |
+
else:
|
| 217 |
+
k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
|
| 218 |
+
v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
|
| 219 |
+
num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
|
| 220 |
+
for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
|
| 221 |
+
start_m = tl.multiple_of(start_m, BLOCK_M)
|
| 222 |
+
offs_m_curr = start_m + offs_m
|
| 223 |
+
if EVEN_M & EVEN_HEADDIM:
|
| 224 |
+
q = tl.load(q_ptrs)
|
| 225 |
+
elif EVEN_HEADDIM:
|
| 226 |
+
q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
|
| 227 |
+
else:
|
| 228 |
+
q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
|
| 229 |
+
qk = tl.dot(q, k, trans_b=True)
|
| 230 |
+
if not EVEN_N:
|
| 231 |
+
qk = tl.where(offs_n[None, :] < seqlen_k, qk, float('-inf'))
|
| 232 |
+
if IS_CAUSAL:
|
| 233 |
+
qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :], qk, float('-inf'))
|
| 234 |
+
if BIAS_TYPE != 'none':
|
| 235 |
+
tl.debug_barrier()
|
| 236 |
+
if BIAS_TYPE == 'vector':
|
| 237 |
+
if EVEN_N:
|
| 238 |
+
bias = tl.load(b_ptrs).to(tl.float32)
|
| 239 |
+
else:
|
| 240 |
+
bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
|
| 241 |
+
bias = bias[None, :]
|
| 242 |
+
elif BIAS_TYPE == 'matrix':
|
| 243 |
+
if EVEN_M & EVEN_N:
|
| 244 |
+
bias = tl.load(b_ptrs).to(tl.float32)
|
| 245 |
+
else:
|
| 246 |
+
bias = tl.load(b_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_n[None, :] < seqlen_k), other=0.0).to(tl.float32)
|
| 247 |
+
qk = qk * softmax_scale + bias
|
| 248 |
+
if not EVEN_M & EVEN_HEADDIM:
|
| 249 |
+
tl.debug_barrier()
|
| 250 |
+
lse_i = tl.load(LSE + offs_m_curr)
|
| 251 |
+
if BIAS_TYPE == 'none':
|
| 252 |
+
p = tl.exp(qk * softmax_scale - lse_i[:, None])
|
| 253 |
+
else:
|
| 254 |
+
p = tl.exp(qk - lse_i[:, None])
|
| 255 |
+
if EVEN_M & EVEN_HEADDIM:
|
| 256 |
+
do = tl.load(do_ptrs)
|
| 257 |
+
else:
|
| 258 |
+
do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
|
| 259 |
+
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
|
| 260 |
+
if not EVEN_M & EVEN_HEADDIM:
|
| 261 |
+
tl.debug_barrier()
|
| 262 |
+
dp = tl.dot(do, v, trans_b=True)
|
| 263 |
+
if not EVEN_HEADDIM:
|
| 264 |
+
tl.debug_barrier()
|
| 265 |
+
Di = tl.load(D + offs_m_curr)
|
| 266 |
+
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
|
| 267 |
+
dk += tl.dot(ds, q, trans_a=True)
|
| 268 |
+
if not EVEN_M & EVEN_HEADDIM:
|
| 269 |
+
tl.debug_barrier()
|
| 270 |
+
if not ATOMIC_ADD:
|
| 271 |
+
if EVEN_M & EVEN_HEADDIM:
|
| 272 |
+
dq = tl.load(dq_ptrs, eviction_policy='evict_last')
|
| 273 |
+
dq += tl.dot(ds, k)
|
| 274 |
+
tl.store(dq_ptrs, dq, eviction_policy='evict_last')
|
| 275 |
+
elif EVEN_HEADDIM:
|
| 276 |
+
dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0, eviction_policy='evict_last')
|
| 277 |
+
dq += tl.dot(ds, k)
|
| 278 |
+
tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q, eviction_policy='evict_last')
|
| 279 |
+
else:
|
| 280 |
+
dq = tl.load(dq_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0, eviction_policy='evict_last')
|
| 281 |
+
dq += tl.dot(ds, k)
|
| 282 |
+
tl.store(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), eviction_policy='evict_last')
|
| 283 |
+
else:
|
| 284 |
+
dq = tl.dot(ds, k)
|
| 285 |
+
if EVEN_M & EVEN_HEADDIM:
|
| 286 |
+
tl.atomic_add(dq_ptrs, dq)
|
| 287 |
+
elif EVEN_HEADDIM:
|
| 288 |
+
tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
|
| 289 |
+
else:
|
| 290 |
+
tl.atomic_add(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
|
| 291 |
+
dq_ptrs += BLOCK_M * stride_dqm
|
| 292 |
+
q_ptrs += BLOCK_M * stride_qm
|
| 293 |
+
do_ptrs += BLOCK_M * stride_dom
|
| 294 |
+
if BIAS_TYPE == 'matrix':
|
| 295 |
+
b_ptrs += BLOCK_M * stride_bm
|
| 296 |
+
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
|
| 297 |
+
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
|
| 298 |
+
_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
|
| 299 |
+
|
| 300 |
+
def init_to_zero(name):
|
| 301 |
+
return lambda nargs: nargs[name].zero_()
|
| 302 |
+
|
| 303 |
+
@triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ'))], key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'])
|
| 304 |
+
@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
|
| 305 |
+
@triton.jit
|
| 306 |
+
def _bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_dob, stride_doh, stride_dom, stride_dqb, stride_dqh, stride_dqm, stride_dkb, stride_dkh, stride_dkn, stride_dvb, stride_dvh, stride_dvn, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
|
| 307 |
+
off_hb = tl.program_id(1)
|
| 308 |
+
off_b = off_hb // nheads
|
| 309 |
+
off_h = off_hb % nheads
|
| 310 |
+
Q += off_b * stride_qb + off_h * stride_qh
|
| 311 |
+
K += off_b * stride_kb + off_h * stride_kh
|
| 312 |
+
V += off_b * stride_vb + off_h * stride_vh
|
| 313 |
+
DO += off_b * stride_dob + off_h * stride_doh
|
| 314 |
+
DQ += off_b * stride_dqb + off_h * stride_dqh
|
| 315 |
+
DK += off_b * stride_dkb + off_h * stride_dkh
|
| 316 |
+
DV += off_b * stride_dvb + off_h * stride_dvh
|
| 317 |
+
if BIAS_TYPE != 'none':
|
| 318 |
+
Bias += off_b * stride_bb + off_h * stride_bh
|
| 319 |
+
D += off_hb * seqlen_q_rounded
|
| 320 |
+
LSE += off_hb * seqlen_q_rounded
|
| 321 |
+
if not SEQUENCE_PARALLEL:
|
| 322 |
+
num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
|
| 323 |
+
for start_n in range(0, num_block_n):
|
| 324 |
+
_bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD=False, BIAS_TYPE=BIAS_TYPE, IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
|
| 325 |
+
else:
|
| 326 |
+
start_n = tl.program_id(0)
|
| 327 |
+
_bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD=True, BIAS_TYPE=BIAS_TYPE, IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
|
| 328 |
+
|
| 329 |
+
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
|
| 330 |
+
(batch, seqlen_q, nheads, d) = q.shape
|
| 331 |
+
(_, seqlen_k, _, _) = k.shape
|
| 332 |
+
assert k.shape == (batch, seqlen_k, nheads, d)
|
| 333 |
+
assert v.shape == (batch, seqlen_k, nheads, d)
|
| 334 |
+
assert d <= 128, 'FlashAttention only support head dimensions up to 128'
|
| 335 |
+
assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type'
|
| 336 |
+
assert q.dtype in [torch.float16, torch.bfloat16], 'Only support fp16 and bf16'
|
| 337 |
+
assert q.is_cuda and k.is_cuda and v.is_cuda
|
| 338 |
+
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
|
| 339 |
+
has_bias = bias is not None
|
| 340 |
+
bias_type = 'none'
|
| 341 |
+
if has_bias:
|
| 342 |
+
assert bias.dtype in [q.dtype, torch.float]
|
| 343 |
+
assert bias.is_cuda
|
| 344 |
+
assert bias.dim() == 4
|
| 345 |
+
if bias.stride(-1) != 1:
|
| 346 |
+
bias = bias.contiguous()
|
| 347 |
+
if bias.shape[2:] == (1, seqlen_k):
|
| 348 |
+
bias_type = 'vector'
|
| 349 |
+
elif bias.shape[2:] == (seqlen_q, seqlen_k):
|
| 350 |
+
bias_type = 'matrix'
|
| 351 |
+
else:
|
| 352 |
+
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)')
|
| 353 |
+
bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
|
| 354 |
+
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
|
| 355 |
+
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
|
| 356 |
+
lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
|
| 357 |
+
tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
|
| 358 |
+
o = torch.empty_like(q)
|
| 359 |
+
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
|
| 360 |
+
BLOCK = 128
|
| 361 |
+
num_warps = 4 if d <= 64 else 8
|
| 362 |
+
grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
|
| 363 |
+
_fwd_kernel[grid](q, k, v, bias, o, lse, tmp, softmax_scale, q.stride(0), q.stride(2), q.stride(1), k.stride(0), k.stride(2), k.stride(1), v.stride(0), v.stride(2), v.stride(1), *bias_strides, o.stride(0), o.stride(2), o.stride(1), nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, seqlen_q // 32, seqlen_k // 32, bias_type, causal, BLOCK_HEADDIM, BLOCK_M=BLOCK, BLOCK_N=BLOCK, num_warps=num_warps, num_stages=1)
|
| 364 |
+
return (o, lse, softmax_scale)
|
| 365 |
+
|
| 366 |
+
def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None):
|
| 367 |
+
if do.stride(-1) != 1:
|
| 368 |
+
do = do.contiguous()
|
| 369 |
+
(batch, seqlen_q, nheads, d) = q.shape
|
| 370 |
+
(_, seqlen_k, _, _) = k.shape
|
| 371 |
+
assert d <= 128
|
| 372 |
+
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
|
| 373 |
+
assert lse.shape == (batch, nheads, seqlen_q_rounded)
|
| 374 |
+
assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
|
| 375 |
+
assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
|
| 376 |
+
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
|
| 377 |
+
dq_accum = torch.empty_like(q, dtype=torch.float32)
|
| 378 |
+
delta = torch.empty_like(lse)
|
| 379 |
+
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
|
| 380 |
+
grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
|
| 381 |
+
_bwd_preprocess_do_o_dot[grid](o, do, delta, o.stride(0), o.stride(2), o.stride(1), do.stride(0), do.stride(2), do.stride(1), nheads, seqlen_q, seqlen_q_rounded, d, BLOCK_M=128, BLOCK_HEADDIM=BLOCK_HEADDIM)
|
| 382 |
+
has_bias = bias is not None
|
| 383 |
+
bias_type = 'none'
|
| 384 |
+
if has_bias:
|
| 385 |
+
assert bias.dtype in [q.dtype, torch.float]
|
| 386 |
+
assert bias.is_cuda
|
| 387 |
+
assert bias.dim() == 4
|
| 388 |
+
assert bias.stride(-1) == 1
|
| 389 |
+
if bias.shape[2:] == (1, seqlen_k):
|
| 390 |
+
bias_type = 'vector'
|
| 391 |
+
elif bias.shape[2:] == (seqlen_q, seqlen_k):
|
| 392 |
+
bias_type = 'matrix'
|
| 393 |
+
else:
|
| 394 |
+
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)')
|
| 395 |
+
bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
|
| 396 |
+
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
|
| 397 |
+
grid = lambda META: (triton.cdiv(seqlen_k, META['BLOCK_N']) if META['SEQUENCE_PARALLEL'] else 1, batch * nheads)
|
| 398 |
+
_bwd_kernel[grid](q, k, v, bias, do, dq_accum, dk, dv, lse, delta, softmax_scale, q.stride(0), q.stride(2), q.stride(1), k.stride(0), k.stride(2), k.stride(1), v.stride(0), v.stride(2), v.stride(1), *bias_strides, do.stride(0), do.stride(2), do.stride(1), dq_accum.stride(0), dq_accum.stride(2), dq_accum.stride(1), dk.stride(0), dk.stride(2), dk.stride(1), dv.stride(0), dv.stride(2), dv.stride(1), nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, seqlen_q // 32, seqlen_k // 32, bias_type, causal, BLOCK_HEADDIM)
|
| 399 |
+
dq.copy_(dq_accum)
|
| 400 |
+
|
| 401 |
+
class FlashAttnQKVPackedFunc(torch.autograd.Function):
|
| 402 |
+
|
| 403 |
+
@staticmethod
|
| 404 |
+
def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
|
| 405 |
+
"""
|
| 406 |
+
qkv: (batch, seqlen, 3, nheads, headdim)
|
| 407 |
+
bias: optional, shape broadcastible to (batch, nheads, seqlen, seqlen).
|
| 408 |
+
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
|
| 409 |
+
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
|
| 410 |
+
"""
|
| 411 |
+
if qkv.stride(-1) != 1:
|
| 412 |
+
qkv = qkv.contiguous()
|
| 413 |
+
(o, lse, ctx.softmax_scale) = _flash_attn_forward(qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal, softmax_scale=softmax_scale)
|
| 414 |
+
ctx.save_for_backward(qkv, o, lse, bias)
|
| 415 |
+
ctx.causal = causal
|
| 416 |
+
return o
|
| 417 |
+
|
| 418 |
+
@staticmethod
|
| 419 |
+
def backward(ctx, do):
|
| 420 |
+
(qkv, o, lse, bias) = ctx.saved_tensors
|
| 421 |
+
assert not ctx.needs_input_grad[1], 'FlashAttention does not support bias gradient yet'
|
| 422 |
+
with torch.inference_mode():
|
| 423 |
+
dqkv = torch.empty_like(qkv)
|
| 424 |
+
_flash_attn_backward(do, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], o, lse, dqkv[:, :, 0], dqkv[:, :, 1], dqkv[:, :, 2], bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
|
| 425 |
+
return (dqkv, None, None, None)
|
| 426 |
+
flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
|
| 427 |
+
|
| 428 |
+
class FlashAttnKVPackedFunc(torch.autograd.Function):
|
| 429 |
+
|
| 430 |
+
@staticmethod
|
| 431 |
+
def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
|
| 432 |
+
"""
|
| 433 |
+
q: (batch, seqlen_q, nheads, headdim)
|
| 434 |
+
kv: (batch, seqlen_k, 2, nheads, headdim)
|
| 435 |
+
bias: optional, shape broadcastible to (batch, nheads, seqlen_q, seqlen_k).
|
| 436 |
+
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
|
| 437 |
+
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
|
| 438 |
+
"""
|
| 439 |
+
(q, kv) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
|
| 440 |
+
(o, lse, ctx.softmax_scale) = _flash_attn_forward(q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale)
|
| 441 |
+
ctx.save_for_backward(q, kv, o, lse, bias)
|
| 442 |
+
ctx.causal = causal
|
| 443 |
+
return o
|
| 444 |
+
|
| 445 |
+
@staticmethod
|
| 446 |
+
def backward(ctx, do):
|
| 447 |
+
(q, kv, o, lse, bias) = ctx.saved_tensors
|
| 448 |
+
if len(ctx.needs_input_grad) >= 3:
|
| 449 |
+
assert not ctx.needs_input_grad[2], 'FlashAttention does not support bias gradient yet'
|
| 450 |
+
with torch.inference_mode():
|
| 451 |
+
dq = torch.empty_like(q)
|
| 452 |
+
dkv = torch.empty_like(kv)
|
| 453 |
+
_flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse, dq, dkv[:, :, 0], dkv[:, :, 1], bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
|
| 454 |
+
return (dq, dkv, None, None, None)
|
| 455 |
+
flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
|
| 456 |
+
|
| 457 |
+
class FlashAttnFunc(torch.autograd.Function):
|
| 458 |
+
|
| 459 |
+
@staticmethod
|
| 460 |
+
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
|
| 461 |
+
"""
|
| 462 |
+
q: (batch_size, seqlen_q, nheads, headdim)
|
| 463 |
+
k, v: (batch_size, seqlen_k, nheads, headdim)
|
| 464 |
+
bias: optional, shape broadcastible to (batch, nheads, seqlen_q, seqlen_k).
|
| 465 |
+
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
|
| 466 |
+
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
|
| 467 |
+
"""
|
| 468 |
+
(q, k, v) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
|
| 469 |
+
(o, lse, ctx.softmax_scale) = _flash_attn_forward(q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale)
|
| 470 |
+
ctx.save_for_backward(q, k, v, o, lse, bias)
|
| 471 |
+
ctx.causal = causal
|
| 472 |
+
return o
|
| 473 |
+
|
| 474 |
+
@staticmethod
|
| 475 |
+
def backward(ctx, do):
|
| 476 |
+
(q, k, v, o, lse, bias) = ctx.saved_tensors
|
| 477 |
+
assert not ctx.needs_input_grad[3], 'FlashAttention does not support bias gradient yet'
|
| 478 |
+
with torch.inference_mode():
|
| 479 |
+
dq = torch.empty_like(q)
|
| 480 |
+
dk = torch.empty_like(k)
|
| 481 |
+
dv = torch.empty_like(v)
|
| 482 |
+
_flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
|
| 483 |
+
return (dq, dk, dv, None, None, None)
|
| 484 |
+
flash_attn_func = FlashAttnFunc.apply
|
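A minimal usage sketch for the autograd wrapper above (not part of the diff; the shapes, the 'cuda' device, and fp16 dtype are illustrative assumptions, and a GPU with Triton installed is required):

# Hypothetical example, not from the source tree.
import torch

batch, seqlen, nheads, headdim = 2, 1024, 16, 64
q = torch.randn(batch, seqlen, nheads, headdim, device='cuda', dtype=torch.float16, requires_grad=True)
k = torch.randn_like(q, requires_grad=True)
v = torch.randn_like(q, requires_grad=True)
# FlashAttnFunc.apply takes positional args: (q, k, v, bias, causal, softmax_scale).
out = flash_attn_func(q, k, v, None, True, None)
out.sum().backward()  # exercises _flash_attn_backward via autograd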
InternVL/internvl_chat_llava/llava/model/language_model/mpt/hf_prefixlm_converter.py
ADDED
@@ -0,0 +1,415 @@
"""Converts Huggingface Causal LM to Prefix LM.
|
| 2 |
+
|
| 3 |
+
Conversion does lightweight surgery on a HuggingFace
|
| 4 |
+
Causal LM to convert it to a Prefix LM.
|
| 5 |
+
|
| 6 |
+
Prefix LMs accepts a `bidirectional_mask` input in `forward`
|
| 7 |
+
and treat the input prompt as the prefix in `generate`.
|
| 8 |
+
"""
|
| 9 |
+
import math
|
| 10 |
+
import warnings
|
| 11 |
+
from types import MethodType
|
| 12 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 13 |
+
import torch
|
| 14 |
+
from transformers.models.bloom.modeling_bloom import BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel, CausalLMOutputWithCrossAttentions, CrossEntropyLoss
|
| 15 |
+
from transformers.models.bloom.modeling_bloom import _expand_mask as _expand_mask_bloom
|
| 16 |
+
from transformers.models.bloom.modeling_bloom import _make_causal_mask as _make_causal_mask_bloom
|
| 17 |
+
from transformers.models.bloom.modeling_bloom import logging
|
| 18 |
+
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
|
| 19 |
+
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
|
| 20 |
+
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
|
| 21 |
+
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
|
| 22 |
+
from transformers.models.opt.modeling_opt import OPTForCausalLM
|
| 23 |
+
from transformers.models.opt.modeling_opt import _expand_mask as _expand_mask_opt
|
| 24 |
+
from transformers.models.opt.modeling_opt import _make_causal_mask as _make_causal_mask_opt
|
| 25 |
+
logger = logging.get_logger(__name__)
|
| 26 |
+
_SUPPORTED_GPT_MODELS = (GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM)
|
| 27 |
+
CAUSAL_GPT_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM]
|
| 28 |
+
|
| 29 |
+
def _convert_gpt_causal_lm_to_prefix_lm(model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES:
|
| 30 |
+
"""Converts a GPT-style Causal LM to a Prefix LM.
|
| 31 |
+
|
| 32 |
+
Supported HuggingFace model classes:
|
| 33 |
+
- `GPT2LMHeadModel`
|
| 34 |
+
- `GPTNeoForCausalLM`
|
| 35 |
+
- `GPTNeoXForCausalLM`
|
| 36 |
+
- `GPTJForCausalLM`
|
| 37 |
+
|
| 38 |
+
See `convert_hf_causal_lm_to_prefix_lm` for more details.
|
| 39 |
+
"""
|
| 40 |
+
if hasattr(model, '_prefix_lm_converted'):
|
| 41 |
+
return model
|
| 42 |
+
assert isinstance(model, _SUPPORTED_GPT_MODELS)
|
| 43 |
+
assert model.config.add_cross_attention == False, 'Only supports GPT-style decoder-only models'
|
| 44 |
+
|
| 45 |
+
def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]:
|
| 46 |
+
"""Helper that gets a list of the model's attention modules.
|
| 47 |
+
|
| 48 |
+
Each module has a `bias` buffer used for causal masking. The Prefix LM
|
| 49 |
+
conversion adds logic to dynamically manipulate these biases to support
|
| 50 |
+
Prefix LM attention masking.
|
| 51 |
+
"""
|
| 52 |
+
attn_modules = []
|
| 53 |
+
if isinstance(model, GPTNeoXForCausalLM):
|
| 54 |
+
blocks = model.gpt_neox.layers
|
| 55 |
+
else:
|
| 56 |
+
blocks = model.transformer.h
|
| 57 |
+
for block in blocks:
|
| 58 |
+
if isinstance(model, GPTNeoForCausalLM):
|
| 59 |
+
if block.attn.attention_type != 'global':
|
| 60 |
+
continue
|
| 61 |
+
attn_module = block.attn.attention
|
| 62 |
+
elif isinstance(model, GPTNeoXForCausalLM):
|
| 63 |
+
attn_module = block.attention
|
| 64 |
+
else:
|
| 65 |
+
attn_module = block.attn
|
| 66 |
+
attn_modules.append(attn_module)
|
| 67 |
+
return attn_modules
|
| 68 |
+
setattr(model, '_original_forward', getattr(model, 'forward'))
|
| 69 |
+
setattr(model, '_original_generate', getattr(model, 'generate'))
|
| 70 |
+
|
| 71 |
+
def forward(self: CAUSAL_GPT_TYPES, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
|
| 72 |
+
"""Wraps original forward to enable PrefixLM attention."""
|
| 73 |
+
|
| 74 |
+
def call_og_forward():
|
| 75 |
+
if isinstance(self, GPTNeoXForCausalLM):
|
| 76 |
+
return self._original_forward(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
| 77 |
+
else:
|
| 78 |
+
return self._original_forward(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
| 79 |
+
if bidirectional_mask is None:
|
| 80 |
+
return call_og_forward()
|
| 81 |
+
assert isinstance(bidirectional_mask, torch.Tensor)
|
| 82 |
+
attn_modules = _get_attn_modules(model)
|
| 83 |
+
(b, s) = bidirectional_mask.shape
|
| 84 |
+
max_length = attn_modules[0].bias.shape[-1]
|
| 85 |
+
if s > max_length:
|
| 86 |
+
raise ValueError(f'bidirectional_mask sequence length (={s}) exceeds the ' + f'max length allowed by the model ({max_length}).')
|
| 87 |
+
assert s <= max_length
|
| 88 |
+
if s < max_length:
|
| 89 |
+
pad = torch.zeros((int(b), int(max_length - s)), dtype=bidirectional_mask.dtype, device=bidirectional_mask.device)
|
| 90 |
+
bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1)
|
| 91 |
+
bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1)
|
| 92 |
+
for attn_module in attn_modules:
|
| 93 |
+
attn_module.bias.data = torch.logical_or(attn_module.bias.data, bidirectional)
|
| 94 |
+
output = call_og_forward()
|
| 95 |
+
for attn_module in attn_modules:
|
| 96 |
+
attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
|
| 97 |
+
return output
|
| 98 |
+
|
| 99 |
+
def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
|
| 100 |
+
"""Wraps original generate to enable PrefixLM attention."""
|
| 101 |
+
attn_modules = _get_attn_modules(model)
|
| 102 |
+
for attn_module in attn_modules:
|
| 103 |
+
attn_module.bias.data[:] = 1
|
| 104 |
+
output = self._original_generate(*args, **kwargs)
|
| 105 |
+
for attn_module in attn_modules:
|
| 106 |
+
attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
|
| 107 |
+
return output
|
| 108 |
+
setattr(model, 'forward', MethodType(forward, model))
|
| 109 |
+
setattr(model, 'generate', MethodType(generate, model))
|
| 110 |
+
setattr(model, '_prefix_lm_converted', True)
|
| 111 |
+
return model
|
| 112 |
+
|
| 113 |
+
def _convert_bloom_causal_lm_to_prefix_lm(model: BloomForCausalLM) -> BloomForCausalLM:
|
| 114 |
+
"""Converts a BLOOM Causal LM to a Prefix LM.
|
| 115 |
+
|
| 116 |
+
Supported HuggingFace model classes:
|
| 117 |
+
- `BloomForCausalLM`
|
| 118 |
+
|
| 119 |
+
See `convert_hf_causal_lm_to_prefix_lm` for more details.
|
| 120 |
+
"""
|
| 121 |
+
if hasattr(model, '_prefix_lm_converted'):
|
| 122 |
+
return model
|
| 123 |
+
assert isinstance(model, BloomForCausalLM)
|
| 124 |
+
assert model.config.add_cross_attention == False, 'Only supports BLOOM decoder-only models'
|
| 125 |
+
|
| 126 |
+
def _prepare_attn_mask(self: BloomModel, attention_mask: torch.Tensor, bidirectional_mask: Optional[torch.Tensor], input_shape: Tuple[int, int], past_key_values_length: int) -> torch.BoolTensor:
|
| 127 |
+
combined_attention_mask = None
|
| 128 |
+
device = attention_mask.device
|
| 129 |
+
(_, src_length) = input_shape
|
| 130 |
+
if src_length > 1:
|
| 131 |
+
combined_attention_mask = _make_causal_mask_bloom(input_shape, device=device, past_key_values_length=past_key_values_length)
|
| 132 |
+
if bidirectional_mask is not None:
|
| 133 |
+
assert attention_mask.shape == bidirectional_mask.shape
|
| 134 |
+
expanded_bidirectional_mask = _expand_mask_bloom(bidirectional_mask, tgt_length=src_length)
|
| 135 |
+
combined_attention_mask = torch.logical_and(combined_attention_mask, expanded_bidirectional_mask)
|
| 136 |
+
expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length)
|
| 137 |
+
combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask
|
| 138 |
+
return combined_attention_mask
|
| 139 |
+
|
| 140 |
+
def _build_alibi_tensor(self: BloomModel, batch_size: int, query_length: int, key_length: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
|
| 141 |
+
num_heads = self.config.n_head
|
| 142 |
+
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
|
| 143 |
+
base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=device, dtype=torch.float32)
|
| 144 |
+
powers = torch.arange(1, 1 + closest_power_of_2, device=device, dtype=torch.int32)
|
| 145 |
+
slopes = torch.pow(base, powers)
|
| 146 |
+
if closest_power_of_2 != num_heads:
|
| 147 |
+
extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=device, dtype=torch.float32)
|
| 148 |
+
num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
|
| 149 |
+
extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32)
|
| 150 |
+
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
|
| 151 |
+
qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1)
|
| 152 |
+
ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1)
|
| 153 |
+
diffs = qa - ka + key_length - query_length
|
| 154 |
+
diffs = -diffs.abs()
|
| 155 |
+
alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(1, 1, query_length, key_length)
|
| 156 |
+
alibi = alibi.expand(batch_size, -1, -1, -1).reshape(-1, query_length, key_length)
|
| 157 |
+
return alibi.to(dtype)
|
| 158 |
+
KeyValueT = Tuple[torch.Tensor, torch.Tensor]
|
| 159 |
+
|
| 160 |
+
def forward(self: BloomModel, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[KeyValueT, ...]]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
|
| 161 |
+
if deprecated_arguments.pop('position_ids', False) is not False:
|
| 162 |
+
warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. ' + 'You can safely ignore passing `position_ids`.', FutureWarning)
|
| 163 |
+
if len(deprecated_arguments) > 0:
|
| 164 |
+
raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
|
| 165 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 166 |
+
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 167 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 168 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 169 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 170 |
+
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
|
| 171 |
+
elif input_ids is not None:
|
| 172 |
+
(batch_size, seq_length) = input_ids.shape
|
| 173 |
+
elif inputs_embeds is not None:
|
| 174 |
+
(batch_size, seq_length, _) = inputs_embeds.shape
|
| 175 |
+
else:
|
| 176 |
+
raise ValueError('You have to specify either input_ids or inputs_embeds')
|
| 177 |
+
if past_key_values is None:
|
| 178 |
+
past_key_values = tuple([None] * len(self.h))
|
| 179 |
+
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
|
| 180 |
+
if inputs_embeds is None:
|
| 181 |
+
inputs_embeds = self.word_embeddings(input_ids)
|
| 182 |
+
hidden_states = self.word_embeddings_layernorm(inputs_embeds)
|
| 183 |
+
presents = () if use_cache else None
|
| 184 |
+
all_self_attentions = () if output_attentions else None
|
| 185 |
+
all_hidden_states = () if output_hidden_states else None
|
| 186 |
+
seq_length_with_past = seq_length
|
| 187 |
+
past_key_values_length = 0
|
| 188 |
+
if past_key_values[0] is not None:
|
| 189 |
+
tmp = past_key_values[0][0]
|
| 190 |
+
past_key_values_length = tmp.shape[2]
|
| 191 |
+
seq_length_with_past = seq_length_with_past + past_key_values_length
|
| 192 |
+
if attention_mask is None:
|
| 193 |
+
attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
|
| 194 |
+
else:
|
| 195 |
+
attention_mask = attention_mask.to(hidden_states.device)
|
| 196 |
+
alibi = self._build_alibi_tensor(batch_size=batch_size, query_length=seq_length, key_length=seq_length_with_past, dtype=hidden_states.dtype, device=hidden_states.device)
|
| 197 |
+
causal_mask = self._prepare_attn_mask(attention_mask, bidirectional_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length)
|
| 198 |
+
for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)):
|
| 199 |
+
if output_hidden_states:
|
| 200 |
+
hst = (hidden_states,)
|
| 201 |
+
all_hidden_states = all_hidden_states + hst
|
| 202 |
+
if self.gradient_checkpointing and self.training:
|
| 203 |
+
if use_cache:
|
| 204 |
+
logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
|
| 205 |
+
use_cache = False
|
| 206 |
+
|
| 207 |
+
def create_custom_forward(module):
|
| 208 |
+
|
| 209 |
+
def custom_forward(*inputs):
|
| 210 |
+
return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)
|
| 211 |
+
return custom_forward
|
| 212 |
+
outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, alibi, causal_mask, head_mask[i])
|
| 213 |
+
else:
|
| 214 |
+
outputs = block(hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi)
|
| 215 |
+
hidden_states = outputs[0]
|
| 216 |
+
if use_cache is True:
|
| 217 |
+
presents = presents + (outputs[1],)
|
| 218 |
+
if output_attentions:
|
| 219 |
+
oa = (outputs[2 if use_cache else 1],)
|
| 220 |
+
all_self_attentions = all_self_attentions + oa
|
| 221 |
+
hidden_states = self.ln_f(hidden_states)
|
| 222 |
+
if output_hidden_states:
|
| 223 |
+
hst = (hidden_states,)
|
| 224 |
+
all_hidden_states = all_hidden_states + hst
|
| 225 |
+
if not return_dict:
|
| 226 |
+
return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None))
|
| 227 |
+
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions)
|
| 228 |
+
setattr(model.transformer, '_prepare_attn_mask', MethodType(_prepare_attn_mask, model.transformer))
|
| 229 |
+
setattr(model.transformer, '_build_alibi_tensor', MethodType(_build_alibi_tensor, model.transformer))
|
| 230 |
+
setattr(model.transformer, 'forward', MethodType(forward, model.transformer))
|
| 231 |
+
KeyValueT = Tuple[torch.Tensor, torch.Tensor]
|
| 232 |
+
|
| 233 |
+
def forward(self: BloomForCausalLM, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[KeyValueT, ...]]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
|
| 234 |
+
"""Replacement forward method for BloomCausalLM."""
|
| 235 |
+
if deprecated_arguments.pop('position_ids', False) is not False:
|
| 236 |
+
warnings.warn('`position_ids` have no functionality in BLOOM and will be removed ' + 'in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
|
| 237 |
+
if len(deprecated_arguments) > 0:
|
| 238 |
+
raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
|
| 239 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 240 |
+
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, bidirectional_mask=bidirectional_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
| 241 |
+
hidden_states = transformer_outputs[0]
|
| 242 |
+
lm_logits = self.lm_head(hidden_states)
|
| 243 |
+
loss = None
|
| 244 |
+
if labels is not None:
|
| 245 |
+
shift_logits = lm_logits[..., :-1, :].contiguous()
|
| 246 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 247 |
+
(batch_size, seq_length, vocab_size) = shift_logits.shape
|
| 248 |
+
loss_fct = CrossEntropyLoss()
|
| 249 |
+
loss = loss_fct(shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length))
|
| 250 |
+
if not return_dict:
|
| 251 |
+
output = (lm_logits,) + transformer_outputs[1:]
|
| 252 |
+
return (loss,) + output if loss is not None else output
|
| 253 |
+
return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
|
| 254 |
+
|
| 255 |
+
def prepare_inputs_for_generation(self: BloomForCausalLM, input_ids: torch.LongTensor, past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> dict:
|
| 256 |
+
if past:
|
| 257 |
+
input_ids = input_ids[:, -1].unsqueeze(-1)
|
| 258 |
+
bidirectional_mask = None
|
| 259 |
+
if past[0][0].shape[0] == input_ids.shape[0]:
|
| 260 |
+
past = self._convert_to_bloom_cache(past)
|
| 261 |
+
else:
|
| 262 |
+
bidirectional_mask = torch.ones_like(input_ids)
|
| 263 |
+
return {'input_ids': input_ids, 'past_key_values': past, 'use_cache': True, 'attention_mask': attention_mask, 'bidirectional_mask': bidirectional_mask}
|
| 264 |
+
setattr(model, 'forward', MethodType(forward, model))
|
| 265 |
+
setattr(model, 'prepare_inputs_for_generation', MethodType(prepare_inputs_for_generation, model))
|
| 266 |
+
setattr(model, '_prefix_lm_converted', True)
|
| 267 |
+
return model
|
| 268 |
+
|
| 269 |
+
def _convert_opt_causal_lm_to_prefix_lm(model: OPTForCausalLM) -> OPTForCausalLM:
|
| 270 |
+
"""Converts an OPT Causal LM to a Prefix LM.
|
| 271 |
+
|
| 272 |
+
Supported HuggingFace model classes:
|
| 273 |
+
- `OPTForCausalLM`
|
| 274 |
+
|
| 275 |
+
See `convert_hf_causal_lm_to_prefix_lm` for more details.
|
| 276 |
+
"""
|
| 277 |
+
if hasattr(model, '_prefix_lm_converted'):
|
| 278 |
+
return model
|
| 279 |
+
assert isinstance(model, OPTForCausalLM)
|
| 280 |
+
assert model.config.add_cross_attention == False, 'Only supports OPT decoder-only models'
|
| 281 |
+
setattr(model, '_original_forward', getattr(model, 'forward'))
|
| 282 |
+
setattr(model, '_original_generate', getattr(model, 'generate'))
|
| 283 |
+
model.model.decoder.bidirectional_mask = None
|
| 284 |
+
|
| 285 |
+
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
|
| 286 |
+
combined_attention_mask = None
|
| 287 |
+
if input_shape[-1] > 1:
|
| 288 |
+
if self.bidirectional_mask == 'g':
|
| 289 |
+
(bsz, src_length) = input_shape
|
| 290 |
+
combined_attention_mask = torch.zeros((bsz, 1, src_length, src_length + past_key_values_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
|
| 291 |
+
else:
|
| 292 |
+
combined_attention_mask = _make_causal_mask_opt(input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length).to(inputs_embeds.device)
|
| 293 |
+
if self.bidirectional_mask is not None:
|
| 294 |
+
assert attention_mask.shape == self.bidirectional_mask.shape
|
| 295 |
+
expanded_bidirectional_mask = _expand_mask_opt(self.bidirectional_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
|
| 296 |
+
combined_attention_mask = torch.maximum(expanded_bidirectional_mask, combined_attention_mask)
|
| 297 |
+
if attention_mask is not None:
|
| 298 |
+
expanded_attn_mask = _expand_mask_opt(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
|
| 299 |
+
combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
|
| 300 |
+
return combined_attention_mask
|
| 301 |
+
setattr(model.model.decoder, '_prepare_decoder_attention_mask', MethodType(_prepare_decoder_attention_mask, model.model.decoder))
|
| 302 |
+
|
| 303 |
+
def forward(self: OPTForCausalLM, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.ByteTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
|
| 304 |
+
|
| 305 |
+
def call_og_forward():
|
| 306 |
+
return self._original_forward(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
| 307 |
+
if bidirectional_mask is None:
|
| 308 |
+
return call_og_forward()
|
| 309 |
+
self.model.decoder.bidirectional_mask = bidirectional_mask
|
| 310 |
+
try:
|
| 311 |
+
outputs = call_og_forward()
|
| 312 |
+
except:
|
| 313 |
+
self.model.decoder.bidirectional_mask = None
|
| 314 |
+
raise
|
| 315 |
+
self.model.decoder.bidirectional_mask = None
|
| 316 |
+
return outputs
|
| 317 |
+
|
| 318 |
+
def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]):
|
| 319 |
+
"""Wraps original generate to enable PrefixLM-style attention."""
|
| 320 |
+
self.model.decoder.bidirectional_mask = 'g'
|
| 321 |
+
try:
|
| 322 |
+
output = self._original_generate(*args, **kwargs)
|
| 323 |
+
except:
|
| 324 |
+
self.model.decoder.bidirectional_mask = None
|
| 325 |
+
raise
|
| 326 |
+
self.model.decoder.bidirectional_mask = None
|
| 327 |
+
return output
|
| 328 |
+
setattr(model, 'forward', MethodType(forward, model))
|
| 329 |
+
setattr(model, 'generate', MethodType(generate, model))
|
| 330 |
+
setattr(model, '_prefix_lm_converted', True)
|
| 331 |
+
return model
|
| 332 |
+
_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM)
|
| 333 |
+
CAUSAL_LM_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM, BloomForCausalLM, OPTForCausalLM]
|
| 334 |
+
|
| 335 |
+
def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:
|
| 336 |
+
"""Converts a HuggingFace Causal LM to a Prefix LM.
|
| 337 |
+
|
| 338 |
+
Supported HuggingFace model classes:
|
| 339 |
+
- `GPT2LMHeadModel`
|
| 340 |
+
- `GPTNeoForCausalLM`
|
| 341 |
+
- `GPTNeoXForCausalLM`
|
| 342 |
+
- `GPTJForCausalLM`
|
| 343 |
+
- `BloomForCausalLM`
|
| 344 |
+
- `OPTForCausalLM`
|
| 345 |
+
|
| 346 |
+
Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the
|
| 347 |
+
`generate` method and/or select underlying methods depending on the model class.
|
| 348 |
+
|
| 349 |
+
These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask".
|
| 350 |
+
|
| 351 |
+
Notes on training:
|
| 352 |
+
To actually train the converted model as a Prefix LM, training batches will need to indicate
|
| 353 |
+
the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.
|
| 354 |
+
|
| 355 |
+
**This is not a standard input and requires custom layers either within or after your dataloader.**
|
| 356 |
+
|
| 357 |
+
In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`
|
| 358 |
+
such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.
|
| 359 |
+
That is, the prefix portion of the sequence should not generate any loss. Loss should only be
|
| 360 |
+
generated by the target portion of the sequence.
|
| 361 |
+
|
| 362 |
+
Notes on `GPTNeoForCausalLM`:
|
| 363 |
+
To simplify the implementation, "global" and "local" attention layers are handled differently.
|
| 364 |
+
For "global" layers, we handle conversion as described above. For "local" layers, which use a
|
| 365 |
+
causal attention mask within a restricted local window, we do not alter the masking.
|
| 366 |
+
|
| 367 |
+
Notes on `forward` method conversion:
|
| 368 |
+
After conversion, the `forward` method will handle a new input, `bidirectional_mask`,
|
| 369 |
+
which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions
|
| 370 |
+
belonging to the prefix (prefix tokens can attend to one another bidirectionally), and
|
| 371 |
+
0 indicates token positions belonging to the target.
|
| 372 |
+
|
| 373 |
+
The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing
|
| 374 |
+
causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset
|
| 375 |
+
the causal masks before returning the result.
|
| 376 |
+
|
| 377 |
+
Notes on `generate` method conversion:
|
| 378 |
+
After conversion, the `generate` method will have the same signature but will internally
|
| 379 |
+
convert all causal masks to be purely bidirectional, call the original `generate` method, and
|
| 380 |
+
(where appropriate) reset the causal masks before returning the result.
|
| 381 |
+
|
| 382 |
+
This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token
|
| 383 |
+
"prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates
|
| 384 |
+
each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one
|
| 385 |
+
another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and
|
| 386 |
+
previously-generated tokens (also as expected in a Prefix LM).
|
| 387 |
+
|
| 388 |
+
To preserve the API, the original methods are renamed to `_original_forward` and
|
| 389 |
+
`_original_generate`, and replaced with new `forward` and `generate` methods that wrap
|
| 390 |
+
them, respectively. Although implementation details vary by model class.
|
| 391 |
+
"""
|
| 392 |
+
if isinstance(model, _SUPPORTED_GPT_MODELS):
|
| 393 |
+
return _convert_gpt_causal_lm_to_prefix_lm(model)
|
| 394 |
+
elif isinstance(model, BloomForCausalLM):
|
| 395 |
+
return _convert_bloom_causal_lm_to_prefix_lm(model)
|
| 396 |
+
elif isinstance(model, OPTForCausalLM):
|
| 397 |
+
return _convert_opt_causal_lm_to_prefix_lm(model)
|
| 398 |
+
else:
|
| 399 |
+
raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\n{_SUPPORTED_HF_MODELS}')
|
| 400 |
+
|
| 401 |
+
def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):
|
| 402 |
+
"""Attempts to add bidirectional_mask to batch if missing.
|
| 403 |
+
|
| 404 |
+
Raises:
|
| 405 |
+
KeyError if bidirectional_mask is missing and can't be inferred
|
| 406 |
+
"""
|
| 407 |
+
if 'bidirectional_mask' not in batch:
|
| 408 |
+
if batch.get('mode', None) == 'icl_task':
|
| 409 |
+
batch['bidirectional_mask'] = batch['attention_mask'].clone()
|
| 410 |
+
for (i, continuation_indices) in enumerate(batch['continuation_indices']):
|
| 411 |
+
batch['bidirectional_mask'][i, continuation_indices] = 0
|
| 412 |
+
elif 'labels' in batch and 'attention_mask' in batch:
|
| 413 |
+
batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])
|
| 414 |
+
else:
|
| 415 |
+
raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')
|
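A hedged usage sketch of the conversion utility above (not part of the diff; the 'gpt2' checkpoint, the prompt, and the prefix length are illustrative assumptions):

# Hypothetical example, not from the source tree.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model = convert_hf_causal_lm_to_prefix_lm(GPT2LMHeadModel.from_pretrained('gpt2'))
tok = GPT2Tokenizer.from_pretrained('gpt2')
batch = tok('A short prefix followed by a target', return_tensors='pt')
prefix_len = 4  # illustrative: the first 4 tokens form the bidirectional prefix
bidirectional_mask = torch.zeros_like(batch['input_ids'])
bidirectional_mask[:, :prefix_len] = 1  # 1 = prefix position, 0 = target position
out = model(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], bidirectional_mask=bidirectional_mask)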
InternVL/internvl_chat_llava/llava/model/language_model/mpt/meta_init_context.py
ADDED
@@ -0,0 +1,94 @@
from contextlib import contextmanager
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
@contextmanager
|
| 6 |
+
def init_empty_weights(include_buffers: bool=False):
|
| 7 |
+
"""Meta initialization context manager.
|
| 8 |
+
|
| 9 |
+
A context manager under which models are initialized with all parameters
|
| 10 |
+
on the meta device, therefore creating an empty model. Useful when just
|
| 11 |
+
initializing the model would blow the available RAM.
|
| 12 |
+
|
| 13 |
+
Args:
|
| 14 |
+
include_buffers (`bool`, *optional*, defaults to `False`): Whether or
|
| 15 |
+
not to also put all buffers on the meta device while initializing.
|
| 16 |
+
|
| 17 |
+
Example:
|
| 18 |
+
```python
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
|
| 21 |
+
# Initialize a model with 100 billions parameters in no time and without using any RAM.
|
| 22 |
+
with init_empty_weights():
|
| 23 |
+
tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
<Tip warning={true}>
|
| 27 |
+
|
| 28 |
+
Any model created under this context manager has no weights. As such you can't do something like
|
| 29 |
+
`model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
|
| 30 |
+
|
| 31 |
+
</Tip>
|
| 32 |
+
"""
|
| 33 |
+
with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:
|
| 34 |
+
yield f
|
| 35 |
+
|
| 36 |
+
@contextmanager
|
| 37 |
+
def init_on_device(device: torch.device, include_buffers: bool=False):
|
| 38 |
+
"""Device initialization context manager.
|
| 39 |
+
|
| 40 |
+
A context manager under which models are initialized with all parameters
|
| 41 |
+
on the specified device.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
device (`torch.device`): Device to initialize all parameters on.
|
| 45 |
+
include_buffers (`bool`, *optional*, defaults to `False`): Whether or
|
| 46 |
+
not to also put all buffers on the meta device while initializing.
|
| 47 |
+
|
| 48 |
+
Example:
|
| 49 |
+
```python
|
| 50 |
+
import torch.nn as nn
|
| 51 |
+
|
| 52 |
+
with init_on_device(device=torch.device("cuda")):
|
| 53 |
+
tst = nn.Liner(100, 100) # on `cuda` device
|
| 54 |
+
```
|
| 55 |
+
"""
|
| 56 |
+
old_register_parameter = nn.Module.register_parameter
|
| 57 |
+
if include_buffers:
|
| 58 |
+
old_register_buffer = nn.Module.register_buffer
|
| 59 |
+
|
| 60 |
+
def register_empty_parameter(module, name, param):
|
| 61 |
+
old_register_parameter(module, name, param)
|
| 62 |
+
if param is not None:
|
| 63 |
+
param_cls = type(module._parameters[name])
|
| 64 |
+
kwargs = module._parameters[name].__dict__
|
| 65 |
+
module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
|
| 66 |
+
|
| 67 |
+
def register_empty_buffer(module, name, buffer):
|
| 68 |
+
old_register_buffer(module, name, buffer)
|
| 69 |
+
if buffer is not None:
|
| 70 |
+
module._buffers[name] = module._buffers[name].to(device)
|
| 71 |
+
if include_buffers:
|
| 72 |
+
tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
|
| 73 |
+
else:
|
| 74 |
+
tensor_constructors_to_patch = {}
|
| 75 |
+
|
| 76 |
+
def patch_tensor_constructor(fn):
|
| 77 |
+
|
| 78 |
+
def wrapper(*args, **kwargs):
|
| 79 |
+
kwargs['device'] = device
|
| 80 |
+
return fn(*args, **kwargs)
|
| 81 |
+
return wrapper
|
| 82 |
+
try:
|
| 83 |
+
nn.Module.register_parameter = register_empty_parameter
|
| 84 |
+
if include_buffers:
|
| 85 |
+
nn.Module.register_buffer = register_empty_buffer
|
| 86 |
+
for torch_function_name in tensor_constructors_to_patch.keys():
|
| 87 |
+
setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
|
| 88 |
+
yield
|
| 89 |
+
finally:
|
| 90 |
+
nn.Module.register_parameter = old_register_parameter
|
| 91 |
+
if include_buffers:
|
| 92 |
+
nn.Module.register_buffer = old_register_buffer
|
| 93 |
+
for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
|
| 94 |
+
setattr(torch, torch_function_name, old_torch_function)
|
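For reference, a minimal usage sketch of the context manager above (assuming the repo root is on PYTHONPATH; the import path mirrors the file location above, and the layer size is illustrative, not from the repo):

```python
import torch.nn as nn
# Assumed import path, matching where meta_init_context.py lives in this repo.
from llava.model.language_model.mpt.meta_init_context import init_empty_weights

# Parameters registered inside the context are moved to the meta device,
# so this allocates no real storage.
with init_empty_weights():
    layer = nn.Linear(4096, 4096)  # illustrative size

assert layer.weight.device.type == 'meta'
print(layer.weight.shape)  # shape metadata survives: torch.Size([4096, 4096])
```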
InternVL/internvl_chat_llava/llava/model/language_model/mpt/modeling_mpt.py
ADDED
@@ -0,0 +1,331 @@
"""A simple, flexible implementation of a GPT model.

Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
import math
import warnings
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]

class MPTPreTrainedModel(PreTrainedModel):
    config_class = MPTConfig
    base_model_prefix = 'model'
    _no_split_modules = ['MPTBlock']

class MPTModel(MPTPreTrainedModel):

    def __init__(self, config: MPTConfig):
        config._validate_config()
        super().__init__(config)
        self.attn_impl = config.attn_config['attn_impl']
        self.prefix_lm = config.attn_config['prefix_lm']
        self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
        self.alibi = config.attn_config['alibi']
        self.alibi_bias_max = config.attn_config['alibi_bias_max']
        if config.init_device == 'mixed':
            # NOTE: `dist` is not imported in this file; the 'mixed' init_device
            # path assumes a distributed helper (e.g. composer.utils.dist) is in scope.
            if dist.get_local_rank() == 0:
                config.init_device = 'cpu'
            else:
                config.init_device = 'meta'
        if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
            norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
            raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
        norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
        self.embedding_fraction = config.embedding_fraction
        self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
        if not self.alibi:
            self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
        self.emb_drop = nn.Dropout(config.emb_pdrop)
        self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
        self.norm_f = norm_class(config.d_model, device=config.init_device)
        if config.init_device != 'meta':
            print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
            self.apply(self.param_init_fn)
        self.is_causal = not self.prefix_lm
        self._attn_bias_initialized = False
        self.attn_bias = None
        self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
        if config.no_bias:
            for module in self.modules():
                if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
                    if config.verbose:
                        warnings.warn(f'Removing bias ({module.bias}) from {module}.')
                    module.register_parameter('bias', None)
        if config.verbose and config.verbose > 2:
            print(self)
        if 'verbose' not in self.config.init_config:
            self.config.init_config['verbose'] = self.config.verbose
        if self.config.init_config['verbose'] > 1:
            init_fn_name = self.config.init_config['name']
            warnings.warn(f'Using {init_fn_name} initialization.')
        self.gradient_checkpointing = False

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, value):
        self.wte = value

    @torch.no_grad()
    def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
        if not self._attn_bias_initialized:
            if self.attn_bias_shape:
                self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
                self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
            self._attn_bias_initialized = True
        if self.attn_impl == 'flash':
            return (self.attn_bias, attention_mask)
        if self.attn_bias is not None:
            self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
        attn_bias = self.attn_bias
        if self.prefix_lm:
            assert isinstance(attn_bias, torch.Tensor)
            assert isinstance(prefix_mask, torch.Tensor)
            attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
        if self.attn_uses_sequence_id and sequence_id is not None:
            assert isinstance(attn_bias, torch.Tensor)
            attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
        if attention_mask is not None:
            s_k = attention_mask.shape[-1]
            if attn_bias is None:
                attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
            else:
                _s_k = max(0, attn_bias.size(-1) - s_k)
                attn_bias = attn_bias[:, :, :, _s_k:]
            if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
                raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
            min_val = torch.finfo(attn_bias.dtype).min
            attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
        return (attn_bias, None)

    def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
        (s_k, s_q) = attn_bias.shape[-2:]
        if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
            raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
        seq_len = prefix_mask.shape[-1]
        if seq_len > self.config.max_seq_len:
            raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
        attn_bias = attn_bias[..., :seq_len, :seq_len]
        causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
        prefix = prefix_mask.view(-1, 1, 1, seq_len)
        cannot_attend = ~torch.logical_or(causal, prefix.bool())
        min_val = torch.finfo(attn_bias.dtype).min
        attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
        return attn_bias

    def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
        seq_len = sequence_id.shape[-1]
        if seq_len > self.config.max_seq_len:
            raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
        attn_bias = attn_bias[..., :seq_len, :seq_len]
        cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
        min_val = torch.finfo(attn_bias.dtype).min
        attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
        return attn_bias

    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if attention_mask is not None:
            attention_mask = attention_mask.bool()
        if prefix_mask is not None:
            prefix_mask = prefix_mask.bool()
        if not return_dict:
            raise NotImplementedError('return_dict False is not implemented yet for MPT')
        if output_attentions:
            if self.attn_impl != 'torch':
                raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
        if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
            raise NotImplementedError('MPT does not support training with left padding.')
        if self.prefix_lm and prefix_mask is None:
            raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
        if self.training:
            if self.attn_uses_sequence_id and sequence_id is None:
                raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
            elif self.attn_uses_sequence_id is False and sequence_id is not None:
                warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
        if input_ids is not None:
            S = input_ids.size(1)
            assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
            tok_emb = self.wte(input_ids)
        else:
            assert inputs_embeds is not None
            assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
            S = inputs_embeds.size(1)
            tok_emb = inputs_embeds
        if self.alibi:
            x = tok_emb
        else:
            past_position = 0
            if past_key_values is not None:
                if len(past_key_values) != self.config.n_layers:
                    raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
                past_position = past_key_values[0][0].size(1)
                if self.attn_impl == 'torch':
                    past_position = past_key_values[0][0].size(3)
            if S + past_position > self.config.max_seq_len:
                raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
            pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
            if attention_mask is not None:
                pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
            pos_emb = self.wpe(pos)
            x = tok_emb + pos_emb
        if self.embedding_fraction == 1:
            x = self.emb_drop(x)
        else:
            x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
            assert isinstance(self.emb_drop, nn.Module)
            x = self.emb_drop(x_shrunk)
        (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
        if use_cache and past_key_values is None:
            past_key_values = [() for _ in range(self.config.n_layers)]
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        for (b_idx, block) in enumerate(self.blocks):
            if output_hidden_states:
                assert all_hidden_states is not None
                all_hidden_states = all_hidden_states + (x,)
            past_key_value = past_key_values[b_idx] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
            else:
                (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
            if past_key_values is not None:
                past_key_values[b_idx] = past_key_value
            if output_attentions:
                assert all_self_attns is not None
                all_self_attns = all_self_attns + (attn_weights,)
        x = self.norm_f(x)
        if output_hidden_states:
            assert all_hidden_states is not None
            all_hidden_states = all_hidden_states + (x,)
        return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)

    def param_init_fn(self, module):
        init_fn_name = self.config.init_config['name']
        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)

    def fsdp_wrap_fn(self, module):
        return isinstance(module, MPTBlock)

    def activation_checkpointing_fn(self, module):
        return isinstance(module, MPTBlock)

class MPTForCausalLM(MPTPreTrainedModel):

    def __init__(self, config: MPTConfig):
        super().__init__(config)
        if not config.tie_word_embeddings:
            raise ValueError('MPTForCausalLM only supports tied word embeddings')
        print(f'Instantiating an MPTForCausalLM model from {__file__}')
        self.transformer = MPTModel(config)
        for child in self.transformer.children():
            if isinstance(child, torch.nn.ModuleList):
                continue
            if isinstance(child, torch.nn.Module):
                child._fsdp_wrap = True
        self.logit_scale = None
        if config.logit_scale is not None:
            logit_scale = config.logit_scale
            if isinstance(logit_scale, str):
                if logit_scale == 'inv_sqrt_d_model':
                    logit_scale = 1 / math.sqrt(config.d_model)
                else:
                    raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
            self.logit_scale = logit_scale

    def get_input_embeddings(self):
        return self.transformer.wte

    def set_input_embeddings(self, value):
        self.transformer.wte = value

    def get_output_embeddings(self):
        return self.transformer.wte

    def set_output_embeddings(self, new_embeddings):
        self.transformer.wte = new_embeddings

    def set_decoder(self, decoder):
        self.transformer = decoder

    def get_decoder(self):
        return self.transformer

    def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None):
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if inputs_embeds is not None:
            raise NotImplementedError('inputs_embeds has to be None (for hf/peft support).')
        outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
        logits = self.transformer.wte(outputs.last_hidden_state.to(self.transformer.wte.weight.device), True)
        if self.logit_scale is not None:
            if self.logit_scale == 0:
                warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
            logits *= self.logit_scale
        loss = None
        if labels is not None:
            labels = torch.roll(labels, shifts=-1)
            labels[:, -1] = -100
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1))
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def param_init_fn(self, module):
        init_fn_name = self.config.init_config['name']
        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)

    def fsdp_wrap_fn(self, module):
        return isinstance(module, MPTBlock)

    def activation_checkpointing_fn(self, module):
        return isinstance(module, MPTBlock)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        if inputs_embeds is not None:
            raise NotImplementedError('inputs_embeds is not implemented for MPT yet')
        attention_mask = kwargs['attention_mask'].bool()
        if attention_mask[:, -1].sum() != attention_mask.shape[0]:
            raise NotImplementedError('MPT does not support generation with right padding.')
        if self.transformer.attn_uses_sequence_id and self.training:
            sequence_id = torch.zeros_like(input_ids[:1])
        else:
            sequence_id = None
        if past_key_values is not None:
            input_ids = input_ids[:, -1].unsqueeze(-1)
        if self.transformer.prefix_lm:
            prefix_mask = torch.ones_like(attention_mask)
            if kwargs.get('use_cache') == False:
                raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
        else:
            prefix_mask = None
        return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)}

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Used by HuggingFace generate when using beam search with kv-caching.

        See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
        for an example in transformers.
        """
        reordered_past = []
        for layer_past in past_key_values:
            reordered_past += [tuple((past_state.index_select(0, beam_idx) for past_state in layer_past))]
        return reordered_past
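As a standalone illustration of the additive-mask pattern `MPTModel._attn_bias` relies on (shapes and values below are illustrative, not from the repo): padded key positions are filled with the dtype's most negative value, so after softmax they receive effectively zero attention weight.

```python
import torch

# Illustrative: batch=2, 1 head, 1 query position, 4 key positions.
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]], dtype=torch.bool)
attn_bias = torch.zeros((2, 1, 1, 4))

# Same fill pattern as _attn_bias: ~mask marks padding, filled with finfo.min.
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, 4), min_val)

# Adding the bias to raw scores drives padded keys to ~0 weight after softmax.
scores = torch.randn((2, 1, 1, 4)) + attn_bias
print(scores.softmax(dim=-1))
```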
InternVL/internvl_chat_llava/llava/model/language_model/mpt/norm.py
ADDED
@@ -0,0 +1,56 @@
import torch

def _cast_if_autocast_enabled(tensor):
    if torch.is_autocast_enabled():
        if tensor.device.type == 'cuda':
            dtype = torch.get_autocast_gpu_dtype()
        elif tensor.device.type == 'cpu':
            dtype = torch.get_autocast_cpu_dtype()
        else:
            raise NotImplementedError()
        return tensor.to(dtype=dtype)
    return tensor

class LPLayerNorm(torch.nn.LayerNorm):

    def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
        super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)

    def forward(self, x):
        module_device = x.device
        downcast_x = _cast_if_autocast_enabled(x)
        downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
        downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
        with torch.autocast(enabled=False, device_type=module_device.type):
            return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)

def rms_norm(x, weight=None, eps=1e-05):
    output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    if weight is not None:
        return output * weight
    return output

class RMSNorm(torch.nn.Module):

    def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
        super().__init__()
        self.eps = eps
        if weight:
            self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))
        else:
            self.register_parameter('weight', None)

    def forward(self, x):
        return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)

class LPRMSNorm(RMSNorm):

    def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
        super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)

    def forward(self, x):
        downcast_x = _cast_if_autocast_enabled(x)
        downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
        with torch.autocast(enabled=False, device_type=x.device.type):
            return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype)
NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}
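Unlike LayerNorm, `rms_norm` above skips mean-centering and bias; each row is simply rescaled to unit root-mean-square. A quick numeric check (tensor sizes illustrative):

```python
import torch

torch.manual_seed(0)
x = torch.randn(2, 8)

# Same computation as rms_norm(x) above, with weight=None.
out = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-05)

# Every row now has mean square ~= 1.
print(out.pow(2).mean(-1))
```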
InternVL/internvl_chat_llava/llava/model/language_model/mpt/param_init_fns.py
ADDED
@@ -0,0 +1,181 @@
import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY

def torch_default_param_init_fn_(module: nn.Module, verbose: int=0, **kwargs):
    del kwargs
    if verbose > 1:
        warnings.warn(f"Initializing network using module's reset_parameters attribute")
    if hasattr(module, 'reset_parameters'):
        module.reset_parameters()

def fused_init_helper_(module: nn.Module, init_fn_):
    _fused = getattr(module, '_fused', None)
    if _fused is None:
        raise RuntimeError(f'Internal logic error')
    (dim, splits) = _fused
    splits = (0, *splits, module.weight.size(dim))
    for (s, e) in zip(splits[:-1], splits[1:]):
        slice_indices = [slice(None)] * module.weight.ndim
        slice_indices[dim] = slice(s, e)
        init_fn_(module.weight[slice_indices])

def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
    del kwargs
    if verbose > 1:
        warnings.warn(f'If model has bias parameters they are initialized to 0.')
    if init_div_is_residual is False:
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        div_is_residual = float(init_div_is_residual)
    else:
        div_is_residual = 1.0
        raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')
    if isinstance(module, nn.Linear):
        if hasattr(module, '_fused'):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, '_is_residual', False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn(f'Embedding layer initialized to 0.')
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')
                if lim[0] == lim[1]:
                    warnings.warn(f'Embedding layer initialized to {lim[0]}.')
            else:
                if lim == 0:
                    warnings.warn(f'Embedding layer initialized to 0.')
                lim = [-lim, lim]
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        if verbose > 1:
            warnings.warn(f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.')
        if hasattr(module, 'weight') and module.weight is not None:
            torch.nn.init.ones_(module.weight)
        if hasattr(module, 'bias') and module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
            assert d_model is not None
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for (s, e) in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')

def _normal_init_(std, mean=0.0):
    return partial(torch.nn.init.normal_, mean=mean, std=std)

def _normal_param_init_fn_(module: nn.Module, std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
    del kwargs
    init_fn_ = _normal_init_(std=std)
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.normal_ init fn mean=0.0, std={std}')
    generic_param_init_fn_(module=module, init_fn_=init_fn_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def baseline_param_init_fn_(module: nn.Module, init_std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
    del kwargs
    if init_std is None:
        raise ValueError("You must set model.init_config['init_std'] to a float value to use the default initialization scheme.")
    _normal_param_init_fn_(module=module, std=init_std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def small_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
    del kwargs
    std = math.sqrt(2 / (5 * d_model))
    _normal_param_init_fn_(module=module, std=std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def neox_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
    """From section 2.3.1 of GPT-NeoX-20B:

    An Open-Source Autoregressive Language Model — Black et al. (2022)
    see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
    and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
    """
    del kwargs
    residual_div = n_layers / math.sqrt(10)
    if verbose > 1:
        warnings.warn(f'setting init_div_is_residual to {residual_div}')
    small_param_init_fn_(module=module, d_model=d_model, n_layers=n_layers, init_div_is_residual=residual_div, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def kaiming_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    del kwargs
    if verbose > 1:
        warnings.warn(f'Using nn.init.kaiming_uniform_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
    kaiming_uniform_ = partial(nn.init.kaiming_uniform_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(module=module, init_fn_=kaiming_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def kaiming_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    del kwargs
    if verbose > 1:
        warnings.warn(f'Using nn.init.kaiming_normal_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
    kaiming_normal_ = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(module=module, init_fn_=kaiming_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def xavier_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    del kwargs
    xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.xavier_uniform_ init fn with parameters: ' + f'gain={init_gain}')
    generic_param_init_fn_(module=module, init_fn_=xavier_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)

def xavier_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.xavier_normal_ init fn with parameters: ' + f'gain={init_gain}')
    generic_param_init_fn_(module=module, init_fn_=xavier_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}
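The `small_init_` entry uses std = sqrt(2 / (5 * d_model)). A sketch of driving the registry module-by-module, as `MPTModel.param_init_fn` does (the toy module and sizes are illustrative, and the import path assumes the repo root is on PYTHONPATH):

```python
import math
import torch.nn as nn
# Assumed import path, matching where param_init_fns.py lives in this repo.
from llava.model.language_model.mpt.param_init_fns import MODEL_INIT_REGISTRY

d_model, n_layers = 512, 8  # illustrative sizes
model = nn.Sequential(nn.Linear(d_model, d_model), nn.Linear(d_model, d_model))

# Apply the chosen scheme to every submodule, mirroring param_init_fn.
init_fn = MODEL_INIT_REGISTRY['small_init_']
for module in model.modules():
    init_fn(module=module, n_layers=n_layers, d_model=d_model)

# Linear weights should now have std close to sqrt(2 / (5 * d_model)).
print(math.sqrt(2 / (5 * d_model)), model[0].weight.std().item())
```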
InternVL/internvl_chat_llava/llava/model/multimodal_encoder/builder.py
ADDED
@@ -0,0 +1,12 @@
import os
from .clip_encoder import CLIPVisionTower


def build_vision_tower(vision_tower_cfg, **kwargs):
    vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
    is_absolute_path_exists = os.path.exists(vision_tower)
    if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion") \
            or "intern" in vision_tower.lower():
        return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)

    raise ValueError(f'Unknown vision tower: {vision_tower}')
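For clarity, a small sketch of the name-resolution rule `build_vision_tower` applies (the config object and tower name are hypothetical; actually constructing `CLIPVisionTower` would load real weights):

```python
from types import SimpleNamespace

# `mm_vision_tower` takes precedence over `vision_tower` when both exist.
cfg = SimpleNamespace(mm_vision_tower='openai/clip-vit-large-patch14-336')
name = getattr(cfg, 'mm_vision_tower', getattr(cfg, 'vision_tower', None))
print(name)

# build_vision_tower(cfg) accepts this name because it starts with "openai";
# existing local paths, "laion*" names, and names containing "intern" are
# accepted the same way, and anything else raises ValueError.
```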