Viglong committed
Commit df2d698 · verified · Parent: 8b97fb1

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. checkpoints/DrvtFTPP_G_projectors/A/best.pt +3 -0
  2. checkpoints/DrvtFTPP_G_projectors/AT/best.pt +3 -0
  3. checkpoints/DrvtFTPP_G_projectors/AV/best.pt +3 -0
  4. checkpoints/DrvtFTPP_G_projectors/T/best.pt +3 -0
  5. checkpoints/DrvtFTPP_G_projectors/TV/best.pt +3 -0
  6. checkpoints/DrvtFTPP_G_projectors/V/best.pt +3 -0
  7. checkpoints/DrvtFTPP_G_projectors/mix/best.pt +3 -0
  8. checkpoints/DrvtFTPP_M_projectors/A/best.pt +3 -0
  9. checkpoints/DrvtFTPP_M_projectors/AT/best.pt +3 -0
  10. checkpoints/DrvtFTPP_M_projectors/AV/best.pt +3 -0
  11. checkpoints/DrvtFTPP_M_projectors/T/best.pt +3 -0
  12. checkpoints/DrvtFTPP_M_projectors/TV/best.pt +3 -0
  13. checkpoints/DrvtFTPP_M_projectors/V/best.pt +3 -0
  14. checkpoints/DrvtFTPP_M_projectors/mix/best.pt +3 -0
  15. checkpoints/DrvtFT_audio_with_head.pt +3 -0
  16. checkpoints/Drvt_projectors/A/best.pt +3 -0
  17. checkpoints/Drvt_projectors/AT/best.pt +3 -0
  18. checkpoints/Drvt_projectors/AV/best.pt +3 -0
  19. checkpoints/Drvt_projectors/T/best.pt +3 -0
  20. checkpoints/Drvt_projectors/TV/best.pt +3 -0
  21. checkpoints/Drvt_projectors/V/best.pt +3 -0
  22. checkpoints/Drvt_projectors/mix/best.pt +3 -0
  23. checkpoints/Drvt_projectors_mini/A/best.pt +3 -0
  24. checkpoints/Drvt_projectors_mini/AT/best.pt +3 -0
  25. checkpoints/Drvt_projectors_mini/AV/best.pt +3 -0
  26. checkpoints/Drvt_projectors_mini/T/best.pt +3 -0
  27. checkpoints/Drvt_projectors_mini/TV/best.pt +3 -0
  28. checkpoints/Drvt_projectors_mini/V/best.pt +3 -0
  29. checkpoints/Drvt_projectors_mini/mix/best.pt +3 -0
  30. checkpoints/IBPP_G_projectors/A/best.pt +3 -0
  31. checkpoints/IBPP_G_projectors/AT/best.pt +3 -0
  32. checkpoints/IBPP_G_projectors/AV/best.pt +3 -0
  33. checkpoints/IBPP_G_projectors/T/best.pt +3 -0
  34. checkpoints/IBPP_G_projectors/TV/best.pt +3 -0
  35. checkpoints/IBPP_G_projectors/V/best.pt +3 -0
  36. checkpoints/IBPP_G_projectors/mix/best.pt +3 -0
  37. checkpoints/IBPP_M_projectors/A/best.pt +3 -0
  38. checkpoints/IBPP_M_projectors/AT/best.pt +3 -0
  39. checkpoints/IBPP_M_projectors/AV/best.pt +3 -0
  40. checkpoints/IBPP_M_projectors/T/best.pt +3 -0
  41. checkpoints/IBPP_M_projectors/TV/best.pt +3 -0
  42. checkpoints/IBPP_M_projectors/V/best.pt +3 -0
  43. checkpoints/IBPP_M_projectors/mix/best.pt +3 -0
  44. checkpoints/InternVL-14B-224px/README.md +123 -0
  45. checkpoints/InternVL-14B-224px/__init__.py +87 -0
  46. checkpoints/InternVL-14B-224px/config.json +190 -0
  47. checkpoints/InternVL-14B-224px/configuration_intern_vit.py +117 -0
  48. checkpoints/InternVL-14B-224px/configuration_internvl.py +108 -0
  49. checkpoints/InternVL-14B-224px/flash_attention.py +76 -0
  50. checkpoints/InternVL-14B-224px/modeling_intern_vit.py +342 -0
checkpoints/DrvtFTPP_G_projectors/A/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cdd907f3f521c1c67d5bc159ff8c1cf52302d8650061103982fd3a2e53867e0
+ size 10545547
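
Note: each `best.pt` above is stored as a Git LFS pointer, not the weights themselves; the three-line stub records only the LFS spec version, the SHA-256 of the real blob, and its size in bytes (the remaining `best.pt` entries below follow the same pattern). As a minimal sketch, a fetched checkpoint can be checked against its pointer like this; the path is illustrative and assumes the blob was pulled with `git lfs pull`:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file so large checkpoints don't need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Should print the oid from the pointer above (0cdd907f...) if the download is intact.
print(sha256_of('checkpoints/DrvtFTPP_G_projectors/A/best.pt'))
```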
checkpoints/DrvtFTPP_G_projectors/AT/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:337f7af1ac4d3614875186a0f37d4e750a45b26d4339deac7b134479f880bcca
+ size 10545547
checkpoints/DrvtFTPP_G_projectors/AV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2b68c599e0f294c0c18887ba07ec6b5e0e5b8bc267ab63478bc85f77aeda7cf
+ size 10545547
checkpoints/DrvtFTPP_G_projectors/T/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1245490845c3791afa5fb4a1e94f0552930fc62bb1ec8c029bef3d188eba3800
+ size 10545547
checkpoints/DrvtFTPP_G_projectors/TV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07804fc20484736479a6dff15f56b7ac7aa4dd4390db8791ba382ccba53541cb
+ size 10545547
checkpoints/DrvtFTPP_G_projectors/V/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0abc3db3f0e140926ea8cc191133649cf1bf80a13af8056c67a17783658a76e9
+ size 10545547
checkpoints/DrvtFTPP_G_projectors/mix/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88abd54902a0ea226ea9a40dd6f48e0d8fac9d8c5fe5f0803280f0e950193c4d
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/A/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cad30b7e2e96c8faeaf5b7444f58554d29103562b1457ccc8fe17e4044c4514
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/AT/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9083b09b55752511aa063ae8ca65f7cf4c229ad6cec128c88b48d41aa9afaaa9
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/AV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c11d366946d3910ecfe82544479018f03021a11706247e9a1f2eb29feddb656
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/T/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:351510cf2ab34afef47c520a7b333c523dae419902c135f703f4d3d7bf65c580
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/TV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:353282f8329555203cffd6609c7a525ecd4ab05fd304ace739a13ede3ffeea8d
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/V/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3edbbcbfedff32adaf7780437b994a40a0cba8790364b3236923f4b046cfee5d
+ size 10545547
checkpoints/DrvtFTPP_M_projectors/mix/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06dfc8d6f8213cb88c2893da3b0eb7692ab519b10a19c712fb4f8f2600ab3aa9
+ size 10545547
checkpoints/DrvtFT_audio_with_head.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92eed730638818bc3a30303890157c4261c34b55ed5b7629d543744dfb0db3a3
+ size 448190294
checkpoints/Drvt_projectors/A/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aef16fc2c4ada977d51114bd7965e622a01a6048e84bfa8929d71fc494171f46
+ size 23137715
checkpoints/Drvt_projectors/AT/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb6a1a3c302cfad5eaf522789969d640890a0a8a0b66769791064c72d3ecf766
+ size 23137715
checkpoints/Drvt_projectors/AV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b50cae8217774821a206fc08e323ccf6c6eb64b9dbcd9e77e4014dd6d2da4530
+ size 23137715
checkpoints/Drvt_projectors/T/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c01a024f4b919a665967b9e10db02765710453df297c74401243c25aacdee9c5
+ size 23137715
checkpoints/Drvt_projectors/TV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42aec0545ea9e67a94449d1f1d5e378bc989a3f95f0ab506e39dfeb67c549aa4
+ size 23137715
checkpoints/Drvt_projectors/V/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:060286c55b887bc0906b27da771565a109de8a0c55c317389ca698c715ba594f
+ size 23137715
checkpoints/Drvt_projectors/mix/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b817758bbf078bad9c0753988860e976c969c1e91516aafc015d0c66f10e8417
+ size 23137715
checkpoints/Drvt_projectors_mini/A/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:585a6c5093e6fe0ef79c147f741b262667623d018f9b4ac05b79b21a32ef3ef7
+ size 14739851
checkpoints/Drvt_projectors_mini/AT/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e72e12d0bf3f4c54139b2d0fa463f2c8a9a19d45feb879029713a4833c7e5ce6
+ size 14739851
checkpoints/Drvt_projectors_mini/AV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17122f541cacc387bd42e17d837a3cc92d1480090c73e5680482639164c401e7
+ size 14739851
checkpoints/Drvt_projectors_mini/T/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cc41b0abb0438b7d9f4ea310a8c1c8377025c81717bcb5282fbc907416614e8
+ size 14739851
checkpoints/Drvt_projectors_mini/TV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11a6333df4c641f428c37c5d8fe4518d9793988c5fddd2293e2df57d09b9de18
+ size 14739851
checkpoints/Drvt_projectors_mini/V/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0c7785c2f204b5245a4c59bdf7e14ba18a2126430a74da4124eea7bc76cc255
+ size 14739851
checkpoints/Drvt_projectors_mini/mix/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:223c95fbd7aa7b938b816fd74d15706ed315b8e538233b09facf2aa5526983fa
+ size 14739851
checkpoints/IBPP_G_projectors/A/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8681003740d414c7d4296250fe5e007ad5564f7192e8c91e4e91a08267b0df8
+ size 12647819
checkpoints/IBPP_G_projectors/AT/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12bb18d3370f0bb4b990db334fa0ab20914ef0a1be11bf8ce0fa7e9b42f6b9c7
+ size 12647819
checkpoints/IBPP_G_projectors/AV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d71dc39ebdb6fc1dd7a6bc10e01d2d0f94d4202934ad293ba3d0c888a81e01c4
+ size 12647819
checkpoints/IBPP_G_projectors/T/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea7004adee9d2200cf8d03598b356477b85ac79afd78e5125fb21003f56c7c2f
+ size 12647819
checkpoints/IBPP_G_projectors/TV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d180f7b38df7f9973e7baf06393c46c2fed23f74485a7b57ebf23f139cc406f7
+ size 12647819
checkpoints/IBPP_G_projectors/V/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:117ee65328706d7ab310a59a303656ff6114d23f822b97a86531b843fdf88976
+ size 12647819
checkpoints/IBPP_G_projectors/mix/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf38dd5fe3e36ebfddc7f589738e6d3d4d7e983bdf7d3f2af2a7c4bbfb5edd60
+ size 12647819
checkpoints/IBPP_M_projectors/A/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6e262081db30edb8150051563b0e7da007a925eadb01fb94d4349572da80649
+ size 12647819
checkpoints/IBPP_M_projectors/AT/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e765e8916f0ccffa9d0d0199ff4936135bd28c488e392b29237e0d6858203cca
+ size 12647819
checkpoints/IBPP_M_projectors/AV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a80860d133cfb7c50c2419481b8dee1d98ab28e604d56ea5d14e7dd6ec16784c
+ size 12647819
checkpoints/IBPP_M_projectors/T/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f2e5ee0ca99f0070ce6660c46e20829e615db8b2d3a0010ac64d1b830808951
+ size 12647819
checkpoints/IBPP_M_projectors/TV/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5482c7a73ca56b9b16b746d61c03b20279cebf049a9c00403577b2ab0adebddb
+ size 12647819
checkpoints/IBPP_M_projectors/V/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86bb1c9bd9cc69db0447404482bba2fe7d5817db4b68cf5a5540e805563fd844
+ size 12647819
checkpoints/IBPP_M_projectors/mix/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df6e05d23288d534b5a8ee4eeedd27cfda6fb98cfd211848744fc38a12cb3ab4
+ size 12647819
checkpoints/InternVL-14B-224px/README.md ADDED
@@ -0,0 +1,123 @@
+ ---
+ license: mit
+ datasets:
+ - laion/laion2B-en
+ - laion/laion-coco
+ - laion/laion2B-multi
+ - kakaobrain/coyo-700m
+ - conceptual_captions
+ - wanng/wukong100m
+ ---
+
+ # Model Card for InternVL-14B-224px
+
+ ## What is InternVL?
+
+ \[[Paper](https://arxiv.org/abs/2312.14238)\] \[[GitHub](https://github.com/OpenGVLab/InternVL)\]
+
+ InternVL scales up the ViT to _**6B parameters**_ and aligns it with an LLM.
+
+ It is _**the largest open-source vision/vision-language foundation model (14B)**_ to date, achieving _**32 state-of-the-art**_ results on a wide range of tasks such as visual perception, cross-modal retrieval, and multimodal dialogue.
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/f1jTYvyxyYbHRalvgtKY2.png)
+
+ ## Model Details
+ - **Model Type:** vision-language foundation model
+ - **Model Stats:**
+   - Params: 14B
+   - Image size: 224 x 224
+ - **Pretrain Dataset:** LAION-en, LAION-COCO, COYO, CC12M, CC3M, SBU, Wukong, LAION-multi
+
+ ## Model Usage
+
+ ```python
+ import torch
+ from PIL import Image
+ from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor
+
+ model = AutoModel.from_pretrained(
+     'OpenGVLab/InternVL-14B-224px',
+     torch_dtype=torch.bfloat16,
+     low_cpu_mem_usage=True,
+     trust_remote_code=True).cuda().eval()
+
+ image_processor = CLIPImageProcessor.from_pretrained('OpenGVLab/InternVL-14B-224px')
+
+ tokenizer = AutoTokenizer.from_pretrained(
+     'OpenGVLab/InternVL-14B-224px', use_fast=False, add_eos_token=True)
+ tokenizer.pad_token_id = 0  # set pad_token_id to 0
+
+ images = [
+     Image.open('./examples/image1.jpg').convert('RGB'),
+     Image.open('./examples/image2.jpg').convert('RGB'),
+     Image.open('./examples/image3.jpg').convert('RGB')
+ ]
+ prefix = 'summarize:'
+ texts = [
+     prefix + 'a photo of a red panda',  # English
+     prefix + '一张熊猫的照片',  # Chinese: "a photo of a panda"
+     prefix + '二匹の猫の写真'  # Japanese: "a photo of two cats"
+ ]
+
+ pixel_values = image_processor(images=images, return_tensors='pt').pixel_values
+ pixel_values = pixel_values.to(torch.bfloat16).cuda()
+ input_ids = tokenizer(texts, return_tensors='pt', max_length=80,
+                       truncation=True, padding='max_length').input_ids.cuda()
+
+ # InternVL-C
+ logits_per_image, logits_per_text = model(
+     image=pixel_values, text=input_ids, mode='InternVL-C')
+ probs = logits_per_image.softmax(dim=-1)
+ # tensor([[9.9609e-01, 5.2185e-03, 6.0070e-08],
+ #         [2.2949e-02, 9.7656e-01, 5.9903e-06],
+ #         [3.2932e-06, 7.4863e-05, 1.0000e+00]], device='cuda:0',
+ #        dtype=torch.bfloat16, grad_fn=<SoftmaxBackward0>)
+
+ # InternVL-G
+ logits_per_image, logits_per_text = model(
+     image=pixel_values, text=input_ids, mode='InternVL-G')
+ probs = logits_per_image.softmax(dim=-1)
+ # tensor([[9.9609e-01, 3.1738e-03, 3.6322e-08],
+ #         [8.6060e-03, 9.9219e-01, 2.8759e-06],
+ #         [1.7583e-06, 3.1233e-05, 1.0000e+00]], device='cuda:0',
+ #        dtype=torch.bfloat16, grad_fn=<SoftmaxBackward0>)
+
+ # please set add_eos_token to False for generation
+ tokenizer.add_eos_token = False
+ image = Image.open('./examples/image1.jpg').convert('RGB')
+ pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
+ pixel_values = pixel_values.to(torch.bfloat16).cuda()
+
+ tokenized = tokenizer("English caption:", return_tensors='pt')
+ pred = model.generate(
+     pixel_values=pixel_values,
+     input_ids=tokenized.input_ids.cuda(),
+     attention_mask=tokenized.attention_mask.cuda(),
+     num_beams=5,
+     min_new_tokens=8,
+ )
+ caption = tokenizer.decode(pred[0].cpu(), skip_special_tokens=True).strip()
+ # English caption: a red panda sitting on top of a wooden platform
+ ```
+
+ ## Citation
+
+ If you find this project useful in your research, please consider citing:
+
+ ```BibTeX
+ @article{chen2023internvl,
+   title={InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks},
+   author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and Li, Bin and Luo, Ping and Lu, Tong and Qiao, Yu and Dai, Jifeng},
+   journal={arXiv preprint arXiv:2312.14238},
+   year={2023}
+ }
+ ```
+
+ ## Acknowledgement
+
+ InternVL is built with reference to the code of the following projects: [OpenAI CLIP](https://github.com/openai/CLIP), [Open CLIP](https://github.com/mlfoundations/open_clip), [CLIP Benchmark](https://github.com/LAION-AI/CLIP_benchmark), [EVA](https://github.com/baaivision/EVA/tree/master), [InternImage](https://github.com/OpenGVLab/InternImage), [ViT-Adapter](https://github.com/czczup/ViT-Adapter), [MMSegmentation](https://github.com/open-mmlab/mmsegmentation), [Transformers](https://github.com/huggingface/transformers), [DINOv2](https://github.com/facebookresearch/dinov2), [BLIP-2](https://github.com/salesforce/LAVIS/tree/main/projects/blip2), [Qwen-VL](https://github.com/QwenLM/Qwen-VL/tree/master/eval_mm), and [LLaVA-1.5](https://github.com/haotian-liu/LLaVA). Thanks for their awesome work!
checkpoints/InternVL-14B-224px/__init__.py ADDED
@@ -0,0 +1,87 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2023 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import torch
+ import torch.nn as nn
+ import torchvision.transforms as T
+ from torchvision.transforms import InterpolationMode
+ from transformers import LlamaTokenizer
+
+ from .configuration_intern_vit import InternVisionConfig
+ from .configuration_internvl import InternVLConfig
+ from .modeling_intern_vit import InternVisionModel
+ from .modeling_internvl import InternVL_C, InternVL_G, InternVLModel
+
+ __all__ = ['InternVisionConfig', 'InternVisionModel', 'InternVLConfig',
+            'InternVLModel', 'InternVL_C', 'InternVL_G']
+
+
+ # Wraps LlamaTokenizer and prefixes every input with "summarize:"
+ class InternVLTokenizer(nn.Module):
+     def __init__(self, model_path):
+         super(InternVLTokenizer, self).__init__()
+         self.tokenizer = LlamaTokenizer.from_pretrained(model_path)
+         self.tokenizer.pad_token = ' '  # allow padding
+         self.tokenizer.add_eos_token = True
+
+     def forward(self, text, prefix='summarize:'):
+         if isinstance(text, str):
+             text = prefix + text
+         elif isinstance(text, list):
+             text = [prefix + item for item in text]
+         text = self.tokenizer(text, return_tensors='pt', max_length=80,
+                               truncation=True, padding='max_length').input_ids
+         return text
+
+
+ def build_transform(task, image_size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
+     if task == 'retrieval':
+         # resize directly to the target resolution
+         transform = T.Compose([
+             T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+             T.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
+             T.ToTensor(),
+             T.Normalize(mean=mean, std=std)])
+     else:
+         # resize the short side, then center-crop
+         transform = T.Compose([
+             T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+             T.Resize(image_size, interpolation=InterpolationMode.BICUBIC),
+             T.CenterCrop(image_size),
+             T.ToTensor(),
+             T.Normalize(mean=mean, std=std)])
+     return transform
+
+
+ def load_internvl_c_huggingface(ckpt_path, device, task):
+     model = InternVL_C.from_pretrained(ckpt_path, torch_dtype=torch.float16).to(device)
+     if model.config.use_backbone_lora:
+         model.vision_model.merge_and_unload()
+         model.vision_model = model.vision_model.model
+     if model.config.use_qllama_lora:
+         model.qllama.merge_and_unload()
+         model.qllama = model.qllama.model
+     if model.config.force_image_size is not None:
+         image_size = model.config.force_image_size
+     else:
+         image_size = model.config.vision_config.image_size
+     transform = build_transform(task, image_size)
+     tokenizer = InternVLTokenizer(ckpt_path)
+     return model, transform, tokenizer
+
+
+ def load_internvl_g_huggingface(ckpt_path, device, task):
+     model = InternVL_G.from_pretrained(ckpt_path, torch_dtype=torch.float16).to(device)
+     if model.config.use_backbone_lora:
+         model.vision_model.merge_and_unload()
+         model.vision_model = model.vision_model.model
+     if model.config.use_qllama_lora:
+         model.qllama.merge_and_unload()
+         model.qllama = model.qllama.model
+     if model.config.force_image_size is not None:
+         image_size = model.config.force_image_size
+     else:
+         image_size = model.config.vision_config.image_size
+     transform = build_transform(task, image_size)
+     tokenizer = InternVLTokenizer(ckpt_path)
+     return model, transform, tokenizer
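
For context, a hedged usage sketch of the loader above. The package name is hypothetical (it depends on where this folder sits on `sys.path`), and a CUDA device, downloaded weights, and an example image are assumed:

```python
import torch
from PIL import Image

# Hypothetical import: assumes this checkpoint folder is importable as a package.
from internvl_14b_224px import load_internvl_c_huggingface

model, transform, tokenizer = load_internvl_c_huggingface(
    './checkpoints/InternVL-14B-224px', device='cuda', task='retrieval')

# transform resizes and normalizes a PIL image; tokenizer adds the 'summarize:' prefix.
pixel_values = transform(Image.open('examples/image1.jpg')).unsqueeze(0)
pixel_values = pixel_values.to(device='cuda', dtype=torch.float16)
input_ids = tokenizer('a photo of a red panda').cuda()
```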
checkpoints/InternVL-14B-224px/config.json ADDED
@@ -0,0 +1,190 @@
+ {
+   "_commit_hash": null,
+   "_name_or_path": "./",
+   "architectures": [
+     "InternVLModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_internvl.InternVLConfig",
+     "AutoModel": "modeling_internvl.InternVLModel"
+   },
+   "attn_pool_num_heads": 16,
+   "clip_embed_dim": 768,
+   "force_image_size": null,
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "label_smoothing": 0.0,
+   "max_txt_len": 32,
+   "model_type": "internvl",
+   "num_query_token": 96,
+   "qllama_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": [
+       "LlamaForCausalLM"
+     ],
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 1,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_frequency": 2,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "silu",
+     "hidden_size": 4096,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 11008,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 2048,
+     "max_sequence_length": 2048,
+     "min_length": 0,
+     "model_type": "llama",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 32,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 32,
+     "num_key_value_heads": 32,
+     "num_query_token": 96,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "prefix": null,
+     "pretraining_tp": 1,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": null,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "float16",
+     "torchscript": false,
+     "transformers_version": "4.32.0",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": false,
+     "vocab_size": 49954
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": null,
+   "use_backbone_lora": 0,
+   "use_cache": false,
+   "use_decoder_only_language_model": true,
+   "use_qllama_lora": 0,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "drop_path_rate": 0.0,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_size": 3200,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 0.1,
+     "initializer_range": 1e-10,
+     "intermediate_size": 12800,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "intern_vit_6b",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 25,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 48,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qk_normalization": true,
+     "qkv_bias": false,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.32.0",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_flash_attn": true
+   }
+ }
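
A small sketch of reading this composed config through `transformers` (assumes a local copy of the folder; `trust_remote_code=True` is needed so `AutoConfig` can resolve the `auto_map` entries above):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained('./checkpoints/InternVL-14B-224px',
                                 trust_remote_code=True)
print(cfg.model_type)                       # 'internvl'
print(cfg.num_query_token)                  # 96
print(cfg.vision_config.num_hidden_layers)  # 48 (InternViT-6B)
print(cfg.qllama_config.vocab_size)         # 49954 (QLLaMA)
```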
checkpoints/InternVL-14B-224px/configuration_intern_vit.py ADDED
@@ -0,0 +1,117 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2023 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+ import os
+ from typing import Union
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
+     instantiate a vision encoder according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             Number of color channels in the input images (e.g., 3 for RGB).
+         patch_size (`int`, *optional*, defaults to 14):
+             The size (resolution) of each patch.
+         image_size (`int`, *optional*, defaults to 224):
+             The size (resolution) of each image.
+         qkv_bias (`bool`, *optional*, defaults to `False`):
+             Whether to add a bias to the queries and values in the self-attention layers.
+         hidden_size (`int`, *optional*, defaults to 3200):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_attention_heads (`int`, *optional*, defaults to 25):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 12800):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         qk_normalization (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the queries and keys in the self-attention layers.
+         num_hidden_layers (`int`, *optional*, defaults to 48):
+             Number of hidden layers in the Transformer encoder.
+         use_flash_attn (`bool`, *optional*, defaults to `True`):
+             Whether to use the flash attention mechanism.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the layer normalization layers.
+         dropout (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         drop_path_rate (`float`, *optional*, defaults to 0.0):
+             Dropout rate for stochastic depth.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         initializer_factor (`float`, *optional*, defaults to 0.1):
+             A factor for layer scale.
+     """
+
+     model_type = 'intern_vit_6b'
+
+     def __init__(
+             self,
+             num_channels=3,
+             patch_size=14,
+             image_size=224,
+             qkv_bias=False,
+             hidden_size=3200,
+             num_attention_heads=25,
+             intermediate_size=12800,
+             qk_normalization=True,
+             num_hidden_layers=48,
+             use_flash_attn=True,
+             hidden_act='gelu',
+             layer_norm_eps=1e-6,
+             dropout=0.0,
+             drop_path_rate=0.0,
+             attention_dropout=0.0,
+             initializer_range=0.02,
+             initializer_factor=0.1,
+             **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.dropout = dropout
+         self.drop_path_rate = drop_path_rate
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.image_size = image_size
+         self.initializer_range = initializer_range
+         self.initializer_factor = initializer_factor
+         self.attention_dropout = attention_dropout
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.qkv_bias = qkv_bias
+         self.qk_normalization = qk_normalization
+         self.use_flash_attn = use_flash_attn
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         if 'vision_config' in config_dict:
+             config_dict = config_dict['vision_config']
+
+         if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
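
As a quick illustration of the defaults above (a sketch; assumes the file is importable as a local module, which works since it only depends on `transformers`), the 224px/14px setup yields a 16x16 patch grid plus one class token:

```python
from configuration_intern_vit import InternVisionConfig  # assumed local import

cfg = InternVisionConfig()  # defaults describe InternViT-6B
patches_per_side = cfg.image_size // cfg.patch_size   # 224 // 14 = 16
print(patches_per_side ** 2)                          # 256 patch tokens (+1 CLS)
print(cfg.hidden_size, cfg.num_attention_heads)       # 3200 25
print(cfg.hidden_size // cfg.num_attention_heads)     # 128 per-head dim
```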
checkpoints/InternVL-14B-224px/configuration_internvl.py ADDED
@@ -0,0 +1,108 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2023 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+ import copy
+
+ from transformers import LlamaConfig
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVLConfig(PretrainedConfig):
+     r"""
+     [`InternVLConfig`] is the configuration class to store the configuration of a
+     [`InternVLModel`]. It is used to instantiate an InternVLModel according to the specified
+     arguments, defining the InternViT-6B and QLLaMA configs. Instantiating a configuration with
+     the defaults will yield a similar configuration to that of the InternVL architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vision_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`InternVisionConfig`].
+         qllama_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`LlamaConfig`].
+         clip_embed_dim (`int`, *optional*, defaults to 768):
+             Size of the embeddings from the CLIP model.
+         attn_pool_num_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads used in the attention pooling layers.
+         num_query_token (`int`, *optional*, defaults to 96):
+             Number of query tokens used in the transformer.
+         label_smoothing (`float`, *optional*, defaults to 0.0):
+             The amount of label smoothing to apply.
+         cross_attention_frequency (`int`, *optional*, defaults to 2):
+             The frequency of cross-attention layers in the model.
+         use_backbone_lora (`int`, *optional*, defaults to 0):
+             If non-zero, indicates the use of LoRA in the backbone of the model.
+         use_qllama_lora (`int`, *optional*, defaults to 0):
+             If non-zero, indicates the use of LoRA in the QLLaMA of the model.
+         force_image_size (`int` or `None`, *optional*):
+             If not None, forces the model to use this specific image size.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         kwargs (*optional*):
+             Dictionary of additional keyword arguments.
+     """
+
+     model_type = 'internvl'
+     is_composition = True
+
+     def __init__(
+             self,
+             vision_config=None,
+             qllama_config=None,
+             clip_embed_dim=768,
+             attn_pool_num_heads=16,
+             num_query_token=96,
+             label_smoothing=0.0,
+             cross_attention_frequency=2,
+             use_backbone_lora=0,
+             use_qllama_lora=0,
+             force_image_size=None,
+             initializer_range=0.02,
+             **kwargs):
+         super().__init__(**kwargs)
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
+
+         if qllama_config is None:
+             qllama_config = {}
+             logger.info(
+                 'qllama_config is None. Initializing the qllama config with default values (`LlamaConfig`).')
+
+         self.vision_config = InternVisionConfig(**vision_config)
+         self.qllama_config = LlamaConfig(**qllama_config)
+         self.qllama_config.num_query_token = num_query_token
+         self.qllama_config.cross_attention_frequency = cross_attention_frequency
+         self.hidden_size = self.qllama_config.hidden_size
+
+         self.clip_embed_dim = clip_embed_dim
+         self.attn_pool_num_heads = attn_pool_num_heads
+         self.num_query_token = num_query_token
+         self.label_smoothing = label_smoothing
+         self.use_backbone_lora = use_backbone_lora
+         self.use_qllama_lora = use_qllama_lora
+         self.force_image_size = force_image_size
+         self.initializer_range = initializer_range
+
+     def to_dict(self):
+         """
+         Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
+
+         Returns:
+             `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+         """
+         output = copy.deepcopy(self.__dict__)
+         output['vision_config'] = self.vision_config.to_dict()
+         output['qllama_config'] = self.qllama_config.to_dict()
+         output['model_type'] = self.__class__.model_type
+         return output
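
A brief sketch of composing this config programmatically. Because the file uses a relative import, the folder must be loaded as a package; the package name below is hypothetical. When the sub-config dicts are omitted, both fall back to their documented defaults:

```python
# Hypothetical package import (the relative import of configuration_intern_vit
# requires package context).
from internvl_14b_224px.configuration_internvl import InternVLConfig

config = InternVLConfig()  # vision_config / qllama_config default-initialized
print(config.vision_config.model_type)       # 'intern_vit_6b'
print(config.qllama_config.num_query_token)  # 96, injected in __init__
print(sorted(k for k in config.to_dict() if k.endswith('_config')))
# ['qllama_config', 'vision_config']
```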
checkpoints/InternVL-14B-224px/flash_attention.py ADDED
@@ -0,0 +1,76 @@
+ # https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
+ import torch
+ import torch.nn as nn
+ from einops import rearrange
+
+ try:  # v1
+     from flash_attn.flash_attn_interface import \
+         flash_attn_unpadded_qkvpacked_func
+ except ImportError:  # v2 renamed the function
+     from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
+
+ from flash_attn.bert_padding import pad_input, unpad_input
+
+
+ class FlashAttention(nn.Module):
+     """Implement the scaled dot product attention with softmax.
+     Arguments
+     ---------
+         softmax_scale: The temperature to use for the softmax attention.
+                        (default: 1/sqrt(d_keys) where d_keys is computed at runtime)
+         attention_dropout: The dropout rate to apply to the attention
+                            (default: 0.0)
+     """
+
+     def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
+         super().__init__()
+         self.softmax_scale = softmax_scale
+         self.dropout_p = attention_dropout
+
+     def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
+                 max_s=None, need_weights=False):
+         """Implements the multihead softmax attention.
+         Arguments
+         ---------
+             qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
+                  if unpadded: (nnz, 3, h, d)
+             key_padding_mask: a bool tensor of shape (B, S)
+         """
+         assert not need_weights
+         assert qkv.dtype in [torch.float16, torch.bfloat16]
+         assert qkv.is_cuda
+
+         if cu_seqlens is None:
+             batch_size = qkv.shape[0]
+             seqlen = qkv.shape[1]
+             if key_padding_mask is None:
+                 # no padding: flatten the batch and use uniform sequence offsets
+                 qkv = rearrange(qkv, 'b s ... -> (b s) ...')
+                 max_s = seqlen
+                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
+                                           device=qkv.device)
+                 output = flash_attn_unpadded_qkvpacked_func(
+                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                     softmax_scale=self.softmax_scale, causal=causal
+                 )
+                 output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
+             else:
+                 # padded input: unpad, run the varlen kernel, then re-pad
+                 nheads = qkv.shape[-2]
+                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
+                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
+                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
+                 output_unpad = flash_attn_unpadded_qkvpacked_func(
+                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                     softmax_scale=self.softmax_scale, causal=causal
+                 )
+                 output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
+                                              indices, batch_size, seqlen),
+                                    'b s (h d) -> b s h d', h=nheads)
+         else:
+             assert max_s is not None
+             output = flash_attn_unpadded_qkvpacked_func(
+                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                 softmax_scale=self.softmax_scale, causal=causal
+             )
+
+         return output, None
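
A hedged smoke test for the wrapper above (requires a CUDA device and the `flash-attn` package; shapes follow the docstring, with head dim 128 matching InternViT's 3200 / 25 heads):

```python
import torch
from flash_attention import FlashAttention  # assumes this file is on sys.path

# Packed QKV of shape (B, S, 3, H, D); the kernel requires fp16/bf16 on CUDA.
attn = FlashAttention(attention_dropout=0.0).cuda().eval()
qkv = torch.randn(2, 257, 3, 25, 128, dtype=torch.float16, device='cuda')
with torch.no_grad():
    out, _ = attn(qkv)
print(out.shape)  # torch.Size([2, 257, 25, 128])
```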
checkpoints/InternVL-14B-224px/modeling_intern_vit.py ADDED
@@ -0,0 +1,342 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2023 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from einops import rearrange
+ from timm.models.layers import DropPath
+ from torch import nn
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import (BaseModelOutput,
+                                            BaseModelOutputWithPooling)
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+
+ try:
+     from .flash_attention import FlashAttention
+     has_flash_attn = True
+ except ImportError:
+     print('FlashAttention is not installed.')
+     has_flash_attn = False
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ try:
+     from apex.normalization import FusedRMSNorm
+
+     InternRMSNorm = FusedRMSNorm  # noqa
+
+     logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
+ except ImportError:
+     # fall back to the plain InternRMSNorm above
+     pass
+ except Exception:
+     logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
+     pass
+
+
+ class InternVisionEmbeddings(nn.Module):
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.image_size = config.image_size
+         self.patch_size = config.patch_size
+
+         self.class_embedding = nn.Parameter(
+             torch.randn(1, 1, self.embed_dim),
+         )
+
+         self.patch_embedding = nn.Conv2d(
+             in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
+         )
+
+         self.num_patches = (self.image_size // self.patch_size) ** 2
+         self.num_positions = self.num_patches + 1
+
+         self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
+
+     def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+         batch_size = pixel_values.shape[0]
+         target_dtype = self.patch_embedding.weight.dtype
+         patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, width, grid, grid]
+         patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+         class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
+         embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+         embeddings = embeddings + self.position_embedding.to(target_dtype)
+         return embeddings
+
+
+ class InternAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.use_flash_attn = config.use_flash_attn and has_flash_attn
+         if config.use_flash_attn and not has_flash_attn:
+             print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
+         self.head_dim = self.embed_dim // self.num_heads
+         if self.head_dim * self.num_heads != self.embed_dim:
+             raise ValueError(
+                 f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
+                 f' {self.num_heads}).'
+             )
+
+         self.scale = self.head_dim ** -0.5
+         self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
+         self.attn_drop = nn.Dropout(config.attention_dropout)
+         self.proj_drop = nn.Dropout(config.dropout)
+
+         self.qk_normalization = config.qk_normalization
+
+         if self.qk_normalization:
+             self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+             self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+         if self.use_flash_attn:
+             self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
+         self.proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+     def _naive_attn(self, x):
+         B, N, C = x.shape
+         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
+
+         if self.qk_normalization:
+             B_, H_, N_, D_ = q.shape
+             q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
+             k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
+
+         attn = ((q * self.scale) @ k.transpose(-2, -1))
+         attn = attn.softmax(dim=-1)
+         attn = self.attn_drop(attn)
+
+         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+     def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
+         qkv = self.qkv(x)
+         qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
+
+         if self.qk_normalization:
+             q, k, v = qkv.unbind(2)
+             q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
+             k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
+             qkv = torch.stack([q, k, v], dim=2)
+
+         context, _ = self.inner_attn(
+             qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
+         )
+         outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
+         outs = self.proj_drop(outs)
+         return outs
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
+         return x
+
+
+ class InternMLP(nn.Module):
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.act = ACT2FN[config.hidden_act]
+         self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+         self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.fc1(hidden_states)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.fc2(hidden_states)
+         return hidden_states
+
+
+ class InternVisionEncoderLayer(nn.Module):
+     def __init__(self, config: InternVisionConfig, drop_path_rate: float):
+         super().__init__()
+         self.embed_dim = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+
+         self.attn = InternAttention(config)
+         self.mlp = InternMLP(config)
+         self.norm1 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+         self.norm2 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+         # layer scale parameters and stochastic depth
+         self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
+         self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
+         self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+         self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+
+     def forward(
+             self,
+             hidden_states: torch.Tensor,
+     ) -> torch.Tensor:
+         """
+         Args:
+             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+         """
+         hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
+
+         hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
+
+         return hidden_states
+
+
+ class InternVisionEncoder(nn.Module):
+     """
+     Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+     [`InternVisionEncoderLayer`].
+
+     Args:
+         config (`InternVisionConfig`):
+             The corresponding vision configuration for the `InternVisionEncoder`.
+     """
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         # stochastic depth decay rule
+         dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
+         self.layers = nn.ModuleList([
+             InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
+         self.gradient_checkpointing = True
+
+     def forward(
+             self,
+             inputs_embeds,
+             output_hidden_states: Optional[bool] = None,
+             return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutput]:
+         r"""
+         Args:
+             inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+                 Embedded representation of the inputs. Should be float, not int tokens.
+             output_hidden_states (`bool`, *optional*):
+                 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                 for more detail.
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+         """
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         encoder_states = () if output_hidden_states else None
+         hidden_states = inputs_embeds
+
+         for idx, encoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 encoder_states = encoder_states + (hidden_states,)
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = torch.utils.checkpoint.checkpoint(
+                     encoder_layer,
+                     hidden_states)
+             else:
+                 layer_outputs = encoder_layer(
+                     hidden_states,
+                 )
+             hidden_states = layer_outputs
+
+         if output_hidden_states:
+             encoder_states = encoder_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, encoder_states] if v is not None)
+         return BaseModelOutput(
+             last_hidden_state=hidden_states, hidden_states=encoder_states
+         )
+
+
+ class InternVisionModel(PreTrainedModel):
+     main_input_name = 'pixel_values'
+     config_class = InternVisionConfig
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__(config)
+         self.config = config
+
+         self.embeddings = InternVisionEmbeddings(config)
+         self.encoder = InternVisionEncoder(config)
+
+     def resize_pos_embeddings(self, old_size, new_size, patch_size):
+         pos_emb = self.embeddings.position_embedding
+         _, num_positions, embed_dim = pos_emb.shape
+         cls_emb = pos_emb[:, :1, :]
+         pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
+         pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
+         pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
+         pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
+         self.embeddings.position_embedding = nn.Parameter(pos_emb)
+         logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def forward(
+             self,
+             pixel_values: Optional[torch.FloatTensor] = None,
+             output_hidden_states: Optional[bool] = None,
+             return_dict: Optional[bool] = None,
+             pixel_embeds: Optional[torch.FloatTensor] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPooling]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None and pixel_embeds is None:
+             raise ValueError('You have to specify pixel_values or pixel_embeds')
+
+         if pixel_embeds is not None:
+             hidden_states = pixel_embeds
+         else:
+             if len(pixel_values.shape) == 4:
+                 hidden_states = self.embeddings(pixel_values)
+             else:
+                 raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
+         encoder_outputs = self.encoder(
+             inputs_embeds=hidden_states,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         # index [0] works whether the encoder returned a tuple or a ModelOutput
+         last_hidden_state = encoder_outputs[0]
+         pooled_output = last_hidden_state[:, 0, :]
+
+         if not return_dict:
+             return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=last_hidden_state,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+             attentions=encoder_outputs.attentions,
+         )
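
To close, a CPU-sized smoke test of the vision tower above. This is a sketch with a deliberately tiny, randomly initialized config so it runs without flash-attn or a GPU; the package name is hypothetical (the file's relative imports require package context):

```python
import torch
from internvl_14b_224px.configuration_intern_vit import InternVisionConfig
from internvl_14b_224px.modeling_intern_vit import InternVisionModel

cfg = InternVisionConfig(hidden_size=64, num_attention_heads=4,
                         intermediate_size=128, num_hidden_layers=2,
                         use_flash_attn=False)
model = InternVisionModel(cfg).eval()
pixel_values = torch.randn(1, 3, cfg.image_size, cfg.image_size)
with torch.no_grad():
    out = model(pixel_values=pixel_values)
print(out.last_hidden_state.shape)  # torch.Size([1, 257, 64]) -> CLS + 16*16 patches
print(out.pooler_output.shape)      # torch.Size([1, 64]) -> the CLS token
```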