Upload Blip2ForImageTextRetrieval
- config.json +24 -0
- pytorch_model.bin +3 -0
config.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "architectures": [
+    "Blip2ForImageTextRetrieval"
+  ],
+  "image_text_hidden_size": 256,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "model_type": "blip-2",
+  "num_query_tokens": 32,
+  "qformer_config": {
+    "model_type": "blip_2_qformer",
+    "qformer_text_input": true,
+    "vocab_size": 30523
+  },
+  "text_config": {
+    "model_type": "opt"
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.0.dev0",
+  "use_decoder_only_language_model": true,
+  "vision_config": {
+    "model_type": "blip_2_vision_model"
+  }
+}
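The config above describes a BLIP-2 retrieval checkpoint: 32 Q-Former query tokens, a Q-Former that also consumes text ("qformer_text_input": true), and a shared 256-dimensional image-text embedding space ("image_text_hidden_size"). A minimal usage sketch, assuming a transformers build that includes the Blip2ForImageTextRetrieval class and that processor files sit alongside the weights; the checkpoint path, image file, and caption below are placeholders:

```python
import torch
from PIL import Image
from transformers import AutoProcessor, Blip2ForImageTextRetrieval

checkpoint = "./blip2-itm-checkout"  # hypothetical local clone of this repo

processor = AutoProcessor.from_pretrained(checkpoint)
model = Blip2ForImageTextRetrieval.from_pretrained(checkpoint, torch_dtype=torch.float32)
model.eval()

image = Image.open("example.jpg").convert("RGB")  # placeholder image
inputs = processor(images=image, text="a photo of a dog", return_tensors="pt")

with torch.no_grad():
    # ITM head: (no-match, match) logits for the image-text pair.
    itm_out = model(**inputs, use_image_text_matching_head=True)
    match_prob = torch.softmax(itm_out.logits_per_image, dim=1)[:, 1]

    # ITC head: similarity logits in the shared 256-dim space.
    itc_out = model(**inputs, use_image_text_matching_head=False)

print(f"image-text match probability: {match_prob.item():.3f}")
```

The float32 dtype matches "torch_dtype" in the config; loading in half precision would roughly halve the memory needed for the ~4.8 GB weights below.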
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec37a1612c80fc4067b3e1f74de7cbc71e84b90736e89516c7b4088d79195dda
+size 4784669821
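This pytorch_model.bin entry is a Git LFS pointer, not the weights themselves: the actual ~4.8 GB blob is fetched by `git lfs pull` (or the Hub's resolve endpoint) and must hash to the oid recorded above. A short sketch for checking a downloaded file against the pointer; the filename is a placeholder for the resolved weights file:

```python
import hashlib
import os

EXPECTED_OID = "ec37a1612c80fc4067b3e1f74de7cbc71e84b90736e89516c7b4088d79195dda"
EXPECTED_SIZE = 4784669821

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash in 1 MiB chunks so the ~4.8 GB file never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

path = "pytorch_model.bin"  # the resolved weights, not the pointer file
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha256_of(path) == EXPECTED_OID, "sha256 mismatch"
print("pointer verified")
```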