```json
{
  "_name_or_path": "microsoft/LLM2CLIP-Openai-B-16",
  "architectures": [
    "LLM2CLIPModel"
  ],
  "auto_map": {
    "AutoConfig": "microsoft/LLM2CLIP-Openai-B-16--configuration_clip.CLIPConfig",
    "AutoModel": "microsoft/LLM2CLIP-Openai-B-16--modeling_clip.LLM2CLIPModel"
  },
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 1280,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "model_type": "clip_text_model",
    "projection_dim": 1280
  },
  "transformers_version": "4.37.2",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model",
    "patch_size": 16
  }
}
```
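As a minimal loading sketch (not an official usage snippet from the model card): the `auto_map` entries point at custom `configuration_clip.py` / `modeling_clip.py` modules bundled with the checkpoint, so the `transformers` Auto classes need `trust_remote_code=True` to resolve them. The `print` line simply reads back a field from the config above.

```python
from transformers import AutoConfig, AutoModel

# "auto_map" references code shipped inside the repo, so loading the
# custom CLIPConfig / LLM2CLIPModel classes requires trust_remote_code=True.
config = AutoConfig.from_pretrained(
    "microsoft/LLM2CLIP-Openai-B-16", trust_remote_code=True
)
model = AutoModel.from_pretrained(
    "microsoft/LLM2CLIP-Openai-B-16", trust_remote_code=True
)

# projection_dim = 1280: image and text embeddings are projected into a
# shared 1280-dimensional space before the contrastive similarity is computed.
print(config.projection_dim)
```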