```json
{
  "_name_or_path": "yusx-swapp/ofm-clip-base-patch32-cifar10",
  "architectures": ["CLIPForImageClassification"],
  "elastic_config": {
    "text": {
      "atten_out_space": ["None"],
      "inter_hidden_space": [2048, 1536, 1024],
      "residual_hidden_space": ["None"]
    },
    "vision": {
      "atten_out_space": ["None"],
      "inter_hidden_space": [3072, 2048, 1280],
      "residual_hidden_space": ["None"]
    }
  },
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 512,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "model_type": "clip_text_model"
  },
  "torch_dtype": "float32",
  "transformers_version": "4.47.0.dev0",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model"
  }
}
```
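Since the config declares `CLIPForImageClassification` as its architecture, the checkpoint can be loaded through the standard `transformers` auto classes. A minimal usage sketch follows, assuming the `yusx-swapp/ofm-clip-base-patch32-cifar10` repo is reachable on the Hugging Face Hub and a `transformers` version recent enough to ship `CLIPForImageClassification` (4.40 or later; the config above was written by 4.47.0.dev0):

```python
# Minimal inference sketch for this checkpoint; the image path is a placeholder.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo = "yusx-swapp/ofm-clip-base-patch32-cifar10"
processor = AutoImageProcessor.from_pretrained(repo)
# model_type "clip" + the architectures field resolve to CLIPForImageClassification.
model = AutoModelForImageClassification.from_pretrained(repo)

image = Image.open("example.png").convert("RGB")  # e.g. a CIFAR-10-style image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(-1).item()
# id2label comes from the checkpoint; fall back to the raw index if unset.
print(model.config.id2label.get(pred, pred))
```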
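The `elastic_config` block describes the width search space of the elastic (OFM) supernet: `inter_hidden_space` lists the candidate FFN intermediate sizes for each tower, while `"None"` entries for `atten_out_space` and `residual_hidden_space` indicate those dimensions are not searched and keep the base model's values. The sketch below is plain JSON inspection, not the OFM library's own sampling API, and it assumes a width is picked uniformly for all layers of a tower (per-layer sampling would yield a much larger space):

```python
# Hypothetical illustration: enumerate the (text, vision) FFN width
# combinations exposed by elastic_config, reading the config.json above.
import itertools
import json

with open("config.json") as f:
    config = json.load(f)

elastic = config["elastic_config"]
text_widths = elastic["text"]["inter_hidden_space"]      # [2048, 1536, 1024]
vision_widths = elastic["vision"]["inter_hidden_space"]  # [3072, 2048, 1280]

for t, v in itertools.product(text_widths, vision_widths):
    print(f"text FFN = {t}, vision FFN = {v}")
# 3 x 3 = 9 combinations under the uniform-width assumption.
```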