{
  "_name_or_path": "openai/clip-vit-base-patch16",
  "architectures": [
    "CLIPForImageClassification"
  ],
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3",
    "4": "LABEL_4",
    "5": "LABEL_5",
    "6": "LABEL_6",
    "7": "LABEL_7"
  },
  "initializer_factor": 1.0,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3,
    "LABEL_4": 4,
    "LABEL_5": 5,
    "LABEL_6": 6,
    "LABEL_7": 7
  },
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "problem_type": "single_label_classification",
  "projection_dim": 512,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "model_type": "clip_text_model"
  },
  "torch_dtype": "float32",
  "transformers_version": "4.45.2",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model",
    "patch_size": 16
  }
}
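
This config.json describes a CLIPForImageClassification model (transformers 4.45.2) fine-tuned from openai/clip-vit-base-patch16 for 8-way single-label classification. A minimal sketch of loading and running inference with a checkpoint carrying this config follows; the local path "./clip-finetuned" and the input file "example.jpg" are hypothetical placeholders, not values from the config.

from PIL import Image
import torch
from transformers import AutoImageProcessor, CLIPForImageClassification

# Hypothetical directory holding this config.json plus fine-tuned weights.
model = CLIPForImageClassification.from_pretrained("./clip-finetuned")
model.eval()

# The preprocessing matches the base checkpoint named in _name_or_path.
processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch16")

image = Image.open("example.jpg")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 8): one logit per id2label entry

predicted_id = logits.argmax(-1).item()
# transformers converts id2label keys to ints on load, so an int index works here.
print(model.config.id2label[predicted_id])

Note that the labels are still the generic LABEL_0 through LABEL_7 placeholders, so id2label/label2id in this config (or on the loaded model) would need to be updated with human-readable class names before the predictions are self-describing.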