{ "_name_or_path": "openai/clip-vit-large-patch14", "architectures": ["CLIPTextModel"], "hidden_act": "quick_gelu", "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-05, "model_type": "clip", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 1, "vocab_size": 49408 }