{
  "architectures": [
    "CLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 512,
  "text_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "SajjadAyoubi/clip-fa-text",
    "architectures": [
      "RobertaModel"
    ],
    "attention_probs_dropout_prob": 0.1,
    "bos_token_id": 0,
    "classifier_dropout": null,
    "eos_token_id": 2,
    "gradient_checkpointing": false,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-12,
    "max_position_embeddings": 514,
    "model_type": "clip_text_model",
    "num_attention_heads": 12,
    "position_embedding_type": "absolute",
    "torch_dtype": "float32",
    "type_vocab_size": 1,
    "use_cache": true,
    "vocab_size": 42000
  },
  "torch_dtype": "float32",
  "transformers_version": "4.47.0",
  "vision_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "SajjadAyoubi/clip-fa-vision",
    "dropout": 0.0,
    "gradient_checkpointing": false,
    "model_type": "clip_vision_model"
  }
}
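A minimal usage sketch, assuming this file is saved locally as ./config.json and that transformers >= 4.47.0 (the version recorded in "transformers_version" above) is installed. The two encoder checkpoints named in the "_name_or_path" fields are published separately on the Hugging Face Hub:

    from transformers import CLIPConfig, CLIPVisionModel, RobertaModel

    # Rebuild the combined CLIP config from this JSON file.
    config = CLIPConfig.from_json_file("config.json")
    assert config.projection_dim == 512            # shared image/text embedding size
    assert config.text_config.vocab_size == 42000  # Farsi RoBERTa vocabulary

    # Load the two towers individually; both repo names are taken from
    # the "_name_or_path" fields above.
    text_encoder = RobertaModel.from_pretrained("SajjadAyoubi/clip-fa-text")
    vision_encoder = CLIPVisionModel.from_pretrained("SajjadAyoubi/clip-fa-vision")

Note that the text tower is a RobertaModel (see "architectures" under "text_config"), not a stock CLIP text encoder, which is why RoBERTa-style fields such as "type_vocab_size" and "position_embedding_type" appear in that block.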
|
|