clip_vit_emb_neuronx/config.json
{
  "_attn_implementation_autoset": true,
  "_name_or_path": "/tmp/tmpfbpcelx0",
  "architectures": [
    "CLIPModel"
  ],
  "export_model_type": "clip",
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "neuron": {
    "auto_cast": null,
    "auto_cast_type": null,
    "compiler_type": "neuronx-cc",
    "compiler_version": "2.16.372.0+4a9b2326",
    "disable_fallback": false,
    "disable_fast_relayout": false,
    "dynamic_batch_size": false,
    "inline_weights_to_neff": true,
    "input_names": [
      "input_ids",
      "pixel_values",
      "attention_mask"
    ],
    "model_type": "clip",
    "optlevel": "2",
    "output_attentions": false,
    "output_hidden_states": false,
    "output_names": [
      "text_embeds",
      "image_embeds"
    ],
    "static_height": 224,
    "static_image_batch_size": 1,
    "static_num_channels": 3,
    "static_sequence_length": 64,
    "static_text_batch_size": 3,
    "static_width": 224,
    "tensor_parallel_size": 1
  },
  "projection_dim": 512,
  "task": "feature-extraction",
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "gradient_checkpointing": false,
    "model_type": "clip_text_model",
    "torch_dtype": "float32"
  },
  "torch_dtype": "float32",
  "torchscript": true,
  "transformers_version": "4.49.0",
  "vision_config": {
    "dropout": 0.0,
    "gradient_checkpointing": false,
    "model_type": "clip_vision_model",
    "torch_dtype": "float32"
  }
}
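
Notes on the "neuron" block: the model was compiled with static shapes and "dynamic_batch_size" set to false, so inference inputs must match the compiled shapes exactly: a batch of 3 texts padded to 64 tokens, and a single 3x224x224 image. "inline_weights_to_neff": true means the weights are embedded in the compiled NEFF rather than kept as a separate checkpoint. Below is a minimal inference sketch; the repository id "Jingya/clip_vit_emb_neuronx" and the NeuronCLIPModel class from optimum-neuron are assumptions for illustration, not confirmed by this file.

# Minimal inference sketch. Assumptions (not stated in the config):
# the repo id below is hypothetical, and optimum-neuron is assumed to
# expose NeuronCLIPModel for CLIP models exported with the
# "feature-extraction" task.
import torch
from PIL import Image
from transformers import AutoProcessor
from optimum.neuron import NeuronCLIPModel  # assumed import path

repo_id = "Jingya/clip_vit_emb_neuronx"  # hypothetical repo id
model = NeuronCLIPModel.from_pretrained(repo_id)
processor = AutoProcessor.from_pretrained(repo_id)

# Static shapes from the "neuron" block: 3 texts at sequence length 64
# and one RGB image at 224x224. With dynamic_batch_size false, the
# compiled graph rejects any other input shape.
texts = ["a photo of a cat", "a photo of a dog", "a photo of a bird"]
image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(
    text=texts,
    images=image,
    padding="max_length",
    max_length=64,        # static_sequence_length
    truncation=True,
    return_tensors="pt",
)

outputs = model(**inputs)
# output_names from the config; projection_dim is 512.
text_embeds = outputs.text_embeds    # expected shape (3, 512)
image_embeds = outputs.image_embeds  # expected shape (1, 512)

# Retrieval-style scoring: cosine similarity between the image
# embedding and each text embedding, broadcast over the text batch.
sims = torch.nn.functional.cosine_similarity(image_embeds, text_embeds)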