{
"model_type": "clip2mt5-crossattention",
"library": "pytorch",
"architectures": ["CLIP2MT5_CrossAttention"],
"pipeline_tag": "image-text-to-text",
"description": "CLIP + mT5 VQA Model using cross-attention.",
"author": "MUERIS"
}