{
  "model_type": "memory_clip",
  "auto_map": {
    "AutoConfig": "memory_clip.MemoryCLIPConfig",
    "AutoModel": "memory_clip.MemoryCLIPModel"
  },
  "architectures": [
    "MemoryCLIPModel"
  ],
  "clip_model": "openai/clip-vit-large-patch14",
  "clip_hidden": 768,
  "clip_layers": 12,
  "clip_max_tokens": 77,
  "freeze_clip": true,
  "n_memory_tokens": 8,
  "bank_size": 64,
  "anchor_dim": 768,
  "n_bank_heads": 8,
  "bank_cross_layers": 2,
  "gate_type": "gru",
  "extract_layers": [1, 3, 5, 7, 9, 11],
  "layer_fusion": "learned",
  "max_content_tokens": 18,
  "segment_overlap": 4,
  "max_segments": 32,
  "cv_target": 0.20,
  "torch_dtype": "float32",
  "transformers_version": "4.48.0"
}