from transformers import PretrainedConfig
class CheXficientConfig(PretrainedConfig):
    """Configuration for a CheXficient CLIP-style vision-text model.

    Stores the names of the vision and text backbones plus the shared
    projection dimension used to align the two embedding spaces.

    Args:
        vision_model_name: Identifier of the vision backbone
            (default ``"dinov2_vitb14"`` — presumably a timm/torch.hub
            DINOv2 ViT-B/14 checkpoint; confirm against the model code).
        text_model_name: Hugging Face Hub id of the text encoder
            (default ``"emilyalsentzer/Bio_ClinicalBERT"``).
        projection_dim: Dimensionality of the joint image-text
            projection space.
        image_size: Input image resolution in pixels (378 is a
            multiple of the ViT-B/14 patch size).
        max_bert_length: Maximum token length for the text encoder.
        **kwargs: Forwarded to :class:`PretrainedConfig` (e.g.
            ``name_or_path``, serialization options).
    """

    # Key used by transformers' AutoConfig registry to identify this config.
    model_type = "chexficient_clip"

    def __init__(
        self,
        vision_model_name: str = "dinov2_vitb14",
        text_model_name: str = "emilyalsentzer/Bio_ClinicalBERT",
        projection_dim: int = 512,
        image_size: int = 378,
        max_bert_length: int = 256,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vision_model_name = vision_model_name
        self.text_model_name = text_model_name
        self.projection_dim = projection_dim
        self.image_size = image_size
        self.max_bert_length = max_bert_length