import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.models.auto import CONFIG_MAPPING
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.utils import logging

logger = logging.get_logger(__name__)


class CheXagentVisionConfig(PretrainedConfig):
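    r"""
    Configuration class for the CheXagent vision encoder. It stores the hyperparameters of the vision tower
    (hidden size, depth, attention heads, image and patch size, etc.); the defaults below are the values
    hard-coded in this module.

    Example (a minimal sketch; the module name `configuration_chexagent` is assumed, not defined here):

    ```python
    >>> from configuration_chexagent import CheXagentVisionConfig

    >>> # Build a vision config with the default hyperparameters from __init__.
    >>> vision_config = CheXagentVisionConfig()
    >>> vision_config.image_size, vision_config.patch_size
    (224, 14)
    ```
    """
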
    model_type = "chexagent_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # If loading from a full CheXagent config, pull out the nested vision_config.
        if config_dict.get("model_type") == "chexagent":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class CheXagentQFormerConfig(PretrainedConfig):
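    r"""
    Configuration class for the CheXagent Q-Former, the query transformer that bridges the vision encoder and
    the language model; `cross_attention_frequency` controls how often its layers cross-attend into the image
    features, and `encoder_hidden_size` is expected to match the vision encoder's hidden size.

    Example (a minimal sketch; the module name `configuration_chexagent` is assumed, not defined here):

    ```python
    >>> from configuration_chexagent import CheXagentQFormerConfig

    >>> # Build a Q-Former config sized to a vision encoder with hidden size 1408.
    >>> qformer_config = CheXagentQFormerConfig(encoder_hidden_size=1408)
    >>> qformer_config.num_hidden_layers
    12
    ```
    """
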
    model_type = "chexagent_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # If loading from a full CheXagent config, pull out the nested qformer_config.
        if config_dict.get("model_type") == "chexagent":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class CheXagentConfig(PretrainedConfig):
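    r"""
    Top-level CheXagent configuration. It composes a `CheXagentVisionConfig`, a `CheXagentQFormerConfig` and a
    text-model configuration (any config registered in `transformers`; `opt` is used when `text_config` does not
    specify a `model_type`).

    Example (a minimal sketch; the module name `configuration_chexagent` is assumed, not defined here):

    ```python
    >>> from transformers import OPTConfig
    >>> from configuration_chexagent import CheXagentConfig, CheXagentQFormerConfig, CheXagentVisionConfig

    >>> # Compose a full config from the three sub-configurations (defaults throughout).
    >>> config = CheXagentConfig.from_vision_qformer_text_configs(
    ...     vision_config=CheXagentVisionConfig(),
    ...     qformer_config=CheXagentQFormerConfig(),
    ...     text_config=OPTConfig(),
    ... )
    >>> config.num_query_tokens, config.num_max_images
    (128, 2)
    ```
    """
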
    model_type = "chexagent"

    def __init__(
        self,
        vision_config=None,
        qformer_config=None,
        text_config=None,
        num_query_tokens=128,
        num_max_images=2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fall back to the default sub-configurations when none are provided.
        if vision_config is None:
            vision_config = {}

        if qformer_config is None:
            qformer_config = {}

        if text_config is None:
            text_config = {}

        self.vision_config = CheXagentVisionConfig(**vision_config)
        self.qformer_config = CheXagentQFormerConfig(**qformer_config)
        # The language model can be any config registered in transformers; default to OPT.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over vision features, so its encoder width must match the vision tower.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.num_max_images = num_max_images

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: CheXagentVisionConfig,
        qformer_config: CheXagentQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate a `CheXagentConfig` from vision, Q-Former and language model configurations."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )