# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import copy
import os
import sys
from transformers import AutoConfig, LlamaConfig, Qwen2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
# Handle both relative and absolute imports for dynamic module loading
import importlib.util

try:
    from .configuration_intern_vit import InternVisionConfig
except (ImportError, ValueError):
    # When loaded as a dynamic module by transformers, look for an already-imported copy first
    InternVisionConfig = None
    # Check whether the config module is already present in sys.modules
    for module_name in list(sys.modules.keys()):
        if 'configuration_intern_vit' in module_name:
            try:
                InternVisionConfig = sys.modules[module_name].InternVisionConfig
                break
            except AttributeError:
                pass
    # If not found, load it directly from the file next to this one
    if InternVisionConfig is None:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        config_file = os.path.join(current_dir, 'configuration_intern_vit.py')
        spec = importlib.util.spec_from_file_location('configuration_intern_vit', config_file)
        config_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(config_module)
        InternVisionConfig = config_module.InternVisionConfig

logger = logging.get_logger(__name__)


class InternVLChatConfig(PretrainedConfig):
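    """
    Configuration class for an InternVL chat model. It composes an `InternVisionConfig`
    for the vision encoder with a `LlamaConfig` or `Qwen2Config` for the language model,
    alongside the image tiling / pixel-shuffle options stored on this config.
    """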
    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        use_backbone_lora=0,
        use_llm_lora=0,
        select_layer=-1,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        dynamic_image_size=False,
        use_thumbnail=False,
        ps_version='v1',
        min_dynamic_patch=1,
        max_dynamic_patch=6,
        onnx_path=None,
        vision_onnx_file=None,
        **kwargs):
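        # Note: `vision_config` and `llm_config` may arrive as plain dicts (e.g. when a saved
        # config is re-loaded); they are re-wrapped into config objects below.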
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            logger.info('llm_config is None. Initializing the llm_config with default values (`Qwen2Config`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config.get('architectures')[0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))

        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        self.onnx_path = onnx_path
        self.vision_onnx_file = vision_onnx_file
        # Keep tie_word_embeddings in sync with the LLM config (False by default for models of all sizes).
        self.tie_word_embeddings = self.llm_config.tie_word_embeddings

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`]
        so that the nested vision and LLM configs are expanded as well.

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch
        output['onnx_path'] = self.onnx_path
        output['vision_onnx_file'] = self.vision_onnx_file
        return output
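
# Minimal usage sketch (illustrative only, not part of the upstream API surface):
#
#   config = InternVLChatConfig(
#       vision_config={'architectures': ['InternVisionModel']},
#       llm_config={'architectures': ['Qwen2ForCausalLM']},
#       dynamic_image_size=True,
#   )
#   config.save_pretrained('./internvl_chat_config')  # standard PretrainedConfig method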