import json
import logging
import os
import re
import warnings
from copy import deepcopy
from dataclasses import asdict
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union

import torch

from .convert import convert_state_dict
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict, \
    resize_pos_embed, get_cast_dtype, resize_text_pos_embed, set_model_preprocess_cfg
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss, SigLipLoss, MultiPosConLossMM
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, \
    list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform_v2, AugmentationCfg, PreprocessCfg, merge_preprocess_dict, merge_preprocess_kwargs
from .tokenizer import HFTokenizer, SimpleTokenizer, SigLipTokenizer, DEFAULT_CONTEXT_LENGTH

HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
_MODEL_CONFIGS = {}  # registry of model architecture configs (model_name -> config)


def _natural_key(string_):
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]


def _rescan_model_configs():
    global _MODEL_CONFIGS

    config_ext = ('.json',)
    config_files = []
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            config_files.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                config_files.extend(config_path.glob(f'*{ext}'))

    for cf in config_files:
        with open(cf, 'r') as f:
            model_cfg = json.load(f)
            if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
                _MODEL_CONFIGS[cf.stem] = model_cfg

    _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}


_rescan_model_configs()  # initial populate of model config registry


def list_models():
    """ enumerate available model architectures based on config files """
    return list(_MODEL_CONFIGS.keys())


def add_model_config(path):
    """ add model config path or file and update registry """
    if not isinstance(path, Path):
        path = Path(path)
    _MODEL_CONFIG_PATHS.append(path)
    _rescan_model_configs()
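
# Usage sketch: registering configs from a custom directory so they show up in
# list_models(). The path and config name below are purely illustrative.
#
#   add_model_config('/my/project/model_configs')
#   assert 'my-custom-vit' in list_models()  # given my-custom-vit.json in that dir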


def get_model_config(model_name):
    """ Fetch model config from builtin (local library) configs. """
    if model_name in _MODEL_CONFIGS:
        return deepcopy(_MODEL_CONFIGS[model_name])
    else:
        return None


def _get_hf_config(
        model_id: str,
        cache_dir: Optional[str] = None,
):
    """ Fetch model config from HuggingFace Hub. """
    config_path = download_pretrained_from_hf(
        model_id,
        filename='open_clip_config.json',
        cache_dir=cache_dir,
    )
    with open(config_path, 'r', encoding='utf-8') as f:
        config = json.load(f)
    return config


def get_tokenizer(
        model_name: str = '',
        context_length: Optional[int] = None,
        cache_dir: Optional[str] = None,
        **kwargs,
):
    if model_name.startswith(HF_HUB_PREFIX):
        model_name = model_name[len(HF_HUB_PREFIX):]
        try:
            config = _get_hf_config(model_name, cache_dir=cache_dir)['model_cfg']
        except Exception:
            tokenizer = HFTokenizer(
                model_name,
                context_length=context_length or DEFAULT_CONTEXT_LENGTH,
                cache_dir=cache_dir,
                **kwargs,
            )
            return tokenizer
    else:
        config = get_model_config(model_name)
        assert config is not None, f"No valid model config found for {model_name}."

    text_config = config.get('text_cfg', {})
    if 'tokenizer_kwargs' in text_config:
        tokenizer_kwargs = dict(text_config['tokenizer_kwargs'], **kwargs)
    else:
        tokenizer_kwargs = kwargs

    if context_length is None:
        context_length = text_config.get('context_length', DEFAULT_CONTEXT_LENGTH)

    model_name = model_name.lower()
    if text_config.get('hf_tokenizer_name', ''):
        tokenizer = HFTokenizer(
            text_config['hf_tokenizer_name'],
            context_length=context_length,
            cache_dir=cache_dir,
            **tokenizer_kwargs,
        )
    elif 'siglip' in model_name:
        tn = 'gemma' if 'siglip2' in model_name else 'mc4' if 'i18n' in model_name else 'c4-en'
        tokenizer = SigLipTokenizer(
            tn,
            context_length=context_length,
            # **tokenizer_kwargs,
        )
    else:
        tokenizer = SimpleTokenizer(
            context_length=context_length,
            **tokenizer_kwargs,
        )

    return tokenizer
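
# Usage sketch: fetching tokenizers for a built-in config vs. an HF Hub model.
# 'ViT-B-32' and the hub id below are illustrative examples.
#
#   tokenizer = get_tokenizer('ViT-B-32')
#   tokens = tokenizer(['a photo of a cat', 'a photo of a dog'])
#   # tokens: torch.LongTensor of shape [2, context_length]
#
#   hf_tok = get_tokenizer('hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K')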


def load_state_dict(
        checkpoint_path: str,
        device='cpu',
        weights_only=True,
):
    # Check if safetensors or not and load weights accordingly
    if str(checkpoint_path).endswith(".safetensors"):
        from safetensors.torch import load_file
        checkpoint = load_file(checkpoint_path, device=device)
    else:
        try:
            checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=weights_only)
        except Exception:
            # fall back to full unpickling for older torch versions / checkpoints
            checkpoint = torch.load(checkpoint_path, map_location=device)

    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif isinstance(checkpoint, torch.jit.ScriptModule):
        state_dict = checkpoint.state_dict()
        for key in ["input_resolution", "context_length", "vocab_size"]:
            state_dict.pop(key, None)
    else:
        state_dict = checkpoint

    if next(iter(state_dict.items()))[0].startswith('module'):
        # strip DistributedDataParallel 'module.' prefix
        state_dict = {k[7:]: v for k, v in state_dict.items()}

    return state_dict
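
# Usage sketch: inspecting raw weights without building a model. Handles both
# .safetensors and torch-pickled checkpoints; the path is illustrative.
#
#   sd = load_state_dict('/path/to/epoch_32.pt')
#   print(sorted(sd.keys())[:5])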


def load_checkpoint(
        model: Union[CLIP, CustomTextCLIP],
        checkpoint_path: str,
        strict: bool = True,
        weights_only: bool = True,
        device='cpu',
):
    if Path(checkpoint_path).suffix in ('.npz', '.npy'):
        # Separate path loading numpy big_vision (SigLIP) weights
        from open_clip.convert import load_big_vision_weights
        load_big_vision_weights(model, checkpoint_path)
        return {}

    state_dict = load_state_dict(checkpoint_path, device=device, weights_only=weights_only)

    # Detect & convert 3rd party state_dicts -> open_clip
    state_dict = convert_state_dict(model, state_dict)

    # Detect old format and make compatible with new format
    if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
        state_dict = convert_to_custom_text_state_dict(state_dict)

    # correct if logit_scale differs in being scalar vs 1d param
    if 'logit_scale' in state_dict and model.logit_scale.ndim != state_dict['logit_scale'].ndim:
        state_dict['logit_scale'] = state_dict['logit_scale'].reshape(model.logit_scale.shape)

    # correct if logit_bias differs in being scalar vs 1d param
    if 'logit_bias' in state_dict and model.logit_bias.ndim != state_dict['logit_bias'].ndim:
        state_dict['logit_bias'] = state_dict['logit_bias'].reshape(model.logit_bias.shape)

    # If loading a non-SigLIP model for SigLIP training. See https://github.com/mlfoundations/open_clip/issues/712
    if 'logit_bias' not in state_dict and model.logit_bias is not None:
        state_dict["logit_bias"] = torch.zeros_like(state_dict["logit_scale"])

    # Certain text transformers no longer expect position_ids after transformers==4.31
    position_id_key = 'text.transformer.embeddings.position_ids'
    if position_id_key in state_dict and not hasattr(model, position_id_key):
        del state_dict[position_id_key]

    resize_pos_embed(state_dict, model)
    resize_text_pos_embed(state_dict, model)

    # Finally, load the massaged state_dict into model
    incompatible_keys = model.load_state_dict(state_dict, strict=strict)
    if incompatible_keys.missing_keys:
        logging.warning(f"Missing keys: {incompatible_keys.missing_keys}")
    if incompatible_keys.unexpected_keys:
        logging.warning(f"Unexpected keys: {incompatible_keys.unexpected_keys}")
    return incompatible_keys
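
# Usage sketch: loading a local checkpoint into a freshly built model. The path
# is illustrative; strict=False tolerates key mismatches and returns them.
#
#   model = create_model('ViT-B-32')
#   incompatible = load_checkpoint(model, '/path/to/epoch_32.pt', strict=False)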


def create_model(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_patch_dropout: Optional[float] = None,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        force_preprocess_cfg: Optional[Dict[str, Any]] = None,
        pretrained_image: bool = False,
        pretrained_hf: bool = True,
        cache_dir: Optional[str] = None,
        output_dict: Optional[bool] = None,
        require_pretrained: bool = False,
        load_weights_only: bool = True,
        long_clip: Optional[str] = 'disable',
        **model_kwargs,
):
    """Creates and configures a contrastive vision-language model.

    Args:
        model_name: Name of the model architecture to create. Can be a local model name
            or a Hugging Face model ID prefixed with 'hf-hub:'.
        pretrained: Tag/path for pretrained model weights. Can be:
            - A pretrained tag name (e.g., 'openai')
            - A path to local weights
            - None to initialize with random weights
        precision: Model precision/AMP configuration. Options:
            - 'fp32': 32-bit floating point
            - 'fp16'/'bf16': Mixed precision with FP32 for certain layers
            - 'pure_fp16'/'pure_bf16': Pure 16-bit precision
        device: Device to load the model on ('cpu', 'cuda', or torch.device object)
        jit: If True, JIT compile the model
        force_quick_gelu: Force use of QuickGELU activation
        force_custom_text: Force use of custom text encoder
        force_patch_dropout: Override default patch dropout value
        force_image_size: Override default image size for vision encoder
        force_preprocess_cfg: Override default preprocessing configuration
        pretrained_image: Load pretrained weights for timm vision models
        pretrained_hf: Load pretrained weights for HF text models when not loading CLIP weights
        cache_dir: Override default cache directory for downloaded model files
        output_dict: If True and model supports it, return dictionary of features
        require_pretrained: Raise error if pretrained weights cannot be loaded
        load_weights_only: Only deserialize model weights when unpickling torch checkpoints (for safety)
        long_clip: Long-CLIP mode flag passed through to the model config ('disable' by default)
        **model_kwargs: Additional keyword arguments passed to model constructor

    Returns:
        Created and configured model instance

    Raises:
        RuntimeError: If model config is not found or required pretrained weights
            cannot be loaded

    Examples:
        # Create basic CLIP model
        model = create_model('ViT-B-32')

        # Create CLIP model with mixed precision on GPU
        model = create_model('ViT-B-32', precision='fp16', device='cuda')

        # Load pretrained OpenAI weights
        model = create_model('ViT-B-32', pretrained='openai')

        # Load Hugging Face model
        model = create_model('hf-hub:organization/model-name')
    """
    force_preprocess_cfg = force_preprocess_cfg or {}
    preprocess_cfg = asdict(PreprocessCfg())
    has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
    if has_hf_hub_prefix:
        model_id = model_name[len(HF_HUB_PREFIX):]
        checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
        config = _get_hf_config(model_id, cache_dir=cache_dir)
        preprocess_cfg = merge_preprocess_dict(preprocess_cfg, config['preprocess_cfg'])
        model_cfg = config['model_cfg']
        pretrained_hf = False  # override, no need to load original HF text weights
    else:
        model_name = model_name.replace('/', '-')  # for callers using old naming with / in ViT names
        checkpoint_path = None
        model_cfg = None

    if isinstance(device, str):
        device = torch.device(device)

    model_cfg = model_cfg or get_model_config(model_name)
    if model_cfg is not None:
        logging.info(f'Loaded {model_name} model config.')
    else:
        logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
        raise RuntimeError(f'Model config for {model_name} not found.')

    if force_quick_gelu:
        # override for use of QuickGELU on non-OpenAI transformer models
        model_cfg["quick_gelu"] = True

    if force_patch_dropout is not None:
        # override the default patch dropout value
        model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout

    if force_image_size is not None:
        # override model config's image size
        model_cfg["vision_cfg"]["image_size"] = force_image_size

    is_timm_model = 'timm_model_name' in model_cfg.get('vision_cfg', {})
    if pretrained_image:
        if is_timm_model:
            # pretrained weight loading for timm models set via vision_cfg
            model_cfg['vision_cfg']['timm_model_pretrained'] = True
        else:
            assert False, 'pretrained image towers currently only supported for timm models'

    # cast_dtype set for fp16 and bf16 (manual mixed-precision), not set for 'amp' or 'pure' modes
    cast_dtype = get_cast_dtype(precision)
    is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
    if is_hf_model:
        # load pretrained weights for HF text model IFF no CLIP weights being loaded
        model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf and not pretrained
    custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model

    model_cfg["long_clip"] = long_clip
    model_cfg = dict(model_cfg, **model_kwargs)  # merge cfg dict w/ kwargs (kwargs overrides cfg)

    if custom_text:
        if "multimodal_cfg" in model_cfg:
            model = CoCa(**model_cfg, cast_dtype=cast_dtype)
        else:
            model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
    else:
        model = CLIP(**model_cfg, cast_dtype=cast_dtype)

    if precision in ("fp16", "bf16"):
        dtype = torch.float16 if 'fp16' in precision else torch.bfloat16
        # manual mixed precision that matches original OpenAI behaviour
        if is_timm_model:
            # FIXME this is a bit janky, create timm based model in low-precision and
            # then cast only LayerNormFp32 instances back to float32 so they don't break.
            # Why? The convert_weights_to_lp fn only works with native models.
            model.to(device=device, dtype=dtype)
            from .transformer import LayerNormFp32

            def _convert_ln(m):
                if isinstance(m, LayerNormFp32):
                    m.weight.data = m.weight.data.to(torch.float32)
                    m.bias.data = m.bias.data.to(torch.float32)
            model.apply(_convert_ln)
        else:
            model.to(device=device)
            convert_weights_to_lp(model, dtype=dtype)
    elif precision in ("pure_fp16", "pure_bf16"):
        dtype = torch.float16 if 'fp16' in precision else torch.bfloat16
        model.to(device=device, dtype=dtype)
    else:
        model.to(device=device)
    pretrained_loaded = False
    if pretrained:
        checkpoint_path = ''
        pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
        if pretrained_cfg:
            checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
            preprocess_cfg = merge_preprocess_dict(preprocess_cfg, pretrained_cfg)
            pretrained_quick_gelu = pretrained_cfg.get('quick_gelu', False)
            model_quick_gelu = model_cfg.get('quick_gelu', False)
            if pretrained_quick_gelu and not model_quick_gelu:
                warnings.warn(
                    'These pretrained weights were trained with QuickGELU activation but the model config does '
                    'not have that enabled. Consider using a model config with a "-quickgelu" suffix or enable with a flag.')
            elif not pretrained_quick_gelu and model_quick_gelu:
                warnings.warn(
                    'The pretrained weights were not trained with QuickGELU but this activation is enabled in the '
                    'model config. Consider using a model config without QuickGELU or disabling override flags.')
        elif os.path.exists(pretrained):
            checkpoint_path = pretrained

        if checkpoint_path:
            logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
            load_checkpoint(model, checkpoint_path, weights_only=load_weights_only, strict=False)
        else:
            error_str = (
                f'Pretrained weights ({pretrained}) not found for model {model_name}. '
                f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
            logging.warning(error_str)
            raise RuntimeError(error_str)
        pretrained_loaded = True
    elif has_hf_hub_prefix:
        logging.info(f'Loading pretrained {model_name} weights ({checkpoint_path}).')
        load_checkpoint(model, checkpoint_path, weights_only=load_weights_only)
        pretrained_loaded = True

    if require_pretrained and not pretrained_loaded:
        # callers of create_model_from_pretrained always expect pretrained weights
        raise RuntimeError(
            f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')

    if output_dict and hasattr(model, "output_dict"):
        model.output_dict = True

    if jit:
        model = torch.jit.script(model)

    # set image preprocessing configuration in model attributes for convenience
    if getattr(model.visual, 'image_size', None) is not None:
        # use image_size set on model creation (via config or force_image_size arg)
        force_preprocess_cfg['size'] = model.visual.image_size

    set_model_preprocess_cfg(model, merge_preprocess_dict(preprocess_cfg, force_preprocess_cfg))
    return model


def create_loss(args):
    if args.distill:
        return DistillClipLoss(
            local_loss=args.local_loss,
            gather_with_grad=args.gather_with_grad,
            cache_labels=True,
            rank=args.rank,
            world_size=args.world_size,
            use_horovod=args.horovod,
        )
    elif "coca" in args.model.lower():
        return CoCaLoss(
            caption_loss_weight=args.coca_caption_loss_weight,
            clip_loss_weight=args.coca_contrastive_loss_weight,
            local_loss=args.local_loss,
            gather_with_grad=args.gather_with_grad,
            cache_labels=True,
            rank=args.rank,
            world_size=args.world_size,
            use_horovod=args.horovod,
        )
    elif args.siglip:
        assert not args.horovod, "Horovod not currently supported for SigLip"
        return SigLipLoss(
            rank=args.rank,
            world_size=args.world_size,
            dist_impl=args.loss_dist_impl,  # siglip has multiple distributed implementations to choose from
        )
    # elif args.mpcl_loss:
    #     return MultiPosConLossMM(
    #         rank=args.rank,
    #         world_size=args.world_size,
    #         temperature=0.07, w1=1.0, w2=1.0,
    #     )
    return ClipLoss(
        local_loss=args.local_loss,
        gather_with_grad=args.gather_with_grad,
        cache_labels=True,
        rank=args.rank,
        world_size=args.world_size,
        use_horovod=args.horovod,
    )
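
# Usage sketch: create_loss expects an argparse-style namespace. The minimal set
# of fields below is inferred from the attribute accesses above; single-process
# values shown.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(
#       distill=False, model='ViT-B-32', siglip=False,
#       local_loss=False, gather_with_grad=False,
#       rank=0, world_size=1, horovod=False,
#   )
#   loss_fn = create_loss(args)  # -> ClipLoss for this configuration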


def create_model_and_transforms(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_patch_dropout: Optional[float] = None,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        image_mean: Optional[Tuple[float, ...]] = None,
        image_std: Optional[Tuple[float, ...]] = None,
        image_interpolation: Optional[str] = None,
        image_resize_mode: Optional[str] = None,  # only effective for inference
        aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
        pretrained_image: bool = False,
        pretrained_hf: bool = True,
        cache_dir: Optional[str] = None,
        output_dict: Optional[bool] = None,
        load_weights_only: bool = True,
        long_clip: Optional[str] = 'disable',
        use_imagecrop_aug: Optional[bool] = False,
        max_boxes: Optional[int] = 10,
        local_method: str = 'grids',
        **model_kwargs,
):
    force_preprocess_cfg = merge_preprocess_kwargs(
        {},
        mean=image_mean,
        std=image_std,
        interpolation=image_interpolation,
        resize_mode=image_resize_mode,
    )

    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_text=force_custom_text,
        force_patch_dropout=force_patch_dropout,
        force_image_size=force_image_size,
        force_preprocess_cfg=force_preprocess_cfg,
        pretrained_image=pretrained_image,
        pretrained_hf=pretrained_hf,
        cache_dir=cache_dir,
        output_dict=output_dict,
        load_weights_only=load_weights_only,
        long_clip=long_clip,
        **model_kwargs,
    )

    pp_cfg = PreprocessCfg(**model.visual.preprocess_cfg)

    preprocess_train = image_transform_v2(
        pp_cfg,
        is_train=True,
        use_imagecrop_aug=use_imagecrop_aug,
        max_boxes=max_boxes,
        local_method=local_method,
        aug_cfg=aug_cfg,
    )
    preprocess_val = image_transform_v2(
        pp_cfg,
        is_train=False,
    )

    return model, preprocess_train, preprocess_val
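
# Usage sketch: the typical training/inference entry point. The model name and
# pretrained tag form one commonly published pairing, shown here for illustration.
#
#   from PIL import Image
#   model, preprocess_train, preprocess_val = create_model_and_transforms(
#       'ViT-B-32', pretrained='laion2b_s34b_b79k', device='cuda')
#   image = preprocess_val(Image.open('cat.png')).unsqueeze(0).to('cuda')
#   with torch.no_grad():
#       image_features = model.encode_image(image)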


def create_model_from_pretrained(
        model_name: str,
        pretrained: Optional[str] = None,
        precision: str = 'fp32',
        device: Union[str, torch.device] = 'cpu',
        jit: bool = False,
        force_quick_gelu: bool = False,
        force_custom_text: bool = False,
        force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
        image_mean: Optional[Tuple[float, ...]] = None,
        image_std: Optional[Tuple[float, ...]] = None,
        image_interpolation: Optional[str] = None,
        image_resize_mode: Optional[str] = None,  # only effective for inference
        return_transform: bool = True,
        cache_dir: Optional[str] = None,
        load_weights_only: bool = True,
        **model_kwargs,
):
    force_preprocess_cfg = merge_preprocess_kwargs(
        {},
        mean=image_mean,
        std=image_std,
        interpolation=image_interpolation,
        resize_mode=image_resize_mode,
    )

    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_text=force_custom_text,
        force_image_size=force_image_size,
        force_preprocess_cfg=force_preprocess_cfg,
        cache_dir=cache_dir,
        require_pretrained=True,
        load_weights_only=load_weights_only,
        **model_kwargs,
    )

    if not return_transform:
        return model

    preprocess = image_transform_v2(
        PreprocessCfg(**model.visual.preprocess_cfg),
        is_train=False,
    )

    return model, preprocess
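
# Usage sketch: inference-only loading; create_model is called with
# require_pretrained=True, so this raises if no weights can be resolved.
# The hub id below is illustrative.
#
#   model, preprocess = create_model_from_pretrained(
#       'hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K')
#   model.eval()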