"""
materialize.py
Factory class for initializing Vision Backbones, LLM Backbones, and VLMs from a set registry; provides and exports
individual functions for clear control flow.
"""
from typing import Optional, Tuple
from transformers import PreTrainedTokenizerBase
from prismatic.models.backbones.llm import LLaMa2LLMBackbone, LLMBackbone, MistralLLMBackbone, PhiLLMBackbone
from prismatic.models.backbones.vision import (
CLIPViTBackbone,
DinoCLIPViTBackbone,
DinoSigLIPViTBackbone,
DinoV2ViTBackbone,
ImageTransform,
IN1KViTBackbone,
SigLIPViTBackbone,
VisionBackbone,
)
from prismatic.models.vlms import PrismaticVLM
# === Registries =>> Maps ID --> {cls(), kwargs} :: Different Registries for Vision Backbones, LLM Backbones, VLMs ===
# fmt: off
# === Vision Backbone Registry ===
# Registry mapping a vision backbone ID string --> the backbone class to instantiate plus constructor kwargs.
# Consumed by `get_vision_backbone_and_transform`; the ID is also forwarded as the first positional argument
# to the backbone class, so entries here must match the IDs each class recognizes internally.
VISION_BACKBONES = {
    # === 224px Backbones ===
    "clip-vit-l": {"cls": CLIPViTBackbone, "kwargs": {"default_image_size": 224}},
    "siglip-vit-so400m": {"cls": SigLIPViTBackbone, "kwargs": {"default_image_size": 224}},
    "dinov2-vit-l": {"cls": DinoV2ViTBackbone, "kwargs": {"default_image_size": 224}},
    "in1k-vit-l": {"cls": IN1KViTBackbone, "kwargs": {"default_image_size": 224}},
    "dinosiglip-vit-so-224px": {"cls": DinoSigLIPViTBackbone, "kwargs": {"default_image_size": 224}},
    # === Assorted CLIP Backbones ===
    "clip-vit-b": {"cls": CLIPViTBackbone, "kwargs": {"default_image_size": 224}},
    "clip-vit-l-336px": {"cls": CLIPViTBackbone, "kwargs": {"default_image_size": 336}},
    # === Assorted SigLIP Backbones ===
    "siglip-vit-b16-224px": {"cls": SigLIPViTBackbone, "kwargs": {"default_image_size": 224}},
    "siglip-vit-b16-256px": {"cls": SigLIPViTBackbone, "kwargs": {"default_image_size": 256}},
    "siglip-vit-b16-384px": {"cls": SigLIPViTBackbone, "kwargs": {"default_image_size": 384}},
    "siglip-vit-so400m-384px": {"cls": SigLIPViTBackbone, "kwargs": {"default_image_size": 384}},
    # === Fused Backbones (two vision encoders whose features are combined) ===
    "dinoclip-vit-l-336px": {"cls": DinoCLIPViTBackbone, "kwargs": {"default_image_size": 336}},
    "dinosiglip-vit-so-384px": {"cls": DinoSigLIPViTBackbone, "kwargs": {"default_image_size": 384}},
}
# === Language Model Registry ===
# Registry mapping an LLM backbone ID string --> the backbone class to instantiate plus constructor kwargs.
# Consumed by `get_llm_backbone_and_tokenizer`; the ID is forwarded as the first positional argument, so
# variant selection (7b vs 13b, pure vs chat) is resolved inside each backbone class from the ID itself.
LLM_BACKBONES = {
    # === LLaMa-2 Pure (Non-Chat) Backbones ===
    "llama2-7b-pure": {"cls": LLaMa2LLMBackbone, "kwargs": {}},
    "llama2-13b-pure": {"cls": LLaMa2LLMBackbone, "kwargs": {}},
    # === LLaMa-2 Chat Backbones ===
    "llama2-7b-chat": {"cls": LLaMa2LLMBackbone, "kwargs": {}},
    "llama2-13b-chat": {"cls": LLaMa2LLMBackbone, "kwargs": {}},
    # === Vicuna-v1.5 Backbones (LLaMa-2 derivatives, so they reuse the LLaMa-2 backbone class) ===
    "vicuna-v15-7b": {"cls": LLaMa2LLMBackbone, "kwargs": {}},
    "vicuna-v15-13b": {"cls": LLaMa2LLMBackbone, "kwargs": {}},
    # === Mistral v0.1 Backbones ===
    "mistral-v0.1-7b-pure": {"cls": MistralLLMBackbone, "kwargs": {}},
    "mistral-v0.1-7b-instruct": {"cls": MistralLLMBackbone, "kwargs": {}},
    # === Phi-2 Backbone ===
    "phi-2-3b": {"cls": PhiLLMBackbone, "kwargs": {}},
}
# fmt: on
def get_vision_backbone_and_transform(
    vision_backbone_id: str, image_resize_strategy: str
) -> Tuple[VisionBackbone, ImageTransform]:
    """
    Look up `vision_backbone_id` in the VISION_BACKBONES registry and instantiate the matching backbone.

    Returns the backbone (an nn.Module wrapper) together with its default image transform, as a pair.
    Raises ValueError for IDs not present in the registry.
    """
    # Guard clause: fail fast on unknown IDs before touching the registry entry.
    if vision_backbone_id not in VISION_BACKBONES:
        raise ValueError(f"Vision Backbone `{vision_backbone_id}` is not supported!")

    registry_entry = VISION_BACKBONES[vision_backbone_id]
    backbone: VisionBackbone = registry_entry["cls"](
        vision_backbone_id, image_resize_strategy, **registry_entry["kwargs"]
    )

    return backbone, backbone.get_image_transform()
def get_llm_backbone_and_tokenizer(
    llm_backbone_id: str,
    llm_max_length: int = 2048,
    hf_token: Optional[str] = None,
    inference_mode: bool = False,
) -> Tuple[LLMBackbone, PreTrainedTokenizerBase]:
    """
    Look up `llm_backbone_id` in the LLM_BACKBONES registry and instantiate the matching LLM backbone.

    Parameters:
        llm_backbone_id: Key into LLM_BACKBONES (e.g., "llama2-7b-pure"); also forwarded to the backbone class.
        llm_max_length: Maximum sequence length passed through to the backbone (default: 2048).
        hf_token: Optional HuggingFace Hub token for gated model downloads.
        inference_mode: Whether to initialize the backbone for inference rather than training.

    Returns a tuple of the instantiated backbone and its associated tokenizer.
    Raises ValueError for IDs not present in the registry.
    """
    # Fix: added the docstring missing relative to the file's other factory functions; guard clause keeps
    # error handling consistent with `get_vision_backbone_and_transform`.
    if llm_backbone_id not in LLM_BACKBONES:
        raise ValueError(f"LLM Backbone `{llm_backbone_id}` is not supported!")

    llm_cfg = LLM_BACKBONES[llm_backbone_id]
    llm_backbone: LLMBackbone = llm_cfg["cls"](
        llm_backbone_id,
        llm_max_length=llm_max_length,
        hf_token=hf_token,
        inference_mode=inference_mode,
        **llm_cfg["kwargs"],
    )

    return llm_backbone, llm_backbone.get_tokenizer()
def get_vlm(
    model_id: str,
    arch_specifier: str,
    vision_backbone: VisionBackbone,
    llm_backbone: LLMBackbone,
    enable_mixed_precision_training: bool = True,
) -> PrismaticVLM:
    """
    Construct and return a PrismaticVLM from pre-built vision and LLM backbones.

    A thin indirection layer so that future VLM variants can be dispatched from one place
    without touching callers.
    """
    vlm = PrismaticVLM(
        model_id,
        vision_backbone,
        llm_backbone,
        enable_mixed_precision_training=enable_mixed_precision_training,
        arch_specifier=arch_specifier,
    )
    return vlm