# backup/tools/llava.py
import torch
import torch.nn as nn
import sys
# Add the folder to sys.path
sys.path.append("/gpfs/home/ym621/UniPointMap")
import open_clip
from transformers import LlavaForConditionalGeneration, AutoProcessor, AutoTokenizer, AutoConfig
from transformers.activations import GELUActivation
class LlavaMultiModalProjector(nn.Module):
    """Two-layer MLP that maps vision features into the LLM embedding space."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.linear_1 = nn.Linear(in_features, out_features, bias=True)
        self.act = GELUActivation()
        self.linear_2 = nn.Linear(out_features, out_features, bias=True)

    def forward(self, x):
        x = self.linear_1(x)
        x = self.act(x)
        x = self.linear_2(x)
        return x
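# Sanity-check sketch (assumed dims): LlavaMultiModalProjector(512, 4096) should map
# [batch, seq, 512] vision features to [batch, seq, 4096]; the sequence length 49
# (7x7 patch tokens for ViT-B/32 at 224px) is only illustrative.
_proj = LlavaMultiModalProjector(512, 4096)
assert _proj(torch.randn(2, 49, 512)).shape == (2, 49, 4096)
del _proj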
# ---------------------------------------------------------
# 1. Load OpenCLIP Vision Encoder (ViT-B/32)
# ---------------------------------------------------------
vision_model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-B-32", pretrained="openai"  # choices: openai, laion2b, laion400m, etc.
)
# Keep only the vision tower
custom_vision_tower = vision_model.visual
custom_hidden_size = custom_vision_tower.output_dim # ViT-B/32 = 512
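# Sketch (assumes 224x224 inputs): open_clip's visual tower returns pooled,
# projected features of size output_dim (512 for ViT-B/32), not the per-patch
# hidden states a Hugging Face CLIPVisionModel would expose.
with torch.no_grad():
    _feats = custom_vision_tower(torch.randn(1, 3, 224, 224))
print("OpenCLIP vision tower output shape:", _feats.shape)  # expected: [1, 512]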
# ---------------------------------------------------------
# 2. Load Llava Model (base LLaVA checkpoint)
# ---------------------------------------------------------
model_id = "liuhaotian/llava-v1.5-7b"  # or 13b
# NOTE: this repo ships the original LLaVA-format weights; the transformers-native
# checkpoint for LlavaForConditionalGeneration is "llava-hf/llava-1.5-7b-hf".
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    device_map="auto",
)
# breakpoint()  # leftover debugging stop
# ---------------------------------------------------------
# 3. Replace Vision Tower
# ---------------------------------------------------------
# Replace with OpenCLIP ViT-B/32.
# NOTE: open_clip's visual tower returns pooled features rather than the per-patch
# hidden states a CLIPVisionModel exposes, so downstream feature selection may need
# adjusting before generation works end to end.
model.vision_tower = custom_vision_tower
# Fix mm_projector to map vision_dim -> LLM hidden size
llm_hidden_size = model.config.text_config.hidden_size  # 4096 for LLaVA-7B
model.multi_modal_projector = LlavaMultiModalProjector(
    in_features=custom_hidden_size,  # 512 from OpenCLIP ViT-B/32
    out_features=llm_hidden_size,    # match LLM hidden size
).to(device=model.device, dtype=model.dtype)  # keep dtype/device consistent with the loaded model
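# Sketch: quick shape check that the new projector maps OpenCLIP's 512-d features
# to the LLM hidden size, using the projector's own device/dtype.
_p = next(model.multi_modal_projector.parameters())
with torch.no_grad():
    _projected = model.multi_modal_projector(
        torch.randn(1, custom_hidden_size, device=_p.device, dtype=_p.dtype)
    )
print("Projected feature shape:", _projected.shape)  # expected: [1, llm_hidden_size]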
tokenizer = AutoTokenizer.from_pretrained(model_id)
config = AutoConfig.from_pretrained(model_id)
# ---------------------------------------------------------
# 4. Update Config
# ---------------------------------------------------------
model.config.vision_config.hidden_size = custom_hidden_size
model.config.vision_config.image_size = getattr(custom_vision_tower, "image_size", 224) # ViT-B/32 default
model.config.vision_config.patch_size = getattr(custom_vision_tower, "patch_size", 32)
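# Sketch: print the updated vision_config fields; image_size and patch_size depend on
# which attributes the open_clip tower actually exposes (see the getattr defaults above).
print(
    "vision_config:",
    model.config.vision_config.hidden_size,
    model.config.vision_config.image_size,
    model.config.vision_config.patch_size,
)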
# ---------------------------------------------------------
# 5. Update Processor
# ---------------------------------------------------------
# processor = AutoProcessor.from_pretrained(model_id)
image_processor = preprocess # swap OpenCLIP preprocessing
tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False)
processor = AutoProcessor.from_pretrained("liuhaotian/llava-v1.5-7b-pretrain")
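# Sketch (dummy image): the swapped-in OpenCLIP `preprocess` transform is what maps
# a PIL image to the [3, 224, 224] tensor the custom tower expects; the AutoProcessor
# loaded above does not know about this transform.
from PIL import Image
_dummy_img = Image.new("RGB", (640, 480))
print("OpenCLIP preprocess output shape:", preprocess(_dummy_img).shape)  # expected: [3, 224, 224]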
# ---------------------------------------------------------
# 6. Test Inference (image + text prompt)
# ---------------------------------------------------------
from PIL import Image

# image = Image.open("test.jpg").convert("RGB")
prompt = "USER: Describe this image in detail. ASSISTANT:"
# NOTE: LLaVA-style processors usually expect an <image> placeholder in the prompt
# and an `images=` argument; `inputs` below assumes the image line above has been
# uncommented.
inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=200)
print(processor.tokenizer.decode(output_ids[0], skip_special_tokens=True))