fede97 committed
Commit · b738300
Parent(s): 186e6bc
readme
Browse files

README.md CHANGED

@@ -15,7 +15,7 @@ base_model: microsoft/Phi-4-mini-instruct
 # Model Card for LLaVA_MORE-phi_4-finetuning
 
 <div align="center">
-<img src="https://github.com/aimagelab/LLaVA-MORE/blob/main/images/image_no_back.png" width="200" height="200">
+<!-- <img src="https://github.com/aimagelab/LLaVA-MORE/blob/main/images/image_no_back.png" width="200" height="200"> -->
 <h1> 🔥 LLaVA-MORE 🔥
 
 A Comparative Study of LLMs and Visual Backbones <br>for Enhanced Visual Instruction Tuning
@@ -28,6 +28,18 @@ LLaVA-MORE integrates recent language models with diverse visual backbones. It e
 
 It is designed for multimodal reasoning, generation, and instruction following, and provides insights into the design of more effective MLLMs.
 
+## Citation
+If you make use of our work, please cite our paper:
+
+```bibtex
+@inproceedings{cocchi2025llava,
+  title={{LLaVA-MORE: A Comparative Study of LLMs and Visual Backbones for Enhanced Visual Instruction Tuning}},
+  author={Cocchi, Federico and Moratelli, Nicholas and Caffagni, Davide and Sarto, Sara and Baraldi, Lorenzo and Cornia, Marcella and Cucchiara, Rita},
+  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops},
+  year={2025}
+}
+```
+
 ## Model Details
 
 ### Model Description
@@ -94,113 +106,6 @@ export TOKENIZER_PATH=$model_path
 python -u src/llava/eval/run_llava.py --model-path $model_path --model-architecture $model_architecture --conv-mode $conversation
 ```
 
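The command above expects `$model_path`, `$model_architecture`, and `$conversation` to be set beforehand. A minimal sketch of that setup follows; the variable names come from the command itself, but the values below are placeholders, not the README's actual settings:

```bash
# Placeholder values -- substitute the settings the README defines.
export model_path=aimagelab/LLaVA_MORE-phi_4-finetuning  # this checkpoint
export model_architecture=LLaVA_Phi                      # placeholder: the architecture name the README specifies
export conversation=phi_4                                # placeholder: the conv-mode the README specifies
export TOKENIZER_PATH=$model_path
```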
-**Direct Python Inference Example:**
-
-This example demonstrates how to load the model and run a simple inference. Note that the image preprocessing, which LLaVA-style models require, is included so the example is self-contained.
-
-```python
-import numpy as np
-import torch
-import torchvision.transforms as T
-from PIL import Image
-from torchvision.transforms.functional import InterpolationMode
-from transformers import AutoModel, AutoTokenizer
-
-IMAGENET_MEAN = (0.485, 0.456, 0.406)
-IMAGENET_STD = (0.229, 0.224, 0.225)
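-# Standard ImageNet channel statistics; build_transform() applies them via T.Normalize.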
-
-def build_transform(input_size):
-    """Builds the image transformation pipeline."""
-    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
-    transform = T.Compose([
-        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
-        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
-        T.ToTensor(),
-        T.Normalize(mean=MEAN, std=STD)
-    ])
-    return transform
-
-def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
-    """Finds the target aspect ratio closest to the image's aspect ratio."""
-    best_ratio_diff = float('inf')
-    best_ratio = (1, 1)
-    area = width * height
-    for ratio in target_ratios:
-        target_aspect_ratio = ratio[0] / ratio[1]
-        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
-        if ratio_diff < best_ratio_diff:
-            best_ratio_diff = ratio_diff
-            best_ratio = ratio
-        elif ratio_diff == best_ratio_diff:
-            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
-                best_ratio = ratio
-    return best_ratio
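-# Note: when two grids match the aspect ratio equally well, the larger grid wins
-# only if the image area exceeds half that grid's total pixel capacity.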
-
-def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
-    """Dynamically preprocesses images for multi-scale input."""
-    orig_width, orig_height = image.size
-    aspect_ratio = orig_width / orig_height
-
-    target_ratios = set(
-        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
-        i * j <= max_num and i * j >= min_num)
-    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
-
-    target_aspect_ratio = find_closest_aspect_ratio(
-        aspect_ratio, target_ratios, orig_width, orig_height, image_size)
-
-    target_width = image_size * target_aspect_ratio[0]
-    target_height = image_size * target_aspect_ratio[1]
-    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
-
-    resized_img = image.resize((target_width, target_height))
-    processed_images = []
-    for i in range(blocks):
-        box = (
-            (i % (target_width // image_size)) * image_size,
-            (i // (target_width // image_size)) * image_size,
-            ((i % (target_width // image_size)) + 1) * image_size,
-            ((i // (target_width // image_size)) + 1) * image_size
-        )
-        split_img = resized_img.crop(box)
-        processed_images.append(split_img)
-    assert len(processed_images) == blocks
-    if use_thumbnail and len(processed_images) != 1:
-        thumbnail_img = image.resize((image_size, image_size))
-        processed_images.append(thumbnail_img)
-    return processed_images
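-# Example: a 1000x500 image (aspect ratio 2.0) with image_size=448 selects the
-# (2, 1) grid, yielding two 448x448 tiles plus a thumbnail when use_thumbnail=True.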
-
-def load_image(image_path, input_size=448, max_num=12):
-    """Loads and preprocesses an image."""
-    image = Image.open(image_path).convert('RGB')
-    transform = build_transform(input_size=input_size)
-    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
-    pixel_values = [transform(image) for image in images]
-    pixel_values = torch.stack(pixel_values)
-    return pixel_values
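-# Returns a float tensor of shape (num_tiles, 3, input_size, input_size).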
-
-# Load model and tokenizer
-model_id = "aimagelab/LLaVA_MORE-phi_4-finetuning"  # this specific checkpoint
-model = AutoModel.from_pretrained(
-    model_id,
-    torch_dtype=torch.bfloat16,  # or torch.float16 if your GPU does not support bfloat16
-    low_cpu_mem_usage=True,
-    trust_remote_code=True
-).eval().cuda()  # move model to GPU
-tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, use_fast=False)
-
-# Example usage
-# Replace 'path/to/your_image.jpg' with a valid image file on your system,
-# or download an example image, e.g.:
-# wget -P ./examples/images/ https://huggingface.co/aimagelab/LLaVA_MORE-llama_3_1-8B-finetuning/resolve/main/images/plot.png
-pixel_values = load_image('./examples/images/plot.png', max_num=6).to(torch.bfloat16).cuda()
-generation_config = dict(max_new_tokens=1024, do_sample=True)
-
-question = "Describe the image in detail."
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
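-# NOTE: chat() is not part of the core transformers API; this example assumes the
-# checkpoint's remote code (enabled via trust_remote_code=True) provides it.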
-print(f'User: {question}\nAssistant: {response}')
-```
-
 ## Training Details
 
 ### Training Data
@@ -251,15 +156,3 @@ For a complete list of all LLaVA-MORE checkpoints, you can refer to the [Hugging
 We thank the [LLaVA](https://github.com/haotian-liu/LLaVA.git) team for open-sourcing a modular codebase to extend and train different models within the LLaVA family. We are also happy users of the [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval.git) library, which has significantly reduced the evaluation time of our checkpoints across different datasets.
 
 We also thank [CINECA](https://www.hpc.cineca.it/systems/hardware/leonardo/) for the availability of high-performance computing resources used to train LLaVA-MORE. This work is supported by the PNRR-M4C2 project [FAIR - Future Artificial Intelligence Research](https://fondazione-fair.it/) and by the PNRR project [ITSERR - Italian Strengthening of Esfri RI Resilience](https://www.itserr.it/).
-
-## Citation
-If you make use of our work, please cite our paper:
-
-```bibtex
-@inproceedings{cocchi2025llava,
-  title={{LLaVA-MORE: A Comparative Study of LLMs and Visual Backbones for Enhanced Visual Instruction Tuning}},
-  author={Cocchi, Federico and Moratelli, Nicholas and Caffagni, Davide and Sarto, Sara and Baraldi, Lorenzo and Cornia, Marcella and Cucchiara, Rita},
-  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops},
-  year={2025}
-}
-```