import numpy as np
import torch
import torchvision.transforms as T
import gradio as gr
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
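# Standard ImageNet channel statistics, used below to normalize each tile;
# this matches InternVL-style preprocessing (Vintern-1B is an InternVL-family
# model, per its model card).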
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform
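# Illustrative check (not executed by the app): the transform maps any PIL
# image to a normalized float tensor of shape [3, input_size, input_size]:
#   build_transform(448)(Image.new('RGB', (640, 480))).shape  # torch.Size([3, 448, 448])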
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            # On a tie, prefer the grid with more tiles, but only if the image
            # is large enough to fill at least half of the tiled area
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
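# Illustrative tie-break with the defaults used below: for a 1000x1000 image
# (aspect ratio 1.0), the grids (1, 1), (2, 2) and (3, 3) all match exactly;
# each tie resolves toward more tiles while 1000 * 1000 > 0.5 * 448 * 448 * i * j,
# so the search returns (3, 3), i.e. a 3x3 grid of 448px tiles.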
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height
    # enumerate the candidate tile grids (i columns x j rows) whose total
    # tile count lies between min_num and max_num
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
    # find the grid whose aspect ratio is closest to the image's
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)
    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
    # resize the image, then split it into image_size x image_size tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        # add a full-image thumbnail so the model keeps global context
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images
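# Continuing the example above: a 1000x1000 input resolves to a (3, 3) grid,
# so dynamic_preprocess(img, image_size=448, use_thumbnail=True) returns
# 9 tiles + 1 thumbnail = 10 PIL images, each 448x448.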
def load_image(image, input_size=448, max_num=12):
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(img) for img in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values
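# For the same 1000x1000 example, load_image(img) returns a tensor of shape
# [10, 3, 448, 448] -- one row per tile plus the thumbnail. With max_num=6
# (as used in generate_response below) the grid caps at (2, 2), giving
# [5, 3, 448, 448].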
# Load model and tokenizer globally to avoid reloading for each request
def load_models():
    model = AutoModel.from_pretrained(
        "5CD-AI/Vintern-1B-v3_5",
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True,
        use_flash_attn=False,  # keep attention in plain PyTorch so CPU-only hosts work
    ).eval()
    # Move model to GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    tokenizer = AutoTokenizer.from_pretrained("5CD-AI/Vintern-1B-v3_5", trust_remote_code=True, use_fast=False)
    return model, tokenizer, device
model, tokenizer, device = load_models()
@torch.inference_mode()
def generate_response(image, question):
    if image is None:
        return "Vui lòng tải lên một hình ảnh."  # "Please upload an image."
    if not question:
        # Default prompt: "Extract the main information in the image and return it as markdown."
        question = '<image>\nTrích xuất thông tin chính trong ảnh và trả về dạng markdown.'
    # Convert to a PIL RGB image, whether a file path or an image was passed
    if isinstance(image, str):
        image = Image.open(image).convert('RGB')
    else:
        image = image.convert("RGB")
    # Process the image into stacked tiles in the model's dtype, on its device
    pixel_values = load_image(image, max_num=6).to(torch.bfloat16).to(device)
    # Generate response (deterministic beam search, no sampling)
    generation_config = dict(max_new_tokens=1024, do_sample=False, num_beams=3, repetition_penalty=2.5)
    response, _ = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
    return response
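# Illustrative call (hypothetical file name):
#   print(generate_response(Image.open("invoice.jpg"), "<image>\nĐây là hình ảnh gì?"))  # "What is this image?"
# With question="" the default markdown-extraction prompt above is used.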
# Create Gradio interface
title = "🇻🇳 Vietnamese Image Captioning"
description = "Tải lên một bức ảnh và mô hình sẽ sinh mô tả tiếng Việt tương ứng."  # "Upload an image and the model will generate a matching Vietnamese description."

with gr.Blocks(title=title) as demo:
    gr.Markdown("# Vintern-1B-v3.5 Demo")
    gr.Markdown(description)
    gr.Markdown("Tải lên hình ảnh và đặt câu hỏi (hoặc để trống để trích xuất thông tin).")  # "Upload an image and ask a question (or leave blank to extract information)."
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Hình ảnh")
            question_input = gr.Textbox(
                placeholder="<image>\nTrích xuất thông tin chính trong ảnh và trả về dạng markdown.",
                label="Câu hỏi (để trống để trích xuất thông tin)"
            )
            submit_btn = gr.Button("Gửi")  # "Submit"
        with gr.Column():
            output = gr.Markdown(label="Kết quả")  # "Result"
    submit_btn.click(
        fn=generate_response,
        inputs=[image_input, question_input],
        outputs=output,
    )
    # gr.Examples(
    #     [
    #         ["example1.jpg", "<image>\nTrích xuất thông tin chính trong ảnh và trả về dạng markdown."],
    #         ["example2.jpg", "<image>\nĐây là hình ảnh gì?"],
    #     ],
    #     inputs=[image_input, question_input],
    # )

if __name__ == "__main__":
    demo.launch()