import gradio as gr
import os
import numpy as np
import torch
import torchvision.transforms as T

from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
import matplotlib.pyplot as plt
import glob
import spaces

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

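# Build the torchvision preprocessing pipeline: convert to RGB, resize to a
# square tile, convert to a tensor, and normalize with ImageNet statistics.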
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform

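# Pick the tiling grid (columns x rows) whose aspect ratio is closest to the
# input image's aspect ratio; ties are broken in favor of the grid that covers
# more of the original image area.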
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

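# Split the image into up to max_num tiles of image_size x image_size that best
# preserve its aspect ratio, optionally appending a downscaled thumbnail of the
# whole image as an extra tile.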
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Enumerate all (columns, rows) grids whose tile count lies in [min_num, max_num].
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1)
        if i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # Choose the grid closest to the image's aspect ratio.
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # Size of the resized canvas and the number of tiles it will be cut into.
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # Resize, then crop the canvas into image_size x image_size tiles.
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

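# Load an image file, tile it with dynamic_preprocess, and stack the transformed
# tiles into a single (num_tiles, 3, input_size, input_size) tensor.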
def load_image(image_file, input_size=448, max_num=12):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values

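# Monkey-patch torch.Tensor.cuda so that .cuda() becomes a no-op on machines
# without a GPU; this lets the same code run on a CPU-only Space.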
original_cuda = torch.Tensor.cuda


def safe_cuda(self, *args, **kwargs):
    if torch.cuda.is_available():
        return original_cuda(self, *args, **kwargs)
    else:
        return self


torch.Tensor.cuda = safe_cuda

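# Load the fine-tuned Vintern model and its tokenizer from the Hugging Face Hub.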
model_name = "YuukiAsuna/Vintern-1B-v2-ViTable-docvqa"

model = AutoModel.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True
).eval().cuda()

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=False)

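# Gradio chat handler: reuse the image from the first turn when no new file is
# uploaded, rebuild the conversation history in the format expected by
# model.chat(), and return the model's answer.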
def chat(message, history):
    print(history)
    print(message)
    # The conversation needs an image: either one uploaded with this message
    # or the one attached to the first turn of the history.
    if len(history) == 0 and len(message["files"]) == 0:
        return "Please upload an image so the model has something to answer questions about."
    if len(history) == 0 or len(message["files"]) != 0:
        test_image = message["files"][0]
    else:
        test_image = history[0][0][0]

    pixel_values = load_image(test_image, max_num=12).to(torch.bfloat16).cuda()
    generation_config = dict(max_new_tokens=1024, do_sample=True, num_beams=3, repetition_penalty=2.5)

    if len(history) == 0:
        question = '<image>\n' + message["text"]
        response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config,
                                            history=None, return_history=True)
    else:
        # Rebuild the (question, answer) history, tagging the first question with
        # the <image> placeholder when no new image was uploaded this turn.
        conv_history = []
        for chat_pair in history:
            if chat_pair[1] is not None:
                if len(conv_history) == 0 and len(message["files"]) == 0:
                    chat_pair[0] = '<image>\n' + chat_pair[0]
                conv_history.append(tuple(chat_pair))
        print(conv_history)
        if len(message["files"]) != 0:
            question = '<image>\n' + message["text"]
        else:
            question = message["text"]
        response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config,
                                            history=conv_history, return_history=True)

    print(f'User: {question}\nAssistant: {response}')

    return response

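# Custom CSS: keep the chat area at a fixed height and render user-uploaded
# images at a reasonable size inside the chat bubbles.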
CSS = """
#component-3 {
    height: 50dvh !important;
    transform-origin: top; /* make sure the element expands from the top down */
    border-style: solid;
    overflow: hidden;
    flex-grow: 1;
    min-width: min(160px, 100%);
    border-width: var(--block-border-width);
}
/* Make sure images inside buttons with the given aria-label render correctly */
button.svelte-1lcyrx4[aria-label="user's message: a file of type image/jpeg, "] img.svelte-1pijsyv {
    width: 100%;
    object-fit: contain;
    height: 100%;
    border-radius: 13px; /* round the image corners */
    max-width: 50vw; /* limit the image width */
}
/* Set the button height and allow text selection for buttons with the given aria-label */
button.svelte-1lcyrx4[aria-label="user's message: a file of type image/jpeg, "] {
    user-select: text;
    text-align: left;
    height: 300px;
}
/* Round corners and limit width for images outside the avatar container */
.message-wrap.svelte-1lcyrx4 > div.svelte-1lcyrx4 .svelte-1lcyrx4:not(.avatar-container) img {
    border-radius: 13px;
    max-width: 50vw;
}
.message-wrap.svelte-1lcyrx4 .message.svelte-1lcyrx4 img {
    margin: var(--size-2);
    max-height: 500px;
}
"""

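# Build the multimodal chat UI and launch the app with request queueing enabled.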
demo = gr.ChatInterface(
    fn=chat,
    description="""Try [Vintern-1B-v2-ViTable-docvqa](https://huggingface.co/YuukiAsuna/Vintern-1B-v2-ViTable-docvqa) in this demo. Vintern-1B-v2-ViTable-docvqa is a finetuned version of [Vintern-1B-v2](https://huggingface.co/5CD-AI/Vintern-1B-v2). This space is running on a CPU, which makes the response time slow. You can use [this Colab](https://colab.research.google.com/drive/1ricMh4BxntoiXIT2CnQvAZjrGZTtx4gj?usp=sharing) with a free T4 GPU for faster responses.""",
    title="Vintern-1B-v2-ViTable-docvqa",
    multimodal=True,
    css=CSS
)
demo.queue().launch()