import os

import torch
import torchvision.transforms as T
from datasets import load_dataset
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer

# Load the RSICD remote sensing captioning dataset from the Hugging Face Hub.
dataset = load_dataset("arampacha/rsicd")

# Dataset splits; only the test split is processed below.
output_dirs = {
    "train": "train",
    "test": "test",
    "valid": "valid"
}

# ImageNet normalization statistics, matching InternVL's vision encoder.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

def build_transform(input_size):
    """Build the preprocessing pipeline: RGB conversion, resize, tensor, normalize."""
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform

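# Illustrative note (not in the original script): build_transform(448) maps any
# PIL image to a normalized float tensor of shape (3, 448, 448), e.g.:
# t = build_transform(448)(Image.new('RGB', (800, 600)))  # -> torch.Size([3, 448, 448])
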
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Pick the tiling grid (cols, rows) whose aspect ratio best matches the input.

    Ties are broken in favor of the larger grid, provided the source image
    covers more than half the area of the candidate grid.
    """
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Enumerate all (cols, rows) grids whose tile count lies in [min_num, max_num].
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # Choose the grid that best preserves the original aspect ratio.
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # Resolution of the resized image and the number of tiles it yields.
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # Resize, then crop the image into a grid of image_size x image_size tiles.
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    # Optionally append a downscaled copy of the whole image as a global view.
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

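# Worked example (illustrative, not in the original script): an 800x600 input
# has aspect ratio ~1.33, so with max_num=12 the closest grid is (4, 3); the
# image is resized to 1792x1344 and cut into 12 tiles of 448x448, plus one
# 448x448 thumbnail when use_thumbnail=True -- 13 tiles in total.
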
def load_image(image_file, input_size=448, max_num=12):
    """Load an image file and return a stacked tensor of preprocessed tiles."""
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values

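# Usage sketch (hypothetical path, not in the original script):
# pixel_values = load_image('/path/to/sample.png', max_num=12)
# pixel_values.shape is (num_tiles, 3, 448, 448), e.g. (13, 3, 448, 448)
# for an image that tiles into a 4x3 grid plus the thumbnail.
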
# Load InternVL3-8B in bfloat16 on a single GPU.
path = '/data/yyf/model/InternVL3-8B'
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)

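# Note (assumption, not in the original script): if the model does not fit on
# one GPU, transformers also accepts device_map="auto" (via accelerate) in
# from_pretrained, in which case the trailing .cuda() call should be dropped.
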
# Greedy decoding with up to 1024 new tokens.
generation_config = dict(max_new_tokens=1024, do_sample=False)

q = """
Task Description:
You are given a remote sensing image along with five incomplete captions for the image. Your task is to generate a comprehensive and accurate caption that fully describes the key features of the image. Use the provided captions as a reference, but ensure your final caption is complete and concise, capturing all major elements of the image.
Instructions:
1. Review the five provided captions.
2. Analyze the content of the image.
3. Generate a single, comprehensive caption that describes the key elements in the image in a clear and concise manner.
4. The caption should cover all important aspects of the image without being too long.
5. Avoid introductory phrases like "This is an image of..." or "This is a satellite image showing...". Focus on directly describing the image.
6. Ensure the caption is detailed enough to convey the essential information.
Here are the remote sensing image and the five incomplete captions:\n
"""

def process_and_save(dataset_split):
    """Caption each image in the split with InternVL and write one line per image."""
    with open("/data/xcl/dataSet/RSICD_1/descriptions_test_full.txt", 'w', encoding='utf-8') as f_out:
        for idx, example in enumerate(dataset_split, start=1):
            # Images are assumed to have been exported beforehand as 1.png, 2.png, ...
            filename = f"{idx}.png"
            filepath = os.path.join(r"/data/xcl/dataSet/RSICD_1/test_png", filename)
            pixel_values = load_image(filepath).to(torch.bfloat16).cuda()
            # Feed the five reference captions alongside the image placeholder token.
            caption = str(example["captions"])
            question = q + '<image>\n' + f"Captions: {caption}"
            response = model.chat(tokenizer, pixel_values, question, generation_config)
            # Collapse the response to a single line before writing it out.
            response_single_line = response.strip().replace('\n', ' ').replace('\r', ' ')
            print(f'User: {question}\nAssistant: {response_single_line}')
            f_out.write(f"{idx} {response_single_line}\n")

# Only the test split is processed; the output path above is test-specific.
for split in output_dirs:
    if split == "test":
        print(f"Processing {split} set ...")
        process_and_save(dataset[split])
print("All done!")