# Source: Hugging Face repo "20251119_temp1" / inference5.py
# (uploaded with huggingface_hub by user xcll, commit 09b5f86, verified)
# Generate edit instructions for the junshi (military) images, for visual editing.
import numpy as np
import torch
import torchvision.transforms as T
from decord import VideoReader, cpu
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
import json
import os
import os
import random
from PIL import Image
# Channel-wise (RGB) ImageNet normalization statistics, applied by build_transform.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
    """Build the preprocessing pipeline for a single image tile.

    Returns a torchvision transform that maps a PIL image to a normalized
    float tensor of shape (3, input_size, input_size).
    """
    return T.Compose([
        # Force 3-channel RGB first (handles grayscale / palette / RGBA inputs).
        T.Lambda(lambda im: im if im.mode == 'RGB' else im.convert('RGB')),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Select the tiling grid from *target_ratios* best matching *aspect_ratio*.

    Each candidate is a (cols, rows) pair. The grid whose cols/rows ratio is
    nearest to the image's aspect ratio wins; exact ties are broken in favor of
    the larger grid when the source image has enough pixels to fill more than
    half of that grid's total tile area.
    """
    image_area = width * height
    best = (1, 1)
    best_diff = float('inf')
    for ratio in target_ratios:
        cols, rows = ratio
        diff = abs(aspect_ratio - cols / rows)
        if diff < best_diff:
            best_diff = diff
            best = ratio
        elif diff == best_diff and image_area > 0.5 * image_size * image_size * cols * rows:
            # Tie: prefer the denser grid only if the image is large enough.
            best = ratio
    return best
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    """Split *image* into a grid of square image_size tiles.

    A (cols, rows) grid with min_num <= cols*rows <= max_num tiles is chosen to
    best match the image's aspect ratio; the image is resized to fill the grid
    exactly and cropped into tiles in row-major order. When *use_thumbnail* is
    True and more than one tile was produced, a whole-image thumbnail is
    appended as the final element. Returns the list of PIL tiles.
    """
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Every distinct (cols, rows) grid whose tile count is within bounds,
    # ordered by total tile count.
    candidate_grids = sorted(
        {
            (cols, rows)
            for total in range(min_num, max_num + 1)
            for cols in range(1, total + 1)
            for rows in range(1, total + 1)
            if min_num <= cols * rows <= max_num
        },
        key=lambda grid: grid[0] * grid[1],
    )

    # Pick the grid closest to the source aspect ratio.
    cols, rows = find_closest_aspect_ratio(
        aspect_ratio, candidate_grids, orig_width, orig_height, image_size)

    # Resize so the grid tiles the image exactly, then crop row by row.
    target_width = image_size * cols
    target_height = image_size * rows
    resized = image.resize((target_width, target_height))

    tiles = []
    for row in range(rows):
        for col in range(cols):
            left = col * image_size
            top = row * image_size
            tiles.append(resized.crop((left, top, left + image_size, top + image_size)))
    assert len(tiles) == cols * rows

    if use_thumbnail and len(tiles) != 1:
        tiles.append(image.resize((image_size, image_size)))
    return tiles
def load_image(image_file, input_size=448, max_num=12):
    """Open *image_file*, tile it, and return a stacked tensor of tiles.

    The result has shape (num_tiles, 3, input_size, input_size), where the
    last tile is a whole-image thumbnail whenever more than one tile exists.
    """
    img = Image.open(image_file).convert('RGB')
    tiles = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
    to_tensor = build_transform(input_size=input_size)
    return torch.stack([to_tensor(tile) for tile in tiles])
# If you have an 80G A100 GPU, you can put the entire model on a single GPU.
# Otherwise, you need to load a model using multiple GPUs, please refer to the `Multiple GPUs` section.
# Local path of the InternVL2.5-8B checkpoint.
path = '/data/yyf/model/InternVL2_5-8B'
# Load the vision-language model in bfloat16 on a single GPU.
# trust_remote_code is required because InternVL ships custom modeling code.
model = AutoModel.from_pretrained(
path,
torch_dtype=torch.bfloat16,
low_cpu_mem_usage=True,
use_flash_attn=True,
trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
# generation_config = dict(max_new_tokens=1024, do_sample=False)
# Sampling enabled for more varied instruction phrasing across images.
generation_config = dict(max_new_tokens=1024, do_sample=True, temperature=0.9)
# Prompt asking the model for a foreground-entity replacement instruction.
# NOTE: the <image> placeholder is where the image tokens are inserted by model.chat.
question = """
You are given an image showing one or more main entities (such as trucks, ships, airplanes or tanks).
You should first carefully observe what the main entities in the image are.
Then, your task is to write a concise and clear English instruction that describes how to edit or replace the foreground entities in the image.
The instruction should specify what changes to make to the main objects only.
Example: “Replace the trucks with airplanes” or “Replace the ships with airplanes”
Avoid mentioning the background or lighting changes.
Output only one short, natural-sounding English instruction.
Here is the image:
<image>
"""
# Alternative prompt: background-only edit instructions (kept for reference).
# question = """
# You are given an image showing a military scene.
# Your task is to write a concise and clear English instruction that describes how to edit or replace the background.
# The new background should still remain consistent with a military environment — for example, changing the battlefield type, terrain, weather, or time of day.
# Example: “Replace the background with a desert battlefield under cloudy skies” or “Change the background to a snowy training ground.”
# Avoid introducing non-military environments such as cities, offices, or civilian areas.
# Output only one short, natural-sounding English instruction.
# """
# Alternative prompt: simultaneous foreground + background edits (kept for reference).
# question = """
# You are given an image showing a military scene.
# Your task is to write a concise and clear English instruction that describes how to edit or replace both the foreground entities and the background simultaneously.
# The new foreground and background should together form a coherent military scene — for example, changing ground vehicles to aircraft while turning the environment into an airbase, or replacing soldiers with naval units in a sea battlefield.
# Avoid introducing any non-military contexts such as cities, civilian streets, or commercial areas.
# Output only one short, natural-sounding English instruction.
# """
# Mapping of image filename -> generated single-line edit instruction.
data_dict = {}

# Directory containing the military-scene images to annotate.
img_dir = "/data/xcl/dataSet/junshi_images"

# Walk every entry in the directory; only regular files are processed.
for filename in os.listdir(img_dir):
    img_path = os.path.join(img_dir, filename)
    if not os.path.isfile(img_path):
        continue  # skip sub-directories
    try:
        # Tile + normalize the image and move it to GPU as bfloat16.
        pixel_values = load_image(img_path, max_num=12).to(torch.bfloat16).cuda()
        print(f"已加载: {img_path}")
    except Exception as e:
        print(f"加载 {img_path} 出错: {e}")
        # BUG FIX: the original code only printed the error and then fell
        # through to model.chat, reusing a stale pixel_values from the previous
        # iteration (or crashing with NameError on the first one). Skip the
        # broken image instead.
        continue
    # Single-image, single-round conversation.
    response = model.chat(tokenizer, pixel_values, question, generation_config)
    # Collapse newlines so each instruction is one JSON-friendly line.
    response_single_line = response.replace('\n', ' ').replace('\r', ' ')
    print(f'User: {question}\nAssistant: {response}')
    data_dict[filename] = response_single_line

# Persist all collected instructions as pretty-printed UTF-8 JSON.
with open(r'/data/xcl/dataSet/junshi_images_entity_gt.json', 'w', encoding='utf-8') as file:
    json.dump(data_dict, file, ensure_ascii=False, indent=4)