# Caption-generation script: runs InternVL3 over PointMapVerse safetensors shards
# and writes CLIP-style captions to per-segment JSON files.
import os
import json
import torch
import argparse
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download, list_repo_files
from transformers import AutoTokenizer, AutoModel
from PIL import Image
from torchvision import transforms as T
from torchvision.transforms.functional import InterpolationMode
# -----------------------
# Arguments
# -----------------------
# CLI: which slice of the shard list this worker should process.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--segment",
    type=int,
    required=True,
    help="Segment index (0–7)",
)
parser.add_argument(
    "--num_segments",
    type=int,
    default=8,
    help="Total number of segments",
)
args = parser.parse_args()
# -----------------------
# Load from HuggingFace
# -----------------------
def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
    """Load one safetensors file from the local HF cache and return its tensors.

    Pinned to a fixed revision and restricted to local files only, so no
    network access happens here — the file must already be cached.
    """
    path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        revision="7bb7c7f3d379c5145bb06d2cf0949c66ac9a2c4e",
        repo_type=repo_type,
        local_files_only=True,
    )
    return load_file(path)
# -----------------------
# Prompt Template
# -----------------------
# Sent verbatim to the model for every image; it demands exactly 10
# numbered, spatially-grounded captions. Runtime string — edit with care.
PROMPT = """You are given an image of the indoor scene. Your task is to generate captions that are 100% accurate to the image, with no hallucination.
Instructions:
- Generate exactly 10 different CLIP-Style captions for the image.
- Captions must clearly describe the visible objects, their attributes, and their spatial relationships.
- Use spatial prepositions such as: on, under, next to, beside, behind, in front of, between, above, below.
- Focus only on what is visible in the image. Do not speculate or add details that are not present.
- Be precise and factual. Avoid opinions, emotions, or subjective language.
Examples:
1. Gray laptop centered on desk, coffee mug to the side, rolling chair positioned behind.
2. Light gray couch, wooden table placed in front, standing lamp on the left.
3. White bed with pillows, small wooden nightstand beside, bedside lamp on top.
4. Rectangular table with two chairs around, bright window in background, curtain partly covering.
Now, generate 10 captions following these rules. **Output must be a numbered list (1. ... 2. ... up to 10.).**
"""
# -----------------------
# InternVL3 Model + Tokenizer
# -----------------------
path = "OpenGVLab/InternVL3-38B"
# NOTE(review): load_in_8bit=True (bitsandbytes quantization) is combined with
# torch_dtype=bfloat16; 8-bit loading normally places weights on GPU itself —
# confirm this interacts correctly with the .cuda() calls done per-image below.
# use_flash_attn is a custom kwarg handled by the model's remote code
# (trust_remote_code=True), not a standard transformers argument.
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    load_in_8bit=True,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True
).eval()
# Slow tokenizer (use_fast=False) as recommended for this remote-code model.
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
# -----------------------
# Image Preprocessing (reuse your load_image)
# -----------------------
# NOTE(review): these two imports duplicate the ones at the top of the file;
# harmless (Python caches modules) but one copy could be removed.
from torchvision import transforms as T
from torchvision.transforms.functional import InterpolationMode
# Standard ImageNet channel statistics consumed by T.Normalize below.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size=448):
    """Build the preprocessing pipeline: force RGB, resize to a square,
    convert to tensor, and normalize with ImageNet statistics."""
    steps = [
        T.Lambda(lambda im: im if im.mode == 'RGB' else im.convert('RGB')),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ]
    return T.Compose(steps)
def load_image(image_pil, input_size=448):
    """Preprocess a single PIL image into a batched [1, C, H, W] tensor."""
    pipeline = build_transform(input_size=input_size)
    tensor = pipeline(image_pil)
    return tensor.unsqueeze(0)  # prepend batch dimension
# -----------------------
# Loop Over Safetensors
# -----------------------
repo_id = "MatchLab/PointMapVerse"  # change this to your repo

# Enumerate every .safetensors shard in the dataset repo.
all_files = list_repo_files(repo_id, repo_type="dataset")
safetensor_files = [f for f in all_files if f.endswith(".safetensors")]

# Split the shard list into contiguous segments. The last segment absorbs
# the remainder so every file is covered when the count is not divisible.
segment_size = len(safetensor_files) // args.num_segments
start = args.segment * segment_size
end = (args.segment + 1) * segment_size if args.segment < args.num_segments - 1 else len(safetensor_files)
segment_files = safetensor_files[start:end]
print(f"Segment {args.segment}: processing {len(segment_files)} files")

BATCH_SIZE = 32
os.makedirs("captions", exist_ok=True)
out_file = f"captions/captions_segment{args.segment}.json"

# Resume support: reload any captions already written for this segment.
# (Fix: the original initialized `results = {}` and then redundantly
# re-assigned it in an `else` branch; collapsed to a single initialization.)
results = {}
if os.path.exists(out_file):
    with open(out_file, "r") as f:
        results = json.load(f)
    print(f"Resuming from {len(results)} existing results in {out_file}")
# Main inference loop: for each shard, caption its images in batches and
# checkpoint the JSON output after every batch.
# (Fix: removed a stray trailing " |" artifact on the final print line,
# which was a syntax error.)
for st_file in segment_files:
    print(f"Processing {st_file} ...")
    data = load_safetensor_from_hf(repo_id, st_file)
    images = data['color_images']  # shape: [N,H,W,C]
    # --- batch loop ---
    for b_start in range(0, len(images), BATCH_SIZE):
        batch_imgs = images[b_start:b_start + BATCH_SIZE]
        pixel_values_list, num_patches_list, keys_to_process = [], [], []
        for idx, img in enumerate(batch_imgs):
            # Key uniquely identifies (shard, image index); also the resume key.
            key = f"{st_file.split('.safetensors')[0]}_{b_start + idx}"
            # skip if already generated
            if key in results:
                continue
            # assumes pixel values are already in 0-255 — TODO confirm
            # the upstream storage format before the uint8 cast
            image_pil = Image.fromarray((img.cpu().numpy()).astype("uint8"))
            pixel_tensor = load_image(image_pil).to(torch.bfloat16).cuda()
            # Each preprocessed image is [1, C, H, W], so size(0) == 1 patch.
            num_patches_list.append(pixel_tensor.size(0))
            pixel_values_list.append(pixel_tensor)
            keys_to_process.append(key)
        if not keys_to_process:
            continue  # whole batch already done
        # concat into batch
        pixel_values = torch.cat(pixel_values_list, dim=0)
        questions = [f"<image>\n{PROMPT}"] * len(keys_to_process)
        generation_config = dict(max_new_tokens=512, do_sample=True)
        # NOTE(review): do_sample=True makes outputs non-deterministic across
        # runs — intentional for caption diversity, presumably.
        responses = model.batch_chat(
            tokenizer,
            pixel_values,
            num_patches_list=num_patches_list,
            questions=questions,
            generation_config=generation_config,
        )
        for key, response in zip(keys_to_process, responses):
            results[key] = response
        # save after every processed batch (safer for long runs)
        with open(out_file, "w") as f:
            json.dump(results, f, indent=2)
        print(f"Progress saved: {len(results)} results in {out_file}")