import os
import sys
import argparse
from pathlib import Path
from PIL import Image
from typing import Any
import torch
import torchvision.transforms as T
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"
from jodi_pipeline import JodiPipeline
from model.postprocess import (
ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration
import json
import re
def clean_eval_question(q: str) -> str:
"""
Clean VQA-style question text for evaluation.
- If lettered options (A–Z) exist, keep text up to the last option.
- Otherwise, keep text up to the first '?' (inclusive).
"""
if not isinstance(q, str):
q = str(q)
# Remove <image> placeholders
q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
# Match all lettered options (A-Z), tolerating several formats: A. / A) / (A) / A: / A - / A– ...
option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)"
matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE))
if matches:
# Find the last option occurrence and keep text up to the end of that option
last_match = matches[-1]
# Take the text from the last option marker to the end of that segment (the option content)
tail = q[last_match.end():]
# Cut off any trailing instructions ("Please answer...", etc.)
tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0]
q = q[:last_match.end()] + tail_cut
else:
# No options: keep only the question stem (the part up to and including the question mark)
match_qmark = re.search(r"\?", q)
if match_qmark:
q = q[:match_qmark.end()]
else:
q = q.split("\n")[0] # fallback
# Clean up extra newlines and spaces
q = re.sub(r"\n+", " ", q)
q = re.sub(r"\s+", " ", q).strip()
return q
def clean_prompt_question(q: str) -> str:
"""Clean VQA-style question text, keeping only the question stem before '?'. """
if not isinstance(q, str):
q = str(q)
# Remove <image> placeholders
q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
# Keep the part up to and including the first question mark
match = re.search(r"^(.*?\?)", q)
if match:
q = match.group(1)
else:
# If there is no question mark, keep the first line
q = q.split("\n")[0]
# Remove extra whitespace and newlines
q = re.sub(r"\s+", " ", q).strip()
return q
def dump_image(image, save_root):
os.makedirs(save_root, exist_ok=True)
save_path = os.path.join(save_root, "input.jpg")
image.convert("RGB").save(save_path, format="JPEG", quality=95)
return save_path
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
""" 将多个图像拼接成一张大图并保存。
Args: image_paths: List[str] 图像路径列表
save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行)
image_format: 保存格式
"""
from PIL import Image
import io
# 读取图像
images = [Image.open(p).convert("RGB") for p in image_paths]
if images_per_row is None:
images_per_row = len(images)
# Resize all images to the same square size (optional)
target_size = min(1024, images[0].size[0])
images = [img.resize((target_size, target_size)) for img in images]
# Stitch the images onto a single canvas
widths, heights = zip(*(img.size for img in images))
max_width = max(widths)
rows = (len(images) + images_per_row - 1) // images_per_row
# All images share the same height after resizing, so the canvas needs one row-height per row
total_height = heights[0] * rows
new_im = Image.new("RGB", (max_width * images_per_row, total_height))
y_offset = 0
for i in range(0, len(images), images_per_row):
row_imgs = images[i:i + images_per_row]
x_offset = 0
for img in row_imgs:
new_im.paste(img, (x_offset, y_offset))
x_offset += max_width
y_offset += heights[0]
os.makedirs(os.path.dirname(save_path), exist_ok=True)
new_im.save(save_path, format=image_format.upper())
print(f"🧩 Saved merged image → {save_path}")
return save_path
def build_vqa_message(root, prompt, question):
"""
Build Qwen3-VL message for multimodal or single-image VQA.
Now explicitly tags each modality image before feeding into Qwen3-VL,
so that the model can distinguish RGB, edge, depth, normal, etc.
"""
root_path = Path(root)
# ---------- Single-image case ----------
if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
image_path = str(root)
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image_path},
{"type": "text", "text": f"Answer the follow question:{question} based on the <image>."},
],
}
]
return messages
# ---------- Multi-modality folder case ----------
modality_names = [
"image",
"annotation_lineart",
"annotation_edge",
"annotation_depth",
"annotation_normal",
"annotation_albedo",
"annotation_seg_12colors",
# "annotation_openpose",
]
# Check which modality files exist
available = []
for name in modality_names:
for ext in [".png", ".jpg", ".jpeg"]:
path = Path(root) / f"{name}{ext}"
if path.exists():
available.append((name, str(path)))
break
# Map modality names to human-readable labels
readable_map = {
"image": "RGB image",
"annotation_lineart": "line drawing",
"annotation_edge": "edge map",
"annotation_depth": "depth map",
"annotation_normal": "normal map",
"annotation_albedo": "albedo map",
"annotation_seg_12colors": "segmentation map",
# "annotation_openpose": "human pose map",
}
present_modalities = [readable_map[n] for n, _ in available]
text_prompt = (
f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
f"The following caption describes the image in detail: '{prompt}'. "
f"Question:{question}"
)
# ---------- Build the content sequence (modality-anchored) ----------
content = []
print(f'available:{available}')
for name, path in available:
readable = readable_map.get(name, "visual input")
# Explicitly tag the modality type before each image
content.append({"type": "text", "text": f"This is the {readable}."})
content.append({"type": "image", "image": path})
# Finally, append the main instruction
content.append({"type": "text", "text": text_prompt})
messages = [{"role": "user", "content": content}]
return messages
def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""):
"""
Build Qwen3-VL message for multi-modal caption refinement.
Explicitly binds each image to its modality name (RGB, edge, depth, etc.)
so Qwen3-VL can reason over them correctly and refine the caption faithfully.
"""
modality_names = [
"image",
"annotation_lineart",
"annotation_edge",
"annotation_depth",
"annotation_normal",
"annotation_albedo",
"annotation_seg_12colors",
# "annotation_openpose",
]
# --- Check which modalities exist ---
available = []
for name in modality_names:
for ext in [".png", ".jpg", ".jpeg"]:
path = Path(root) / f"{name}{ext}"
if path.exists():
available.append((name, str(path)))
break
# --- Map modality names to human-readable labels ---
readable_map = {
"image": "RGB image",
"annotation_lineart": "line drawing",
"annotation_edge": "edge map",
"annotation_depth": "depth map",
"annotation_normal": "normal map",
"annotation_albedo": "albedo map",
"annotation_seg_12colors": "segmentation map",
# "annotation_openpose": "human pose map",
}
present_modalities = [readable_map[n] for n, _ in available]
# --- Compose the text instruction ---
text_prompt = (
f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. "
f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues "
f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, "
f"while maintaining faithfulness to the original visual content. "
f"Do not include any additional commentary or evaluations. "
f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
f"Focus on describing the visual properties, including: "
f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
f"Exclude any stylistic, environmental, emotional, or narrative information. "
f"Consider the following feedback when refining your description: '{feedback}'. "
f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. "
f"Coarse caption: '{coarse_caption}' "
)
# text_prompt0 = (
# f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
# f"The **RGB image** provides the most accurate and realistic appearance of the scene, "
# f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n"
# f"### Your Task:\n"
# f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. "
# f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n"
# f"### Guidelines:\n"
# f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n"
# f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n"
# f"3. Do NOT invent or assume anything not visually supported.\n"
# f"4. Avoid including any additional commentary or evaluations.\n"
# f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n"
# f"### Coarse Caption:\n'{coarse_caption}'\n\n"
# f"### Feedback to Incorporate:\n'{feedback}'\n\n"
# f"Now produce the final refined caption describing the scene based on the multimodal evidence below."
# )
# --- Build the message content: add a modality tag before each image ---
content = []
for name, path in available:
readable = readable_map.get(name, "visual input")
content.append({
"type": "text",
"text": f"This is the {readable}, which provides {get_modality_description(name)}."
})
content.append({"type": "image", "image": path})
# Finally, append the overall task instruction
content.append({"type": "text", "text": text_prompt})
messages = [{"role": "user", "content": content}]
return messages
def get_modality_description(name: str) -> str:
"""为每个模态生成一句说明,用于提示模型理解模态功能"""
desc_map = {
"image": "the main visual appearance of the scene, including color, texture, and lighting",
"annotation_lineart": "structural outlines, object contours, and fine geometry",
"annotation_edge": "strong boundaries and contrast edges between objects",
"annotation_depth": "distance and perspective information for spatial understanding",
"annotation_normal": "surface orientation and geometric curvature cues",
"annotation_albedo": "pure surface color without lighting or shading effects",
"annotation_seg_12colors": "semantic regions and object categories",
"annotation_openpose": "human body keypoints, joints, and orientation",
}
return desc_map.get(name, "complementary visual evidence")
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
help="Path to model checkpoint.")
parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
help="Path to model checkpoint.")
parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
help="Path to model checkpoint.")
parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
help="Prompt text for generation.")
parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
help="Optional negative prompt.")
parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
help="Prompt text for generation.")
parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
parser.add_argument("--question", type=str, default="how many cars in this image?",
help="Optional negative prompt.")
parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.")
parser.add_argument("--guidance_scale", type=float, default=4.5)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
return parser
# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
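"""Answer the question from a single RGB image with Qwen3-VL and save the answer text under output_dir/vqa_id."""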
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": image_path,
},
{"type": "text", "text": f"Answer the follow question:{question} based on the <image>."},
],
}
]
print(messages)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / str(vqa_id)
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
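"""Generate an initial caption for the RGB image and save it under output_dir/vqa_id/iteration_{iter_num}."""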
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": image_path,
},
{"type": "text", "text": f"Describe this image."},
],
}
]
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True, return_dict=True, return_tensors="pt"
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
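"""Score how well the proposed answer matches the image evidence and return (score, feedback)."""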
# --- Build the Qwen evaluation input ---
question = clean_eval_question(question)
eval_prompt = f"""
You are a VQA answer evaluator.
Given an image, a question, and a proposed answer,
score how correct the answer is according to the image evidence.
Then provide one short feedback sentence suggesting what kind of visual information or reasoning related to {question} should be improved
to make the answer more accurate or grounded in the image.
Return JSON strictly:
{{"AnswerScore": <float 0-1>, "Feedback": "<short suggestion>"}}
Question: "{question}"
Answer: "{answer}"
<image>
"""
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image_path},
{"type": "text", "text": eval_prompt},
],
}
]
# --- Inference ---
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
).to(model.device)
out_ids = model.generate(**inputs, max_new_tokens=max_length)
out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]
# --- Parse the JSON output ---
try:
data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
score = float(data.get("AnswerScore", 0))
feedback = data.get("Feedback", "")
except Exception:
score, feedback = 0.0, text.strip()
print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
return score, feedback
@torch.inference_mode()
def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256):
"""
Evaluate VQA answer correctness using all available modalities (not just RGB).
This reduces model bias and improves visual grounding reliability.
"""
# Check which modality files exist
modality_names = [
"image", "annotation_lineart", "annotation_edge",
"annotation_depth", "annotation_normal", "annotation_albedo",
"annotation_seg_12colors", "annotation_openpose"
]
available = []
for name in modality_names:
for ext in [".png", ".jpg", ".jpeg"]:
path = Path(root) / f"{name}{ext}"
if path.exists():
available.append((name, str(path)))
break
# Map modality names to human-readable labels
readable_map = {
"image": "RGB image",
"annotation_lineart": "line drawing",
"annotation_edge": "edge map",
"annotation_depth": "depth map",
"annotation_normal": "normal map",
"annotation_albedo": "albedo map",
"annotation_seg_12colors": "segmentation map",
"annotation_openpose": "human pose map",
}
present_modalities = [readable_map[n] for n, _ in available]
# Compose the evaluation prompt
eval_prompt = f"""
You are a multimodal visual reasoning evaluator.
You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
Your task is to judge **how correct and visually grounded** the given answer is for the question,
based purely on visual evidence from all modalities.
Follow this process:
1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
4. Otherwise, directly evaluate how accurate the free-form answer is.
5. Penalize any parts that contradict the image, or ignore modalities.
Return JSON strictly:
{{
"AnswerScore": <float between 0 and 1>,
"Feedback": "<short and specific suggestion mentioning what aspect (e.g., object count, relation, visibility) could be improved>"
}}
Question: "{question}"
Answer: "{answer}"
"""
# Build the content sequence (modality tags + images)
content = []
for name, path in available:
readable = readable_map.get(name, "visual input")
content.append({"type": "text", "text": f"This is the {readable}."})
content.append({"type": "image", "image": path})
content.append({"type": "text", "text": eval_prompt})
messages = [{"role": "user", "content": content}]
# --- Inference ---
inputs = processor.apply_chat_template(
messages, tokenize=True, add_generation_prompt=True,
return_dict=True, return_tensors="pt"
).to(model.device)
out_ids = model.generate(**inputs, max_new_tokens=max_length)
out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]
# --- Parse the JSON output ---
try:
data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
score = float(data.get("AnswerScore", 0))
feedback = data.get("Feedback", "")
except Exception:
score, feedback = 0.0, text.strip()
print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
return score, feedback
@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
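"""Refine the caption using all generated modalities, the question, and the evaluator feedback, then save it for this iteration."""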
question = clean_prompt_question(question)
messages = build_multimodal_message(root, question, prompt, feedback)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
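"""Answer the question from the refined caption plus all available modality images, saving the answer under iteration_{step}/vqa_answer."""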
messages = build_vqa_message(root, prompt, question)
print(messages)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
inputs = inputs.to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
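"""Run the Jodi pipeline to generate all modality images for the current caption, post-process and save them, and return the save directory."""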
# print(f"🚀 Generating with prompt: {prompt}")
outputs = pipe(
images=images,
role=role,
prompt=prompt,
negative_prompt=args.negative_prompt,
height=height,
width=width,
num_inference_steps=args.steps,
guidance_scale=args.guidance_scale,
num_images_per_prompt=1,
generator=generator,
task='t2i'
)
# Apply post-processing for each modality
results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]
# --------------------------
# Save results
# --------------------------
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
save_dir.mkdir(parents=True, exist_ok=True)
for idx, img in enumerate(results):
name = modality_names[idx]
save_path = save_dir / f"{name}.png"
img.save(save_path)
print(f"💾 Saved {name} → {save_path}")
merged_path = save_dir / f"merged_iteration_{iter_num}.png"
concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
print(f"\n✅ All results saved in: {save_dir}\n")
return save_dir
if __name__ == "__main__":
args = get_parser().parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"✅ Using device: {device}")
processor = AutoProcessor.from_pretrained(
args.model_name_or_path,
)
model = Qwen3VLForConditionalGeneration.from_pretrained(
args.text_model_path,
attn_implementation="flash_attention_2",
dtype=(torch.bfloat16),
).to(device)
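# Load the Jodi joint generation pipeline and its checkpoint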
pipe = JodiPipeline(args.config)
pipe.from_pretrained(args.model_path)
modality_names = [
"image",
"annotation_lineart",
"annotation_edge",
"annotation_depth",
"annotation_normal",
"annotation_albedo",
"annotation_seg_12colors",
"annotation_openpose",
]
# Build post-processors
post_processors: list[Any] = [ImagePostProcessor()]
for condition in pipe.config.conditions: # type: ignore
if condition == "lineart":
post_processors.append(LineartPostProcessor())
elif condition == "edge":
post_processors.append(EdgePostProcessor())
elif condition == "depth":
post_processors.append(DepthPostProcessor())
elif condition == "normal":
post_processors.append(NormalPostProcessor())
elif condition == "albedo":
post_processors.append(AlbedoPostProcessor())
elif condition == "segmentation":
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
elif condition == "openpose":
post_processors.append(OpenposePostProcessor())
else:
print(f"⚠️ Warning: Unknown condition: {condition}")
post_processors.append(ImagePostProcessor())
torch.manual_seed(args.seed)
generator = torch.Generator(device=device).manual_seed(args.seed)
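# Load the VQA annotations and iterate over a slice of samples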
with open(args.json, "r", encoding="utf-8") as f:
annotations = json.load(f)
for sample in annotations[15:306]:
image_path = os.path.join(args.data_path, sample["image"])
image_id = sample["image"].split('.')[0]
image = Image.open(image_path)
question = sample["question"]
control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
role = [1] + [0] * pipe.num_conditions
print(role)
best_result, best_score = '', 0.0
max_length = 1024
# input_img = Image.open(image_path).convert("RGB")
width, height = image.size
print(f'ori width:{width}', f'ori height:{height}')
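# Iteration 0: caption and answer directly from the raw RGB image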
prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
result = vqa_i2t(model, processor, image_path, question, 100, max_length)
score, feedback = evaluate_consistency(image_path, model, processor, question, result)
if score >= best_score:
best_result, best_score = result, score
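# Iterative refinement: generate modalities with Jodi, refine the caption, re-answer, and re-score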
for step in range(1, args.iters):
save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width,
image_id)
max_length += 100
prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length)
result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result)
if score >= best_score:
best_result, best_score = result, score
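# Save the best-scoring answer across all iterations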
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer'
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(best_result)
print(best_result)