import os
import sys
import argparse
from pathlib import Path
from PIL import Image
from typing import Any
import torch
import torchvision.transforms as T
from datasets import load_dataset
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"
from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration
)
from transformers import AutoProcessor, Trainer
import itertools
import ast
import re
import json
def clean_question(q: str) -> str:
    if not isinstance(q, str):
        q = str(q)
    # Remove placeholders such as <image 1>, <image1>, <image 2>
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Collapse any remaining extra whitespace
    q = re.sub(r"\s+", " ", q).strip()
    return q
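
# Illustrative usage (not part of the pipeline): dataset questions may carry
# placeholder tags such as "<image 1>", which clean_question strips, e.g.
#   clean_question("Describe <image 1> in detail.")  ->  "Describe in detail."
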
def dump_image(image, save_root):
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)    
    return save_path

def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate multiple images into a single grid image and save it.

    Args:
        image_paths: List[str] of image paths.
        save_path: Destination path (including the file name).
        images_per_row: Number of images per row (defaults to all in one row).
        image_format: Output image format.
    """
    # Load all images as RGB
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize every image to the same square size (optional)
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    # Stitch the images into a grid
    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # All images share the same height after resizing, so the canvas height
    # is the row height times the number of rows
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
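
# Minimal usage sketch (paths are hypothetical):
#   concatenate_images(["out/image.png", "out/annotation_depth.png"],
#                      "out/merged.png", images_per_row=2)
# produces a single two-column grid image saved at out/merged.png.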


def build_vqa_message(root, prompt, question):
    """
    Build Qwen3-VL message for multimodal or single-image VQA.
    Now explicitly tags each modality image before feeding into Qwen3-VL,
    so that the model can distinguish RGB, edge, depth, normal, etc.
    """

    root_path = Path(root)

    # ---------- Single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        image_path = str(root)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path},
                    {"type": "text", "text": f"Answer the following question: {question} based on the <image>."},
                ],
            }
        ]
        return messages

    # ---------- Multi-modality folder case ----------
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        #"annotation_openpose",
    ]

    # Collect the modality files that exist in the folder
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break



    # Human-readable name for each modality
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        #"annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # ---------- Instruction text ----------
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, "
        #f"so use them only as optional references for additional context. "
        #f"Each modality provides complementary information about the same visual content:\n"
        #f"- The line drawing highlights object outlines, shapes, and fine structures.\n"
        #f"- The edge map emphasizes boundaries and contours.\n"
        #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n"
        #f"- The normal map shows surface orientation and geometric curvature.\n"
        #f"- The albedo map presents true surface color without illumination or shadows.\n"
        #f"- The segmentation map divides the scene into semantic regions and object categories.\n"
        #f"- The human pose map indicates body orientation, structure, and articulation.\n\n"
        #f"Together, these modalities offer a unified, rich understanding of the scene.\n"
        #f"Scene description: \"{prompt}\"\n\n"
        f"Please answer the following question using visual reasoning primarily grounded in the RGB image. "
        #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n"
        #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n"
        f"Question: \"{question}\"\n"
    )

    # ---------- Build the content sequence (modality-anchored) ----------
    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        # Explicitly tag each image with its modality type before it appears
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})

    # Finally append the main instruction
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages
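
# For reference, the multi-modality message built above has this shape
# (paths and text are illustrative):
#   [{"role": "user", "content": [
#       {"type": "text",  "text": "This is the RGB image."},
#       {"type": "image", "image": "<root>/image.png"},
#       {"type": "text",  "text": "This is the depth map."},
#       {"type": "image", "image": "<root>/annotation_depth.png"},
#       ...,
#       {"type": "text",  "text": "<instruction text with the question>"}]}]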




def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """
    Build Qwen3-VL message for multi-modal caption refinement.
    Explicitly binds each image to its modality name (RGB, edge, depth, etc.)
    so Qwen3-VL can reason over them correctly and refine the caption faithfully.
    """

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        #"annotation_openpose",
    ]

    # --- Collect the modality files that exist ---
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # --- Human-readable name for each modality ---
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        #"annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # --- Build the instruction text ---
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary modality that provides the most reliable view of the scene. "
        #f"Other modalities (depth, normal, edge, segmentation, etc.) serve as structural or semantic references.\n\n"
        #f"Each modality provides distinct complementary information:\n"
        #f"- The line drawing highlights structure and contours.\n"
        #f"- The edge map emphasizes object boundaries.\n"
        #f"- The depth map shows spatial distance and perspective.\n"
        #f"- The normal map captures surface orientation and geometry.\n"
        #f"- The albedo map shows intrinsic surface color.\n"
        #f"- The segmentation map reveals semantic regions.\n"
        #f"- The human pose map indicates body structure and articulation.\n\n"
        f"### Your Task:\n"
        f"Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        f"of the scene, integrating information from all available modalities.\n\n"
        f"### Rules:\n"
        f"1. Describe only what is visible in the images — do NOT hallucinate.\n"
        #f"2. Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n"
        f"3. Incorporate the following feedback into your refinement: '{feedback}'\n"
        f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now refine the caption according to the multimodal evidence below."
    )
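    # An alternative, more detailed prompt is kept below for reference (currently unused).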

    text_prompt0 = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** provides the most accurate and realistic appearance of the scene, "
        f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n"
        f"### Your Task:\n"
        f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. "
        f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n"
        f"### Guidelines:\n"
        f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n"
        f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n"
        f"3. Do NOT invent or assume anything not visually supported.\n"
        f"4. Avoid including any additional commentary or evaluations.\n"
        f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"### Feedback to Incorporate:\n'{feedback}'\n\n"
        f"Now produce the final refined caption describing the scene based on the multimodal evidence below."
    )


    # --- Build the message content: tag each image with its modality ---
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Finally append the overall task instruction
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of a modality, used to help the model interpret it."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")




# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or Hugging Face ID of the Qwen3-VL model.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or Hugging Face ID used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the input images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the VQA annotations JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Temporary working directory.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Default question to ask about the image.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser
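
# Example invocation (the script name and paths below are illustrative placeholders):
#   python vqa_realworld.py --data_path /path/to/realworldqa/images \
#       --json /path/to/realworldqa/annotations.json --output_dir ./vqa_realworld_outputs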


# ------------------------------
# Main Inference Function
# ------------------------------


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": f"Answer the following question: {question} based on the <image>."},
            ],
        }
    ]

    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": f"Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):

    # --- Build the Qwen evaluation prompt ---
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.
    
    Return JSON strictly:
    {{"Consistency": <float 0-1>, "Feedback": "<sentence>"}}
    
    Description: "{caption}"
    <image>
    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # --- Run inference ---
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- Parse the model output ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback
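
# Expected evaluator reply shape (values are illustrative):
#   {"Consistency": 0.78, "Feedback": "Mention the second car parked on the left."}
# If the reply is not valid JSON, the raw text is used as the feedback and the
# score falls back to 0.0 (see the except branch above).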


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    messages = build_multimodal_message(root, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]

@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' /'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]

@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    # print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i'
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")


    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
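
# image_refine writes <output_dir>/<image_id>/iteration_<n>/<modality>.png plus a
# merged grid image, and returns that directory for text_refine / vqa to consume.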

if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build post-processors
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    for sample in annotations[:153]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions

        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        # input_img = Image.open(image_path).convert("RGB")
        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path
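
        # Iterative refinement loop: each step regenerates the multimodal views with
        # Jodi conditioned on the current caption, refines the caption against those
        # views, answers the question, and re-scores the refined caption against the
        # original RGB image, keeping the best-scoring caption and directory.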

        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width,
                                    image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length)
        print(f'result:{result}')