import os
import sys
import json
import argparse
import re
from pathlib import Path
from shutil import copy
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration

import nltk
# Download the required NLTK resources only if they are missing.
try:
    nltk.data.find("taggers/averaged_perceptron_tagger_eng")
except LookupError:
    nltk.download("averaged_perceptron_tagger_eng")
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")


from nltk import word_tokenize, pos_tag

def extract_main_objects(prompt: str):
    """
    提取主要对象名词:
    - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语
    - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing)
    - 回退到通用名词提取
    """
    if not isinstance(prompt, str):
        return []
    
    prompt = prompt.strip().lower()

    # Step 1: prefer the core noun phrase after a linking preposition,
    # e.g. "photo of a bottle and a refrigerator" -> "bottle", "refrigerator"
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        segment = match.group(1)
        tokens = word_tokenize(segment)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]
    
    # Step 2: if nothing matched, fall back to extracting all nouns
    if not candidates:
        tokens = word_tokenize(prompt)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 3: drop common media words
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot"
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: deduplicate while preserving order
    main_objects = list(dict.fromkeys(filtered))

    return main_objects
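
# A quick sanity check (expected behavior; assumes NLTK tags these tokens as nouns):
#   extract_main_objects("a photo of a bottle and a refrigerator")
#   -> ["bottle", "refrigerator"]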


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """
    将多个图像拼接成一张大图并保存。
    Args:
        image_paths: List[str] 图像路径列表
        save_path: 保存路径(包括文件名)
        images_per_row: 每行图像数量(默认为全部在一行)
        image_format: 保存格式
    """
    from PIL import Image
    import io

    # 读取图像
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize all tiles to a common square size
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    # After resizing, every tile is target_size x target_size, so the canvas
    # is simply (images_per_row * tile) wide and (rows * tile) tall.
    rows = (len(images) + images_per_row - 1) // images_per_row
    tile = target_size

    new_im = Image.new("RGB", (tile * images_per_row, tile * rows))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += tile
        y_offset += tile

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
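
# In this script the tiles are the 8 modality outputs laid out in a single row,
# e.g. an 8192x1024 strip when the tiles are 1024x1024:
#   concatenate_images([save_dir / f"{n}.png" for n in modality_names], merged_path)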


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """
    Build Qwen3-VL message for multi-modal caption refinement.
    Automatically detects available modalities under root.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # --- Detect which modalities exist under root ---
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # --- Human-readable names for each modality ---
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # --- Build the text instruction ---
    text_prompt = (
            f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
            f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
            f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
            f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
            f"Do not include any additional commentary or evaluations. "
            f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
            f"Focus on describing the visual properties, including: "
            f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
            f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
            f"Exclude any stylistic, environmental, emotional, or narrative information. "
            f"Consider the following feedback when refining your description: '{feedback}'. "
            f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
            f"Coarse caption: '{coarse_caption}' "
    )

    # --- Build the message content: label each image with its modality ---
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Finally, append the overall task instruction
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages

def get_modality_description(name: str) -> str:
    """为每个模态生成一句说明,用于提示模型理解模态功能"""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
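
# The assembled message interleaves one text label and one image per available
# modality, followed by the task instruction, roughly:
#   [{"role": "user", "content": [
#       {"type": "text",  "text": "This is the RGB image, which provides ..."},
#       {"type": "image", "image": "<root>/image.png"},
#       ...,
#       {"type": "text",  "text": "<task instruction>"}]}]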


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return parser
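
# Example invocation (the script filename is a placeholder):
#   python run_geneval_refine.py --iters 5 --steps 20 --output_dir ./geneval_outputs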


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Initial text-to-image pass: jointly generate the RGB image and all
    annotation modalities from the prompt, then post-process and save them."""

    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name}{save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
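
# Per call this produces, for prompt index i, sample n, iteration t:
#   <output_dir>/index_i/sample_n/iteration_t/image.png
#   <output_dir>/index_i/sample_n/iteration_t/annotation_*.png
#   <output_dir>/index_i/sample_n/iteration_t/merged_iteration*.png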


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image-text consistency with Qwen3-VL; returns (score, feedback).
    The evaluation is anchored on `ori_prompt`; `prompt` is kept for call-site
    compatibility."""
    main_objects = extract_main_objects(ori_prompt)
    print(f"🔍 Main objects: {main_objects}")
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"
    # --- Build the Qwen evaluation prompt ---
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image, evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visually missing, has an incorrect attribute (such as color, size, or position), or is only partly visible, reduce the score sharply (<0.6).
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."
    
    Return JSON only:
    {{
        "Consistency": <float 0–1>,
        "Feedback": "<one short sentence explaining which object should be adjusted or reworded>"
    }}
    Description: "{ori_prompt}"
    <image>
    """
    messages = [
            {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # --- Run inference ---
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- Parse the model output ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 Consistency = {score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback
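
# The evaluator is asked to reply with JSON only, e.g. (illustrative output):
#   {"Consistency": 0.45, "Feedback": "The car is not blue; it should be red."}
# Any reply that fails to parse as JSON falls back to score 0.0 with the raw
# generated text returned as feedback.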


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the caption with Qwen3-VL using all generated modalities under
    `root` plus the evaluator's feedback, and save the result as caption.txt."""
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)  # `args` is the global parsed in __main__
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]

def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    chi_prompt = """
    You are a visual scene enhancement expert specialized in preparing prompts for image generation models.
    Given a user prompt, rewrite it into an 'Enhanced prompt' that provides detailed, vivid, and spatially coherent visual descriptions.
    Your enhancement should depend on the original prompt's level of detail:
    - If the user prompt is brief or abstract, expand it with concrete details about colors, shapes, materials, lighting, textures, and spatial relationships between objects.
    - If the user prompt is already detailed, refine and slightly enhance the existing descriptions to make them more visually precise and realistic without overcomplicating.

    Follow these examples:
    - User Prompt: A cat sleeping → Enhanced: A small, fluffy white cat curled up tightly on a sunny windowsill, light streaming through a lace curtain, highlighting the cat’s soft fur and the warm wooden frame.
    - User Prompt: A busy city street → Enhanced: A bustling city street at dusk, glowing streetlights reflecting off wet asphalt, people in colorful coats crossing a crosswalk, and tall glass buildings illuminated by neon signs.

    Rules:
    1. Do not add new objects or unrelated elements.
    2. Avoid emotional, stylistic, or narrative phrases; focus purely on visual reality.
    3. Write one concise, self-contained sentence that fully describes the visible scene.

    Now generate only the enhanced description for the following prompt:
    User Prompt: "{}"
    """.format(raw_prompt)

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]
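
# Expected behavior (mirroring the in-prompt examples): one purely visual,
# self-contained sentence, e.g. "A cat sleeping" -> "A small, fluffy white cat
# curled up tightly on a sunny windowsill, ...". The call in __main__ is
# currently commented out, so the raw GenEval caption is used directly.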



def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities from the refined prompt (uses the globals
    `pipe`, `args`, and `post_processors` defined in __main__)."""
    print(f"🚀 Generating with prompt: {prompt}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name}{save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build post-processors
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    # GenEval metadata; the path and prompt slice are hard-coded for this run.
    with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp:
        metadatas = [json.loads(line) for line in fp][271:300]

    for index, metadata in enumerate(metadatas, start=271):
        ori_caption = metadata['prompt']

        for num in range(4):

            best_score = 0
            best_dir = None
            best_caption = None

            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(f"🎲 Sample seed: {sample_seed}")

            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            #caption = refine_prompt_with_qwen(model, processor, ori_caption)
            caption = ori_caption
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

            if score >= best_score:
                best_score = score
                best_dir = save_dir
                best_caption = prompt

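            # Closed-loop refinement: rewrite the caption from the multi-modal
            # evidence and the evaluator's feedback, regenerate, re-score, and
            # keep the best-scoring iteration.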
            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length)
                max_length += 100
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

                if score >= best_score:
                    best_score = score
                    best_dir = save_dir
                    best_caption = prompt

            best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(best_dir / 'image.png', best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))