diff --git a/__pycache__/jodi_pipeline.cpython-310.pyc b/__pycache__/jodi_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da98be2379310bcb18df69c3cd353841b0b75421 Binary files /dev/null and b/__pycache__/jodi_pipeline.cpython-310.pyc differ diff --git a/__pycache__/jodi_pipeline.cpython-312.pyc b/__pycache__/jodi_pipeline.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a67861b290cfe4949e56e38aa13eb25062a3be8 Binary files /dev/null and b/__pycache__/jodi_pipeline.cpython-312.pyc differ diff --git a/c2i.py b/c2i.py new file mode 100644 index 0000000000000000000000000000000000000000..27517a11c8e90ebcc0edd2c2f7eafa3023a66a08 --- /dev/null +++ b/c2i.py @@ -0,0 +1,376 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + 
target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="A mountain range.", help="Prompt text for generation.") + parser.add_argument("--image_root", type=str, default="./assets/1/", help="Prompt text for generation.") + parser.add_argument("--condition", type=list[str], default=['lineart'], help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative 
prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./demo_c2i_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, images, role, pipe, iter_num, post_processors, modality_names, generator): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {args.prompt}") + outputs = pipe( + images=images, + role=role, + prompt=args.prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for 
name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(prompt, images, role, pipe, root, iter_num, modality_names, generator): + + #control_images = [] + #for name in modality_names: + # control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {args.prompt}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res 
in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == 
"segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_paths = glob.glob(os.path.join(args.image_root, '*.jpg')) + + control_images = [] + + for name in modality_names: + found_path = None + for c in args.condition: + matched_files = [f for f in image_paths if c in f and c in name] + if matched_files: + found_path = matched_files[0] + break + control_images.append(Image.open(found_path).convert("RGB") if found_path else None) + + + role = [0 if img is None else 1 for img in control_images] + print(role) + + init_dir = init_t2i(args, control_images, role, pipe, 0, post_processors, modality_names, generator) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, prompt, step, max_length) + max_length += 100 + save_dir = image_refine(prompt, control_images, role, pipe, save_dir, step, modality_names, generator) + + diff --git a/c2t.py b/c2t.py new file mode 100644 index 0000000000000000000000000000000000000000..3eed6de5bec3836fe93f69e47affe05fa6eeaa3d --- /dev/null +++ b/c2t.py @@ -0,0 +1,448 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, 
OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_init_message(image_paths, role): + """ + Build Qwen3-VL message for multi-modal image description. + - `image_paths`: list of image file paths in modality order. + - `role`: list[int] of 0/1, indicating which modalities are active. + - Includes per-modality visual descriptions. + - No coarse caption, fixed instruction: "Describe this image." 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 输入检查 --- + if len(role) != len(modality_names): + raise ValueError(f"role length {len(role)} must match modality_names length {len(modality_names)}") + if len(image_paths) != sum(role): + raise ValueError(f"image_paths length {len(image_paths)} must match modality_names length {len(modality_names)}") + + # --- 每个模态的视觉提示定义 --- + modality_descriptions = { + "image": "provides color, texture, lighting, and overall visual appearance.", + "annotation_lineart": "reveals fine structural outlines, shapes, and proportions.", + "annotation_edge": "highlights boundaries and contours of objects.", + "annotation_depth": "shows spatial distance, perspective, and 3D geometry.", + "annotation_normal": "captures surface orientation and fine geometric curvature.", + "annotation_albedo": "shows intrinsic surface colors unaffected by lighting.", + "annotation_seg_12colors": "provides semantic regions and object boundaries.", + "annotation_openpose": "shows human body keypoints, orientation, and posture.", + } + + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + # --- 选择存在的模态与路径 --- + selected_modalities = [m for m, r in zip(modality_names, role) if r == 1] + available = [str(Path(p)) for p in image_paths] + + if not available: + raise FileNotFoundError("No valid modality images found in image_paths for selected roles.") + + # --- 拼接模态说明 --- + modality_desc_text = " ".join( + [f"- The {readable_map[m]} {modality_descriptions[m]}" for m in selected_modalities] + ) + + # --- 构造文本提示 --- + text_prompt = 
( + f"You are given multiple modalities of the same scene, including: " + f"{', '.join([readable_map[m] for m in selected_modalities])}. " + f"{modality_desc_text} " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT mention modality names explicitly. " + f"Describe this image." + + " " + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + + return messages + + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="./assets/2/", help="Prompt text for generation.") + parser.add_argument("--condition", type=list[str], default=["normal"], help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=768) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./demo_c2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, 
image_path, role, iter_num, max_length=300): + messages = build_init_message(image_path, role) + + print(f'init prompt:{messages}') + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator): + + #print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + 
pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + import glob + image_paths = glob.glob(os.path.join(args.image_root, '*.jpg')) + glob.glob(os.path.join(args.image_root, '*.png')) + + control_images = [] + + for name in modality_names: + found_path = None + for c in args.condition: + matched_files = [f for f in image_paths if c in f and c in name] + if matched_files: + found_path = matched_files[0] + break + control_images.append(Image.open(found_path).convert("RGB") if found_path else None) + + + role = [0 if img is None else 1 for img in control_images] + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_paths, role, 0, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator) + max_length 
+= 100 + prompt = text_refine(save_dir, model, processor, prompt, step, max_length) + + diff --git a/code/test_real1.py b/code/test_real1.py new file mode 100644 index 0000000000000000000000000000000000000000..fc79cf746215777bc5fac2fd13633c2d89296cb4 --- /dev/null +++ b/code/test_real1.py @@ -0,0 +1,805 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). + """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 
等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file path (directories are created as needed).
        images_per_row: tiles per row (default: all on one row).
        image_format: output format name passed to PIL.

    Returns:
        ``save_path``, for convenient chaining.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize all tiles to a common square size so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: each grid row is exactly one tile high; the old
    # sum(heights[:images_per_row]) * rows over-allocated the canvas by a
    # factor of images_per_row, leaving black bands below the tiles.
    row_height = max(heights)
    total_height = row_height * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += row_height

    # `or "."` guards against a bare file name with no directory part.
    os.makedirs(os.path.dirname(str(save_path)) or ".", exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


# Modality files the Jodi pipeline may emit, and the human-readable names
# used to tag them in the chat messages below. Openpose is deliberately
# excluded here (kept disabled upstream).
_MODALITY_NAMES = [
    "image",
    "annotation_lineart",
    "annotation_edge",
    "annotation_depth",
    "annotation_normal",
    "annotation_albedo",
    "annotation_seg_12colors",
    # "annotation_openpose",
]

_READABLE_MAP = {
    "image": "RGB image",
    "annotation_lineart": "line drawing",
    "annotation_edge": "edge map",
    "annotation_depth": "depth map",
    "annotation_normal": "normal map",
    "annotation_albedo": "albedo map",
    "annotation_seg_12colors": "segmentation map",
    # "annotation_openpose": "human pose map",
}


def _discover_modalities(root):
    """Return (name, path) pairs for the modality files present under ``root``.

    The first existing extension among .png/.jpg/.jpeg wins for each modality.
    """
    available = []
    for name in _MODALITY_NAMES:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break
    return available


def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL chat message for single-image or multimodal VQA.

    If ``root`` is an image file, a plain single-image VQA message is built.
    If it is a directory of Jodi modality renderings, every available
    modality map is attached, each preceded by a short text tag so the model
    can distinguish RGB, edge, depth, normal, etc.

    Args:
        root: image file path or directory holding modality renderings.
        prompt: scene caption (currently unused in the instruction text;
            kept for interface compatibility).
        question: the VQA question to answer.

    Returns:
        A ``messages`` list in Qwen chat-template format.
    """
    root_path = Path(root)

    # ---------- single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        # BUG FIX: the old text read "Answer the follow question:... based
        # on the ." — a typo plus a dangling reference to a lost placeholder.
        return [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": str(root)},
                    {"type": "text", "text": f"Answer the following question: {question} based on the image."},
                ],
            }
        ]

    # ---------- multimodal-directory case ----------
    available = _discover_modalities(root)
    present_modalities = [_READABLE_MAP[n] for n, _ in available]

    text_prompt = (
        f"Answer the following question based on multiple visual modalities of the same scene, "
        f"including: {', '.join(present_modalities)}. "
        f"Question:{question}"
    )

    # Instruction first, then each modality image anchored by a text tag.
    content = [{"type": "text", "text": text_prompt}]
    print(f'available:{available}')
    for name, path in available:
        readable = _READABLE_MAP.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})

    return [{"role": "user", "content": content}]


def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL message asking the model to refine ``coarse_caption``.

    Every modality image found under ``root`` is attached, preceded by a
    text tag naming the modality, so the model can reason over all of them
    jointly while steering the refined caption toward ``question``.

    Args:
        root: directory holding the modality renderings.
        question: the downstream VQA question the caption should support.
        coarse_caption: the caption to be refined.
        feedback: evaluator feedback to incorporate into the refinement.

    Returns:
        A ``messages`` list in Qwen chat-template format.
    """
    available = _discover_modalities(root)
    present_modalities = [_READABLE_MAP[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. "
        f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues "
        f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, "
        f"while maintaining faithfulness to the original visual content. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Instruction first, then each modality image anchored by a tag that
    # also explains what the modality contributes.
    content = [{"type": "text", "text": text_prompt}]
    for name, path in available:
        readable = _READABLE_MAP.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-line description of a modality, used inside prompts."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for the JODI VQA refinement loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the Qwen3-VL checkpoint used for generation.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the RealWorldQA annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for intermediate files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question used when none is supplied.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser
def get_parser():
    """Build the CLI argument parser for the JODI VQA refinement loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the Qwen3-VL checkpoint used for generation.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the RealWorldQA annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for intermediate files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question used when none is supplied.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Single-image VQA: ask ``question`` about ``image_path`` with Qwen.

    The decoded answer is also written to
    ``{args.output_dir}/{vqa_id}/caption.txt`` (``args`` is the module-level
    CLI namespace parsed in ``__main__``).

    Returns:
        The decoded answer string.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                # BUG FIX: was "Answer the follow question:... based on the ."
                # (typo plus a dangling reference to a lost placeholder).
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }
    ]
    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Drop the prompt tokens so only the newly generated answer is decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce the initial coarse caption for ``image_path``.

    The caption is written to
    ``{args.output_dir}/{vqa_id}/iteration_{iter_num}/caption.txt``.

    Returns:
        The decoded caption string.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score ``answer`` against the RGB image and return (score, feedback).

    Returns (0.0, raw_model_text) when the model's reply cannot be parsed
    as JSON.
    """
    import json
    import re  # local imports: no module-level import of json/re is visible in this file

    question = clean_eval_question(question)
    # NOTE(review): the "<...>" placeholders of the JSON template were lost
    # during extraction; restored with a sensible schema — confirm wording.
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.

    Return JSON strictly:
    {{"AnswerScore": <float between 0 and 1>, "Feedback": "<one short sentence>"}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse {"AnswerScore": ..., "Feedback": ...}; fall back to raw text.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256):
    """Score ``answer`` using every modality rendering found under ``root``.

    Grounding the judgement in all modalities (not just RGB) reduces model
    bias and improves visual grounding reliability.

    Returns:
        (score, feedback); (0.0, raw_model_text) when parsing fails.
    """
    import json
    import re  # local imports: no module-level import of json/re is visible in this file

    # Openpose is intentionally included here, unlike the message builders.
    modality_names = [
        "image", "annotation_lineart", "annotation_edge",
        "annotation_depth", "annotation_normal", "annotation_albedo",
        "annotation_seg_12colors", "annotation_openpose",
    ]

    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    # NOTE(review): the "<...>" placeholders of the JSON template were lost
    # during extraction; restored with a sensible schema — confirm wording.
    eval_prompt = f"""
    You are a multimodal visual reasoning evaluator.

    You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
    Your task is to judge **how correct and visually grounded** the given answer is for the question,
    based purely on visual evidence from all modalities.

    Follow this process:
    1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
    2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
    3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
    4. Otherwise, directly evaluate how accurate the free-form answer is.
    5. Penalize any parts that contradict the image, or ignore modalities.

    Return JSON strictly:
    {{
        "AnswerScore": <float between 0 and 1>,
        "Feedback": "<one short sentence>"
    }}

    Question: "{question}"
    Answer: "{answer}"
    """

    # Tag every modality image before attaching it.
    content = [{"type": "text", "text": eval_prompt}]
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})

    messages = [{"role": "user", "content": content}]

    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt",
    ).to(model.device)

    # The per-step scores previously requested with output_scores were never
    # used; a plain generate() keeps behaviour without the extra bookkeeping.
    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine ``prompt`` using all modalities under ``root`` plus ``feedback``.

    The refined caption is written to
    ``{args.output_dir}/{vqa_id}/iteration_{iter_num}/caption.txt``.

    Returns:
        The refined caption string.
    """
    question = clean_prompt_question(question)
    messages = build_multimodal_message(root, question, prompt, feedback)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer ``question`` from the modality renderings under ``root``.

    The answer is written to
    ``{args.output_dir}/{vqa_id}/iteration_{step}/vqa_answer/caption.txt``.

    Returns:
        The decoded answer string.
    """
    messages = build_vqa_message(root, prompt, question)
    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{step}" / "vqa_answer"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the Jodi pipeline once and save every modality rendering.

    Uses the module-level ``args`` and ``post_processors`` set up in
    ``__main__``. All outputs plus a merged contact sheet are saved under
    ``{args.output_dir}/{image_id}/iteration_{iter_num}``.

    Returns:
        The directory the renderings were saved to.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Apply the matching post-processor to each modality output.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == 
"normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[1:306]: + + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = 
evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/code/test_realworldqa_vqa.py b/code/test_realworldqa_vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..03063ea75a357014fcd1ea0dc58d946795ea8703 --- /dev/null +++ b/code/test_realworldqa_vqa.py @@ -0,0 +1,668 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, 
format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. 
+ """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. 
def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Each modality image found under ``root`` is preceded by a short text label
    naming it (so the model can ground every input), and the main task
    instruction is appended last.

    NOTE(review): this definition shadows an earlier function of the same name
    defined above in this file — only this one is live at runtime.

    Args:
        root: Directory containing modality images (``image.png``,
            ``annotation_depth.png``, ...).
        coarse_caption: The caption to be refined.
        feedback: Reviewer feedback to fold into the refinement.

    Returns:
        A one-element list holding a single ``user`` message.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        # "annotation_openpose",  # intentionally disabled upstream
    ]

    # Keep only the modalities whose files actually exist under `root`
    # (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        # "annotation_openpose": "human pose map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    # Main instruction. The original rule numbering (1/3/4) is preserved:
    # rule 2 was deliberately commented out upstream. The unused duplicate
    # prompt `text_prompt0` (dead code) has been removed.
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary modality that provides the most reliable view of the scene. "
        f"### Your Task:\n"
        f"Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        f"of the scene, integrating information from all available modalities.\n\n"
        f"### Rules:\n"
        f"1. Describe only what is visible in the images — do NOT hallucinate.\n"
        f"3. Incorporate the following feedback into your refinement: '{feedback}'\n"
        f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now refine the caption according to the multimodal evidence below."
    )

    # Interleave "This is the <modality>..." labels with the images themselves,
    # then append the task instruction.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-line English description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """CLI arguments for the caption-refinement / VQA loop.

    Defaults are unchanged; only the copy-pasted, misleading help strings
    were corrected.
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the vision-language model.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi diffusion checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the benchmark images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the VQA annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question when none is provided.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer ``question`` directly from the RGB image and persist the answer.

    Writes the decoded answer to ``<args.output_dir>/<vqa_id>/caption.txt``
    and returns it. Relies on the module-level ``args`` namespace (set in
    ``__main__``) for the output directory.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                # Fixed: the original prompt text was truncated
                # ("... based on the .").
                {"type": "text", "text": f"Answer the following question: {question}"},
            ],
        }
    ]
    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only newly generated tokens are decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Generate the initial caption for ``image_path``.

    Saves the caption to
    ``<args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt`` and
    returns it. Uses the module-level ``args`` namespace.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Decode only the newly generated tokens.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


def parse_consistency_output(text):
    """Extract ``(score, feedback)`` from the evaluator model's reply.

    Returns ``(0.0, raw stripped text)`` when the reply contains no parsable
    JSON object.
    """
    # json/re are imported locally because this module never imports them at
    # the top of the file — the original code raised NameError here at runtime.
    import json
    import re
    try:
        match = re.search(r"\{.*\}", text, re.S)
        data = json.loads(match.group(0))
        return float(data.get("Consistency", 0)), data.get("Feedback", "")
    except Exception:
        return 0.0, text.strip()


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score how well ``caption`` matches the image and get one feedback line.

    Returns:
        (score, feedback): consistency score (0.0 on parse failure) and a
        short improvement suggestion from the evaluator model.
    """
    # Fixed: the JSON template previously rendered as '{"Consistency": , ...}',
    # which is not valid JSON and gave the model a broken example to imitate.
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": <float between 0 and 1>, "Feedback": "<one short sentence>"}}

    Description: "{caption}"
    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    score, feedback = parse_consistency_output(text)
    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine ``prompt`` using all modality images under ``root`` plus feedback.

    Saves the refined caption to
    ``<args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt`` and
    returns it.
    """
    messages = build_multimodal_message(root, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]
@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer ``question`` using the refined caption plus modality images.

    Builds the message via ``build_vqa_message`` (defined earlier in this
    file) from the directory ``root``, saves the decoded answer to
    ``<args.output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt``,
    and returns it.
    """
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{step}" / "vqa_answer"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the Jodi pipeline conditioned on ``images``/``role`` and save outputs.

    Every modality output (RGB plus each condition) is post-processed, saved
    as ``<name>.png`` under
    ``<args.output_dir>/<image_id>/iteration_<iter_num>``, and merged into a
    contact sheet. Returns the save directory.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        # Fixed: removed `task='t2i'` — JodiPipeline.__call__ in this repo
        # accepts no such keyword and would raise TypeError.
    )

    # One post-processed tensor per output slot, then convert to PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    # Fixed: `json` is used below but was never imported in this module.
    import json

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per output slot: RGB first, then each condition.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    for sample in annotations[:153]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        # RGB image drives generation (role 1); all conditions are predicted.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            # NOTE(review): at iteration 0 `best_dir` is the raw image *file*,
            # not a modality directory — confirm `build_vqa_message` handles
            # a file path gracefully before relying on the 'best' answer.
            best_dir = image_path

        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length)
        print(f'result:{result}')
pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + 
def build_multimodal_message(root, coarse_caption="a generic scene"):
    """Build a Qwen3-VL message asking for a refined, unified caption.

    All modality images found under ``root`` are attached first, followed by
    one instruction that explains each modality and asks the model to merge
    their information into a single coherent description.

    Args:
        root: Directory holding ``image.png`` / ``annotation_*.png`` files.
        coarse_caption: The caption to refine.

    Returns:
        A one-element list containing a single ``user`` message.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Collect existing modality files (first matching extension wins) and
    # remember their human-readable names in one pass — the original re-probed
    # every candidate path a second time just to recover the names.
    available, present_modalities = [], []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append(str(path))
                present_modalities.append(readable_map[name])
                break

    # Fixed: the original appended `" ".join([""] * len(available))` — a run
    # of meaningless spaces — to the end of this prompt.
    text_prompt = (
        f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides distinct types of visual information that together describe the same subject: "
        f"- The RGB image provides color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing reveals detailed structural outlines, shapes, and proportions. "
        f"- The edge map highlights object boundaries and contours. "
        f"- The depth map shows spatial distance, perspective, and 3D depth relationships. "
        f"- The normal map captures fine surface orientation, curvature, and geometric details. "
        f"- The albedo map shows true surface colors without lighting or shadow effects. "
        f"- The segmentation map provides semantic regions and object boundaries for scene composition. "
        f"- The human pose map shows body structure, orientation, and posture of subjects. "
        f"For each provided modality image, analyze it according to the above definitions and describe "
        f"the specific visual information it contributes in this particular case. "
        f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. "
        f"Do NOT describe each modality separately or mention modality names. "
        f"Focus on merging their information into a single coherent image description. "
        f"Refine the coarse caption into a more detailed and accurate image description. "
        f"Coarse caption: '{coarse_caption}' "
    )

    return [
        {
            "role": "user",
            "content": [{"type": "image", "image": p} for p in available]
            + [{"type": "text", "text": text_prompt}],
        }
    ]


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """CLI arguments for the iterative i2t refinement demo.

    Defaults are unchanged; only misleading copy-pasted help strings were
    corrected.
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the vision-language model.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi diffusion checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the processor.")
    parser.add_argument("--image_path", type=str, default="./assets/test_images/pexels-jplenio-1105378.jpg",
                        help="Input image to caption and refine.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=768)
    parser.add_argument("--width", type=int, default=1152)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./demo_i2t_outputs", help="Directory to save results.")
    return parser
# ------------------------------
# Main Inference Function
# ------------------------------

@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, max_length=300):
    """Produce the initial caption for ``image_path``.

    Saves it to ``<args.output_dir>/iteration_<iter_num>/caption.txt`` and
    returns it. Uses the module-level ``args`` namespace set in ``__main__``.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Decode only the newly generated tokens.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def text_refine(root, model, processor, prompt, iter_num, max_length=300):
    """Refine ``prompt`` from the modality images found under ``root``.

    Saves the refined caption next to the generated modality images and
    returns it.
    """
    messages = build_multimodal_message(root, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator):
    """Generate all modalities from ``prompt`` and save them for this iteration.

    Returns the directory the results were written to. Uses the module-level
    ``args`` and ``post_processors`` globals set in ``__main__``.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        # Fixed: removed `task='t2i'` — JodiPipeline.__call__ in this repo
        # accepts no such keyword and would raise TypeError.
    )

    # One post-processed tensor per output slot, then convert to PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_path = args.image_path + + control_images = 
[Image.open(image_path).convert("RGB")] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, max_length) + + diff --git a/jodi_pipeline.py b/jodi_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..028b3620a3c684618c568cc74e581543a0dd3b32 --- /dev/null +++ b/jodi_pipeline.py @@ -0,0 +1,333 @@ +#s file is modified from https://github.com/NVlabs/Sana + +# Copyright 2024 NVIDIA CORPORATION & AFFILIATES +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def read_image(image):
    """Load ``image`` as a 3-D tensor normalized to ``[-1, 1]``.

    Accepts a file path, a PIL image, or an already-prepared 3-D tensor
    (returned unchanged).
    """
    # Tensor inputs pass straight through — they are assumed pre-normalized.
    if isinstance(image, torch.Tensor):
        assert image.ndim == 3, "Image tensor should be 3D."
        return image

    # One shared transform instead of the duplicated per-branch copies.
    to_tensor = T.Compose([T.ToTensor(), T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    if isinstance(image, str):
        assert os.path.exists(image), f"Image {image} does not exist."
        return to_tensor(Image.open(image).convert("RGB"))
    if isinstance(image, Image.Image):
        return to_tensor(image)
    raise TypeError("Unsupported image type. Expected str, PIL Image, or Tensor.")


def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]:
    """Snap ``(height, width)`` to the bucket whose aspect ratio is closest.

    ``ratios`` maps a ratio string (e.g. ``"1.0"``) to ``[height, width]``;
    the binned pair of that closest bucket is returned as ints.
    """
    aspect = float(height / width)
    closest = min(ratios, key=lambda r: abs(float(r) - aspect))
    bin_h, bin_w = ratios[closest]
    return int(bin_h), int(bin_w)


@dataclass
class JodiInference(BaseConfig):
    """Top-level inference configuration, loaded from YAML via pyrallis."""
    model: ModelConfig
    vae: AEConfig
    text_encoder: TextEncoderConfig
    scheduler: SchedulerConfig
    config: str = "./configs/inference.yaml"
    conditions: List[str] = field(default_factory=list)  # condition modality names
    work_dir: str = "output/"
pre-compute null embedding + with torch.no_grad(): + null_caption_token = self.tokenizer( + "", max_length=self.max_sequence_length, padding="max_length", truncation=True, return_tensors="pt" + ).to(self.device) + self.null_caption_embs = self.text_encoder( + null_caption_token.input_ids, null_caption_token.attention_mask + )[0] + + @property + def base_ratios(self): + return { + "0.25": [512.0, 2048.0], # 1:4 + "0.33": [576.0, 1728.0], # 1:3 + "0.4": [640.0, 1600.0], # 2:5 + "0.5": [704.0, 1408.0], # 1:2 + "0.67": [768.0, 1152.0], # 2:3 + "0.75": [864.0, 1152.0], # 3:4 + "0.82": [896.0, 1088.0], # 5:6 + "1.0": [1024.0, 1024.0], # 1:1 + "1.21": [1088.0, 896.0], # 6:5 + "1.33": [1152.0, 864.0], # 4:3 + "1.5": [1152.0, 768.0], # 3:2 + "2.0": [1408.0, 704.0], # 2:1 + "2.5": [1600.0, 640.0], # 5:2 + "3.0": [1728.0, 576.0], # 3:1 + "4.0": [2048.0, 512.0], # 4:1 + } + + def build_vae(self, config): + vae = get_vae(config.vae_type, config.vae_pretrained, self.device).to(self.vae_dtype) + return vae + + def build_text_encoder(self, config): + tokenizer, text_encoder = get_tokenizer_and_text_encoder(name=config.text_encoder_name, device=self.device) + return tokenizer, text_encoder + + def build_jodi(self, config): + # model setting + model_kwargs = model_init_config(config, latent_size=self.latent_size) + model = build_model( + config.model.model, + use_fp32_attention=config.model.get("fp32_attention", False) and config.model.mixed_precision != "bf16", + num_conditions=self.num_conditions, + **model_kwargs, + ) + self.logger.info(f"use_fp32_attention: {model.fp32_attention}") + self.logger.info( + f"{model.__class__.__name__}:{config.model.model}," + f"Model Parameters: {sum(p.numel() for p in model.parameters()):,}" + ) + return model + + def from_pretrained(self, model_path): + state_dict = find_model(model_path) + state_dict = state_dict.get("state_dict", state_dict) + if "pos_embed" in state_dict: + del state_dict["pos_embed"] + missing, unexpected = 
self.model.load_state_dict(state_dict, strict=False) + self.model.eval().to(self.weight_dtype) + + self.logger.info(f"Generating sample from ckpt: {model_path}") + self.logger.warning(f"Missing keys: {missing}") + self.logger.warning(f"Unexpected keys: {unexpected}") + + def register_progress_bar(self, progress_fn=None): + self.progress_fn = progress_fn if progress_fn is not None else self.progress_fn + + @torch.inference_mode() + def __call__( + self, + images, + role, + prompt="", + height=1024, + width=1024, + negative_prompt="", + num_inference_steps=20, + guidance_scale=4.5, + num_images_per_prompt=1, + generator=None, + latents=None, + ): + ori_height, ori_width = height, width + height, width = classify_height_width_bin(height, width, ratios=self.base_ratios) + latent_size_h, latent_size_w = ( + height // self.config.vae.vae_downsample_rate, + width // self.config.vae.vae_downsample_rate, + ) + + # pre-compute negative embedding + if negative_prompt != "": + null_caption_token = self.tokenizer( + negative_prompt, + max_length=self.max_sequence_length, + padding="max_length", + truncation=True, + return_tensors="pt", + ).to(self.device) + self.null_caption_embs = self.text_encoder( + null_caption_token.input_ids, null_caption_token.attention_mask + )[0] + + # compute clean_x + if len(images) != 1 + self.num_conditions: + raise ValueError(f"Number of images {len(images)} != {1 + self.num_conditions}.") + if len(role) != 1 + self.num_conditions: + raise ValueError(f"Number of roles {len(role)} != {1 + self.num_conditions}.") + clean_x = [ + torch.zeros( + 1, + self.config.vae.vae_latent_dim, + latent_size_h, + latent_size_w, + device=self.device, + dtype=self.vae_dtype, + ) + ] * (self.num_conditions + 1) + for i, image in enumerate(images): + if role[i] == 1: + assert image is not None + image = read_image(image).unsqueeze(0).to(self.device, self.vae_dtype) + + image_height, image_width = image.shape[-2:] + if height / image_height > width / image_width: + 
resize_size = height, int(image_width * height / image_height) + else: + resize_size = int(image_height * width / image_width), width + + resize_and_crop = T.Compose([ + T.Resize(resize_size, interpolation=T.InterpolationMode.BILINEAR, antialias=True), + T.CenterCrop((height, width)), + ]) + image = resize_and_crop(image) + clean_x[i] = vae_encode( + self.config.vae.vae_type, self.vae, image, self.config.vae.sample_posterior, self.device + ) + clean_x = torch.stack(clean_x, dim=1) # (1, 1+K, 32, 32, 32) + role = torch.tensor(role).unsqueeze(0) # (1, 1+K) + role = role.to(dtype=torch.long, device=self.device) + + prompts = [ + prepare_prompt_ar(prompt, self.base_ratios, device=self.device, show=False)[0].strip() + for _ in range(num_images_per_prompt) + ] + + with torch.no_grad(): + # prepare text feature + if not self.config.text_encoder.chi_prompt: + max_length_all = self.config.text_encoder.model_max_length + prompts_all = prompts + else: + chi_prompt = "\n".join(self.config.text_encoder.chi_prompt) + prompts_all = [chi_prompt + prompt for prompt in prompts] + num_chi_prompt_tokens = len(self.tokenizer.encode(chi_prompt)) + max_length_all = ( + num_chi_prompt_tokens + self.config.text_encoder.model_max_length - 2 + ) # magic number 2: [bos], [_] + + caption_token = self.tokenizer( + prompts_all, + max_length=max_length_all, + padding="max_length", + truncation=True, + return_tensors="pt", + ).to(device=self.device) + select_index = [0] + list(range(-self.config.text_encoder.model_max_length + 1, 0)) + caption_embs = self.text_encoder(caption_token.input_ids, caption_token.attention_mask)[0][:, None][ + :, :, select_index + ].to(self.weight_dtype) + emb_masks = caption_token.attention_mask[:, select_index] + null_y = self.null_caption_embs.repeat(len(prompts), 1, 1)[:, None].to(self.weight_dtype) + + n = len(prompts) + if latents is None: + z = torch.randn( + n, + 1 + self.num_conditions, + self.config.vae.vae_latent_dim, + latent_size_h, + latent_size_w, + 
generator=generator, + device=self.device, + ) + else: + assert latents.shape == ( + n, + 1 + self.num_conditions, + self.config.vae.vae_latent_dim, + latent_size_h, + latent_size_w, + ) + z = latents.to(self.device) + role = role.repeat(n, 1) + clean_x = clean_x.repeat(n, 1, 1, 1, 1) + + model_kwargs = dict(mask=emb_masks, role=role, clean_x=clean_x) + scheduler = DPMS( + self.model, + condition=caption_embs, + uncondition=null_y, + cfg_scale=guidance_scale, + model_type="flow", + model_kwargs=model_kwargs, + schedule="FLOW", + ) + scheduler.register_progress_bar(self.progress_fn) + sample = scheduler.sample( + z, + steps=num_inference_steps, + order=2, + skip_type="time_uniform_flow", + method="multistep", + flow_shift=self.flow_shift, + ) + + sample = torch.where(torch.eq(role, 1)[:, :, None, None, None], clean_x, sample) + sample = sample.to(self.vae_dtype) + sample = torch.unbind(sample, dim=1) + with torch.no_grad(): + sample = [vae_decode(self.config.vae.vae_type, self.vae, s) for s in sample] + resize = T.Resize((ori_height, ori_width), interpolation=T.InterpolationMode.BILINEAR) + sample = [resize(s).clamp(-1, 1) for s in sample] + return sample diff --git a/old_code/test_realworldqa_vqa.py b/old_code/test_realworldqa_vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..6d03a1985eb184fa431d160b76cd47235f0c2395 --- /dev/null +++ b/old_code/test_realworldqa_vqa.py @@ -0,0 +1,620 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, 
+) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def 
build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png"]: + image_path = str(root_path) + text_prompt = ( + f"You are given one RGB image and a text description of the same scene.\n" + f"Scene description: \"{prompt}\"\n\n" + f"Now analyze the image carefully and answer the following question based only on what is visible.\n" + f"Do NOT guess or add details not supported by the image.\n" + f"Question: \"{question}\"\n" + "" + ) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": text_prompt}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + f"so use them only as optional references for additional context. " + f"Each modality provides complementary information about the same visual content:\n" + f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + f"- The edge map emphasizes boundaries and contours.\n" + f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + f"- The normal map shows surface orientation and geometric curvature.\n" + f"- The albedo map presents true surface color without illumination or shadows.\n" + f"- The segmentation map divides the scene into semantic regions and object categories.\n" + f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + f"Together, these modalities offer a unified, rich understanding of the scene.\n" + f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. 
+ Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + + # --- 构建消息内容:在每个图像前加模态标识 --- + + + content = [] + + text_prompt = (f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}.\n" + f"Each modality provides a different aspect of visual information about the same scene.\n\n" + f"### Modality Information:\n" + f"- **RGB image:** shows colors, textures, lighting, and overall appearance.\n" + f"- **Line drawing:** reveals outlines, object contours, and structural details.\n" + f"- **Edge map:** highlights strong edges and object boundaries.\n" + f"- **Depth map:** encodes per-object spatial distance and perspective. " + f"For each main object, estimate its approximate physical distance from the camera or ground reference " + f"in **meters**. 
" + f"If multiple objects are visible, provide numeric distances rather than qualitative terms like " + f"'closer' or 'farther'.\n" + f"- **Normal map:** provides surface orientation and facing direction.\n" + f"- **Albedo map:** shows true surface color unaffected by lighting or shadows.\n" + f"- **Segmentation map:** divides the image into semantic regions and object categories.\n" + f"- **Human pose map:** depicts human keypoints, poses, and orientations if present.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a detailed, modality-wise visual description. " + f"For each available modality listed above, generate one corresponding description paragraph " + f"based only on what that modality shows.\n\n" + f"### Rules:\n" + f"1. Follow the order and modality names given in 'Modality Information'.\n" + f"2. Start each paragraph with the modality name (e.g., 'RGB image:').\n" + f"3. Describe only what is visible in that modality—do NOT merge or summarize multiple modalities.\n" + f"4. Use **numeric distance estimates in meters** for the depth map whenever possible.\n" + f"5. Use clear and factual language (no imagination or hallucination).\n" + #f"6. You may use the following feedback for improvement: '{feedback}'\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now, according to the 'Modality Information' above, write one detailed description for each available modality below." + ) + + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", + help="Optional negative 
prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=41) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: 
+ f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, caption, max_length=256): + + # --- 构造 Qwen 输入 --- + eval_prompt = f""" + You are an image-text alignment evaluator. + You are given one RGB image and a description that may include references + to multiple visual modalities (e.g., depth map, normal map, segmentation map, etc.). + These terms are just analytical perspectives of the same scene — they should not reduce + the consistency score. Focus only on whether the described visual content matches what + is visible in the RGB image. + Your task: + 1. Judge how accurately the text describes what is visually present in the image. + 2. Ignore mentions of modality names (such as 'depth map' or 'normal map'). + 3. Provide a consistency score between 0.0 (completely mismatched) and 1.0 (perfect match). + 4. Provide one short feedback sentence suggesting how to make the description better aligned. + Return JSON strictly in this format: + {{"Consistency": <float between 0.0 and 1.0>, "Feedback": "<one short sentence>"}} + Description: "{caption}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}") + return score, feedback + + 
+@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + 
"annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[1:255]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = 
image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + #if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, max_length) + print(f'result:{result}') diff --git a/old_code/test_realworldqa_vqa1.py b/old_code/test_realworldqa_vqa1.py new file mode 100644 index 0000000000000000000000000000000000000000..743ee094606c9763731288364b9a0eb5c7b6f369 --- /dev/null +++ b/old_code/test_realworldqa_vqa1.py @@ -0,0 +1,669 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + 
os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. 
+ """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the following question: {question} based on the image."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. 
" + #f"Each modality provides complementary information about the same visual content:\n" + #f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + #f"- The edge map emphasizes boundaries and contours.\n" + #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + #f"- The normal map shows surface orientation and geometric curvature.\n" + #f"- The albedo map presents true surface color without illumination or shadows.\n" + #f"- The segmentation map divides the scene into semantic regions and object categories.\n" + #f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + #f"Together, these modalities offer a unified, rich understanding of the scene.\n" + #f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. 
Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", + help="Optional negative 
prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with 
open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, caption, max_length=256): + + # --- 构造 Qwen 输入 --- + eval_prompt = f""" + You are an image-text alignment evaluator. + Given one RGB image and a description, score how well the text matches + the visual evidence in the image. Then provide one short feedback + sentence suggesting how to make the description better aligned. 
+ + Return JSON strictly: + {{"Consistency": , "Feedback": ""}} + + Description: "{caption}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}") + return score, feedback + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + 
f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' /'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / 
image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + 
post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[:153]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + _ = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = image_path + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') diff --git a/old_code/test_realworldqa_vqa2.py b/old_code/test_realworldqa_vqa2.py new file mode 100644 index 
0000000000000000000000000000000000000000..1a293662e9270b42990fe2307e6fbd7b8e6260ea --- /dev/null +++ b/old_code/test_realworldqa_vqa2.py @@ -0,0 +1,668 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for 
img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + 
"annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. " + #f"Each modality provides complementary information about the same visual content:\n" + #f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + #f"- The edge map emphasizes boundaries and contours.\n" + #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + #f"- The normal map shows surface orientation and geometric curvature.\n" + #f"- The albedo map presents true surface color without illumination or shadows.\n" + #f"- The segmentation map divides the scene into semantic regions and object categories.\n" + #f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + #f"Together, these modalities offer a unified, rich understanding of the scene.\n" + #f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + 
content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) 
serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. 
Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + + + +# ------------------------------ +# Argument Parser +# 
# NOTE(review): c2i.py calls `json.load`/`json.loads` and `re.search` below
# but never imported these modules — importing here fixes the NameError.
import json
import re


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI parser for the JODI + Qwen3-VL caption-refinement VQA loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Qwen3-VL checkpoint used for captioning/VQA.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to config file.")
    parser.add_argument("--model_path", type=str, default="hf://VIPL-GENUN/Jodi/Jodi.pth",
                        help="Path to the JODI model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Checkpoint used to load the AutoProcessor.")
    parser.add_argument("--data_path", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the annotations JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="",
                        help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question (each annotation provides its own).")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs",
                        help="Directory to save results.")
    return parser


# ------------------------------
# Shared generation helpers
# ------------------------------
@torch.inference_mode()
def _generate_text(model, processor, messages, max_length):
    """Run one chat-template generation round and return the decoded string.

    Factored out of vqa_i2t/init_i2t/text_refine/vqa, which previously
    duplicated this tokenize -> generate -> trim -> decode sequence verbatim.
    """
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Drop the echoed prompt tokens so only newly generated text is decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    return output_text[0]


def _save_caption(text, *subdirs):
    """Write `text` to <output_dir>/<subdirs...>/caption.txt; return that dir."""
    save_dir = Path(args.output_dir).joinpath(*[str(p) for p in subdirs])
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(text.strip(), encoding="utf-8")
    return save_dir


# ------------------------------
# Main Inference Functions
# ------------------------------
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Baseline VQA: answer `question` directly from the raw RGB image."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                # NOTE(review): "based on the ." reads like a stripped
                # placeholder token; kept verbatim to preserve prompt behavior.
                {"type": "text", "text": f"Answer the follow question:{question} based on the ."},
            ],
        }
    ]
    print(messages)
    answer = _generate_text(model, processor, messages, max_length)
    _save_caption(answer, vqa_id)
    return answer


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce the initial coarse caption of the image (iteration `iter_num`)."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]
    caption = _generate_text(model, processor, messages, max_length)
    _save_caption(caption, vqa_id, f"iteration_{iter_num}")
    return caption


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score how well `caption` matches the image; return (score, feedback)."""
    # NOTE(review): the JSON template below has no value after "Consistency":
    # and an empty "Feedback" — original "<score>"-style placeholders were
    # probably stripped by markup. Kept verbatim; the parser below tolerates it.
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": , "Feedback": ""}}

    Description: "{caption}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]
    text = _generate_text(model, processor, messages, max_length)

    # Pull the first {...} blob out of the reply; fall back to the raw text.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine `prompt` using all modalities saved under `root` plus `feedback`."""
    messages = build_multimodal_message(root, prompt, feedback)
    refined = _generate_text(model, processor, messages, max_length)
    _save_caption(refined, vqa_id, f"iteration_{iter_num}")
    return refined


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer `question` from the (possibly regenerated) modalities in `root`."""
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    answer = _generate_text(model, processor, messages, max_length)
    _save_caption(answer, vqa_id, f"iteration_{step}", "vqa_answer")
    return answer


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run JODI once, post-process every modality, save PNGs plus a merged grid.

    Returns the directory this iteration's outputs were written to.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # One post-processor per modality (RGB + each configured condition).
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Dispatch table replaces the original if/elif chain; unknown conditions
    # fall back to the plain image post-processor, as before.
    factories = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "segmentation": lambda: SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True),
        "openpose": OpenposePostProcessor,
    }
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        factory = factories.get(condition)
        if factory is None:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            factory = ImagePostProcessor
        post_processors.append(factory())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # NOTE(review): hard-coded shard [153:306]; the old_code/ copies of this
    # script differ only in this slice — consider --start/--end CLI args.
    for sample in annotations[153:306]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        # The RGB image drives generation; all condition slots start empty.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback,
                                 step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id,
                         step, max_length)
            # NOTE(review): consistency is always scored against the ORIGINAL
            # image_path, not the regenerated image — confirm this is intended.
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        result = vqa(best_dir, model, processor, best_caption, question, image_id,
                     'best', max_length)
        print(f'result:{result}')


# ===========================================================================
# old_code/test_realworldqa_vqa3.py — near-verbatim duplicate of this script
# that processes annotations[306:459].  Its header mirrors c2i.py and
# additionally imports datasets.load_dataset, ast, re and json.
# NOTE(review): three copies of this script differ only in the slice they
# process; the shared helpers should live in one module.
# ===========================================================================
from datasets import load_dataset
import ast


def clean_question(q: str) -> str:
    """Drop "<image N>" placeholders from a question and collapse whitespace."""
    if not isinstance(q, str):
        q = str(q)
    # NOTE(review): in the pasted source this sub appeared fused onto the
    # comment line (its "<image N>" example text was eaten by markup);
    # reconstructed as active code, which the "再清理" ("then clean") wording
    # of the next comment implies.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Then collapse runs of whitespace.
    q = re.sub(r"\s+", " ", q).strip()
    return q


def dump_image(image, save_root):
    """Save `image` as JPEG to <save_root>/input.jpg and return the path."""
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the given images into a single grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file; parent directories are created.
        images_per_row: images per grid row (default: all in one row).
        image_format: format name passed through to PIL.

    Returns:
        save_path, for convenience.
    """
    if not image_paths:
        raise ValueError("concatenate_images: image_paths is empty")

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Resize every tile to the same square so rows line up.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    row_height = max(heights)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: height was computed as sum(heights[:images_per_row]) * rows,
    # which over-counts by ~images_per_row x and left a black band below the
    # grid.  One row is row_height tall, so the canvas needs rows * row_height.
    total_height = rows * row_height

    canvas = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for start in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[start:start + images_per_row]:
            canvas.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += row_height

    parent = os.path.dirname(str(save_path))
    if parent:  # dirname is "" for bare filenames; makedirs("") would raise
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


# Shared modality bookkeeping for the two message builders below (previously
# duplicated inline in each function).
_MODALITY_KEYS = [
    "image",
    "annotation_lineart",
    "annotation_edge",
    "annotation_depth",
    "annotation_normal",
    "annotation_albedo",
    "annotation_seg_12colors",
    # "annotation_openpose",  # disabled in the original
]

_READABLE_MAP = {
    "image": "RGB image",
    "annotation_lineart": "line drawing",
    "annotation_edge": "edge map",
    "annotation_depth": "depth map",
    "annotation_normal": "normal map",
    "annotation_albedo": "albedo map",
    "annotation_seg_12colors": "segmentation map",
    # "annotation_openpose": "human pose map",
}


def _available_modalities(root):
    """Return [(modality_key, file_path)] for modality files present in root."""
    found = []
    for name in _MODALITY_KEYS:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                found.append((name, str(path)))
                break
    return found


def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL chat message for VQA over one image or a modality dir.

    If `root` is a single image file the message holds just that image plus
    the question; otherwise every modality file found under `root` is tagged
    with its type and attached.  `prompt` (the scene caption) is currently
    unused — the "Scene description" prompt line was commented out upstream.
    """
    root_path = Path(root)

    # ---------- single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        return [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": str(root)},
                    # NOTE(review): "based on the ." looks like a stripped
                    # placeholder token; kept verbatim.
                    {"type": "text", "text": f"Answer the follow question:{question} based on the ."},
                ],
            }
        ]

    # ---------- multi-modality directory case ----------
    available = _available_modalities(root)
    present_modalities = [_READABLE_MAP[n] for n, _ in available]

    # Only the fragments that were active (uncommented) upstream are kept;
    # the large commented-out prompt variants were dropped as dead text.
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        f"Please answer the following question using visual reasoning primarily grounded in the RGB image, "
        f"Question: \"{question}\"\n"
    )

    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = _READABLE_MAP.get(name, "visual input")
        # Tag every image so the model knows which modality it is seeing.
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL message asking for a refined caption of the scene.

    Each modality image found under `root` is attached with a one-line
    description so the model can tell RGB, edge, depth, etc. apart.
    """
    available = _available_modalities(root)
    present_modalities = [_READABLE_MAP[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary modality that provides the most reliable view of the scene. "
        f"### Your Task:\n"
        f"Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        f"of the scene, integrating information from all available modalities.\n\n"
        f"### Rules:\n"
        f"1. Describe only what is visible in the images — do NOT hallucinate.\n"
        f"3. Incorporate the following feedback into your refinement: '{feedback}'\n"
        f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now refine the caption according to the multimodal evidence below."
    )
    # NOTE(review): rule 2 was commented out upstream, so the numbering above
    # intentionally jumps 1 -> 3.  An unused alternative prompt (text_prompt0)
    # was removed here as dead code.

    content = []
    for name, path in available:
        readable = _READABLE_MAP.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """One-line functional description of a modality, used for prompt tagging."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
# --------------------------------------------------------------------------
# Argument parser — duplicate of c2i.py's; this shard processes [306:459].
# --------------------------------------------------------------------------
def get_parser():
    """Build the CLI parser for the JODI + Qwen3-VL caption-refinement VQA loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Qwen3-VL checkpoint used for captioning/VQA.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to config file.")
    parser.add_argument("--model_path", type=str, default="hf://VIPL-GENUN/Jodi/Jodi.pth",
                        help="Path to the JODI model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Checkpoint used to load the AutoProcessor.")
    parser.add_argument("--data_path", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the annotations JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="",
                        help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question (each annotation provides its own).")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs",
                        help="Directory to save results.")
    return parser


# ------------------------------
# Shared generation helpers
# ------------------------------
@torch.inference_mode()
def _generate_text(model, processor, messages, max_length):
    """Run one chat-template generation round and return the decoded string.

    Factored out of vqa_i2t/init_i2t/text_refine/vqa, which previously
    duplicated this tokenize -> generate -> trim -> decode sequence verbatim.
    """
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Drop the echoed prompt tokens so only newly generated text is decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    return output_text[0]


def _save_caption(text, *subdirs):
    """Write `text` to <output_dir>/<subdirs...>/caption.txt; return that dir."""
    save_dir = Path(args.output_dir).joinpath(*[str(p) for p in subdirs])
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(text.strip(), encoding="utf-8")
    return save_dir


# ------------------------------
# Main Inference Functions
# ------------------------------
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Baseline VQA: answer `question` directly from the raw RGB image."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                # NOTE(review): "based on the ." reads like a stripped
                # placeholder token; kept verbatim to preserve prompt behavior.
                {"type": "text", "text": f"Answer the follow question:{question} based on the ."},
            ],
        }
    ]
    print(messages)
    answer = _generate_text(model, processor, messages, max_length)
    _save_caption(answer, vqa_id)
    return answer


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce the initial coarse caption of the image (iteration `iter_num`)."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]
    caption = _generate_text(model, processor, messages, max_length)
    _save_caption(caption, vqa_id, f"iteration_{iter_num}")
    return caption


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score how well `caption` matches the image; return (score, feedback)."""
    # NOTE(review): the JSON template below has no value after "Consistency":
    # and an empty "Feedback" — original "<score>"-style placeholders were
    # probably stripped by markup. Kept verbatim; the parser below tolerates it.
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": , "Feedback": ""}}

    Description: "{caption}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]
    text = _generate_text(model, processor, messages, max_length)

    # Pull the first {...} blob out of the reply; fall back to the raw text.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine `prompt` using all modalities saved under `root` plus `feedback`."""
    messages = build_multimodal_message(root, prompt, feedback)
    refined = _generate_text(model, processor, messages, max_length)
    _save_caption(refined, vqa_id, f"iteration_{iter_num}")
    return refined


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer `question` from the (possibly regenerated) modalities in `root`."""
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    answer = _generate_text(model, processor, messages, max_length)
    _save_caption(answer, vqa_id, f"iteration_{step}", "vqa_answer")
    return answer


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run JODI once, post-process every modality, save PNGs plus a merged grid.

    Returns the directory this iteration's outputs were written to.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # One post-processor per modality (RGB + each configured condition).
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Dispatch table replaces the original if/elif chain; unknown conditions
    # fall back to the plain image post-processor, as before.
    factories = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "segmentation": lambda: SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True),
        "openpose": OpenposePostProcessor,
    }
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        factory = factories.get(condition)
        if factory is None:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            factory = ImagePostProcessor
        post_processors.append(factory())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # NOTE(review): this copy processes shard [306:459]; the siblings differ
    # only in this slice — consider --start/--end CLI args instead of copies.
    for sample in annotations[306:459]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        # The RGB image drives generation; all condition slots start empty.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback,
                                 step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id,
                         step, max_length)
            # NOTE(review): consistency is always scored against the ORIGINAL
            # image_path, not the regenerated image — confirm this is intended.
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        result = vqa(best_dir, model, processor, best_caption, question, image_id,
                     'best', max_length)
        print(f'result:{result}')
def clean_question(q: str) -> str:
    """Normalize a VQA question string.

    Removes "<image N>"-style placeholders (injected by multi-image datasets)
    and collapses all runs of whitespace into single spaces.
    Non-string input is coerced with str() first.
    """
    if not isinstance(q, str):
        q = str(q)
    # Drop "<image 1>"-style placeholders, case-insensitively.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Collapse any whitespace run (including newlines/tabs) to one space.
    return re.sub(r"\s+", " ", q).strip()


def dump_image(image, save_root):
    """Save a PIL image as <save_root>/input.jpg (RGB, quality 95); return its path."""
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the given images into a single grid image and save it.

    Args:
        image_paths: paths of the images to merge.
        save_path: destination file (parent directories are created as needed).
        images_per_row: tiles per row; defaults to all images on one row.
        image_format: PIL format name used when saving.

    Returns:
        The save path, unchanged.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Resize every tile to a common square size (capped at 1024 px).
    tile = min(1024, images[0].size[0])
    images = [img.resize((tile, tile)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: the canvas height used to be sum(heights of one full row) * rows,
    # i.e. images_per_row times too tall; each row is exactly `tile` pixels high.
    canvas = Image.new("RGB", (tile * images_per_row, tile * rows))
    for idx, img in enumerate(images):
        row, col = divmod(idx, images_per_row)
        canvas.paste(img, (col * tile, row * tile))

    parent = os.path.dirname(str(save_path))
    if parent:  # os.makedirs("") raises; a bare filename needs no mkdir
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL chat message for VQA.

    If `root` is a single image file, the message contains only that image.
    If `root` is a directory, every available modality rendering found in it
    is attached, each preceded by a short text tag naming the modality so the
    model can tell RGB, edge, depth, etc. apart.

    NOTE(review): `prompt` (the scene caption) is currently unused — the
    caption lines of the instruction text are commented out upstream.
    """
    root_path = Path(root)

    # ---------- single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in (".jpg", ".jpeg", ".png", ".webp"):
        return [{
            "role": "user",
            "content": [
                {"type": "image", "image": str(root)},
                # fixed wording: was "Answer the follow question:... based on the ."
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }]

    # ---------- multi-modality directory case ----------
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        # "annotation_openpose",
    ]
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        # "annotation_openpose": "human pose map",
    }

    # Collect the modality files that actually exist (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = root_path / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    present = [readable_map[n] for n, _ in available]
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present)}. "
        "The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        "Please answer the following question using visual reasoning primarily grounded in the RGB image.\n"
        f"Question: \"{question}\"\n"
    )

    # Anchor each image to its modality with a preceding text tag.
    content = []
    print(f"available:{available}")
    for name, path in available:
        content.append({"type": "text", "text": f"This is the {readable_map.get(name, 'visual input')}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})
    return [{"role": "user", "content": content}]
def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL message asking the model to refine `coarse_caption`.

    Attaches every modality rendering found under `root`, each preceded by a
    text tag naming the modality and what it contributes, followed by the
    refinement instructions (which embed `coarse_caption` and `feedback`).
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        # "annotation_openpose",
    ]
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        # "annotation_openpose": "human pose map",
    }

    # Collect the modality files that actually exist (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    present = [readable_map[n] for n, _ in available]

    # Instruction text. Rules renumbered 1-3 (the old list read 1, 3, 4 after a
    # rule was commented out); the unused alternative prompt (text_prompt0) was removed.
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present)}. "
        "The **RGB image** is the primary modality that provides the most reliable view of the scene.\n\n"
        "### Your Task:\n"
        "Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        "of the scene, integrating information from all available modalities.\n\n"
        "### Rules:\n"
        "1. Describe only what is visible in the images — do NOT hallucinate.\n"
        f"2. Incorporate the following feedback into your refinement: '{feedback}'\n"
        "3. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        "Now refine the caption according to the multimodal evidence below."
    )

    # Tag each image with its modality before appending the main instruction.
    content = []
    for name, path in available:
        content.append({
            "type": "text",
            "text": f"This is the {readable_map.get(name, 'visual input')}, "
                    f"which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})
    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """One-line description of what a modality contributes (used for prompt tagging)."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
def get_parser():
    """Build the CLI for the RealWorldQA multimodal VQA refinement loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="HF id or path of the Qwen3-VL model used for generation.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to the Jodi inference config file.")
    parser.add_argument("--model_path", type=str, default="hf://VIPL-GENUN/Jodi/Jodi.pth",
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="HF id or path used to load the processor.")
    # help texts below fixed — the originals were copy-paste leftovers
    # ("Prompt text for generation." / "Optional negative prompt.").
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the annotations JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for intermediate files.")
    parser.add_argument("--negative_prompt", type=str, default="",
                        help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question used when none is supplied.")
    parser.add_argument("--steps", type=int, default=20,
                        help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10,
                        help="Number of caption-refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs",
                        help="Directory to save results.")
    return parser
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Single-image VQA baseline: answer `question` from `image_path` alone.

    Saves the answer to <args.output_dir>/<vqa_id>/caption.txt and returns it.
    NOTE(review): relies on the module-level `args` namespace.
    """
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            # fixed wording: was "Answer the follow question:... based on the ."
            {"type": "text", "text": f"Answer the following question: {question} based on the image."},
        ],
    }]
    print(messages)  # debug
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Keep only the newly generated tokens.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    # mkdir(parents=True) covers args.output_dir; the extra os.makedirs was redundant.
    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score how well `caption` matches the image and collect one feedback sentence.

    Asks the VLM for strict JSON {"Consistency": <0-1>, "Feedback": "<sentence>"}
    and falls back to (0.0, raw reply text) when the reply cannot be parsed.

    Returns:
        (score, feedback) — float score and one-sentence feedback string.
    """
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": <score between 0 and 1>, "Feedback": "<one short sentence>"}}

    Description: "{caption}"
    """

    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            {"type": "text", "text": eval_prompt},
        ],
    }]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(trimmed, skip_special_tokens=True)[0]

    # Parse the JSON blob out of the reply. The original chained
    # re.search(...).group(0), which raises AttributeError when the reply has no
    # braces; guard the match explicitly and catch only parsing errors.
    score, feedback = 0.0, text.strip()
    match = re.search(r"\{.*\}", text, re.S)
    if match:
        try:
            data = json.loads(match.group(0))
            score = float(data.get("Consistency", 0))
            feedback = data.get("Feedback", "")
        except (json.JSONDecodeError, TypeError, ValueError):
            score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run one Jodi generation pass and save every modality plus a merged grid.

    Args:
        prompt: text prompt driving the generation.
        images: per-slot condition images (RGB first, then one slot per condition).
        role: per-slot role flags matching `images`.
        pipe: the JodiPipeline instance.
        iter_num: refinement-iteration index (used in the save path).
        modality_names: output names, one per post-processed result.
        generator: seeded torch.Generator for reproducibility.
        height, width: output resolution.
        image_id: sample identifier (used in the save path).

    NOTE(review): relies on module-level `args` and `post_processors`.

    Returns:
        The directory the images were written to (Path).
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task="t2i",
    )

    # Post-process the RGB output plus one output per configured condition.
    processed = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    batch = torch.stack(processed, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(t).convert("RGB") for t in batch.unbind(0)]

    # mkdir(parents=True) already creates args.output_dir; the extra
    # os.makedirs call in the original was redundant.
    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    # assumes len(results) == len(modality_names) — TODO confirm upstream config
    for name, img in zip(modality_names, results):
        out_path = save_dir / f"{name}.png"
        img.save(out_path)
        print(f"💾 Saved {name} → {out_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    # Vision-language model + processor for captioning / VQA / scoring.
    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    # Jodi diffusion pipeline producing the RGB image plus auxiliary modalities.
    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per pipeline output: index 0 is the RGB image, then
    # one entry per configured condition (dispatch map replaces the if/elif chain).
    _PROCESSOR_FACTORIES = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "segmentation": lambda: SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True),
        "openpose": OpenposePostProcessor,
    }
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        factory = _PROCESSOR_FACTORIES.get(condition)
        if factory is None:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            factory = ImagePostProcessor  # pass-through fallback
        post_processors.append(factory())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # NOTE(review): hard-coded shard [459:612] — this script processes one slice
    # of the dataset; sibling scripts cover other ranges. Parameterize if reused.
    for sample in annotations[459:612]:
        image_path = os.path.join(args.data_path, sample["image"])
        # splitext is robust to dots inside the filename; the old
        # split('.')[0] truncated names like "img.v2.jpg".
        image_id = os.path.splitext(sample["image"])[0]
        image = Image.open(image_path)
        question = sample["question"]

        # The RGB image drives generation; all condition slots start empty.
        control_images = [image.convert("RGB")] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = "", "", 0.0
        max_length = 1024

        width, height = image.size
        print(f"ori width:{width}", f"ori height:{height}")

        # Initial caption + baseline single-image VQA (saved under id 100).
        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)
        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        # Refinement loop: regenerate modalities, refine the caption, re-score.
        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100  # allow progressively longer captions
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)
            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        # Final answer using the best-scoring caption / modality set.
        result = vqa(best_dir, model, processor, best_caption, question, image_id, "best", max_length)
        print(f"result:{result}")
question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') diff --git a/old_code/test_realworldqa_vqa5.py b/old_code/test_realworldqa_vqa5.py new file mode 100644 index 0000000000000000000000000000000000000000..fd64c8a90e4ad3e02f4655dd9e60425fb7dc7705 --- /dev/null +++ b/old_code/test_realworldqa_vqa5.py @@ -0,0 +1,668 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the given images into a single grid image and save it.

    Args:
        image_paths: paths of the images to merge.
        save_path: destination file (parent directories are created as needed).
        images_per_row: tiles per row; defaults to all images on one row.
        image_format: PIL format name used when saving.

    Returns:
        The save path, unchanged.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Resize every tile to a common square size (capped at 1024 px).
    tile = min(1024, images[0].size[0])
    images = [img.resize((tile, tile)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: the canvas height used to be sum(heights of one full row) * rows,
    # i.e. images_per_row times too tall; each row is exactly `tile` pixels high.
    canvas = Image.new("RGB", (tile * images_per_row, tile * rows))
    for idx, img in enumerate(images):
        row, col = divmod(idx, images_per_row)
        canvas.paste(img, (col * tile, row * tile))

    parent = os.path.dirname(str(save_path))
    if parent:  # os.makedirs("") raises; a bare filename needs no mkdir
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL chat message for VQA.

    If `root` is a single image file, the message contains only that image.
    If `root` is a directory, every available modality rendering found in it
    is attached, each preceded by a short text tag naming the modality so the
    model can tell RGB, edge, depth, etc. apart.

    NOTE(review): `prompt` (the scene caption) is currently unused — the
    caption lines of the instruction text are commented out upstream.
    """
    root_path = Path(root)

    # ---------- single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in (".jpg", ".jpeg", ".png", ".webp"):
        return [{
            "role": "user",
            "content": [
                {"type": "image", "image": str(root)},
                # fixed wording: was "Answer the follow question:... based on the ."
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }]

    # ---------- multi-modality directory case ----------
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        # "annotation_openpose",
    ]
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        # "annotation_openpose": "human pose map",
    }

    # Collect the modality files that actually exist (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = root_path / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    present = [readable_map[n] for n, _ in available]
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present)}. "
        "The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        "Please answer the following question using visual reasoning primarily grounded in the RGB image.\n"
        f"Question: \"{question}\"\n"
    )

    # Anchor each image to its modality with a preceding text tag.
    content = []
    print(f"available:{available}")
    for name, path in available:
        content.append({"type": "text", "text": f"This is the {readable_map.get(name, 'visual input')}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})
    return [{"role": "user", "content": content}]
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. 
Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
def get_modality_description(name: str) -> str:
    """Return a one-sentence English description of what a modality contributes.

    Used when assembling multimodal prompts so the VLM knows the role of each
    auxiliary image. Unknown modality names fall back to a generic phrase.
    """
    descriptions = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    if name in descriptions:
        return descriptions[name]
    # Unrecognised modality: keep the prompt generic instead of failing.
    return "complementary visual evidence"
prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with 
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score how well `caption` matches the RGB image at `image_path`.

    Prompts the VLM judge to emit a strict JSON verdict, then parses it.

    Args:
        image_path: path to the RGB image to judge against.
        model: Qwen-VL generation model.
        processor: matching AutoProcessor (chat template + tokenizer).
        caption: candidate description to evaluate.
        max_length: maximum new tokens for the judge's reply.

    Returns:
        (score, feedback): consistency score as a float (0.0 on parse
        failure) and a one-sentence improvement suggestion (raw model
        text when the JSON cannot be parsed).
    """
    # Fix: json and re are used below but this module never imports them,
    # so the first call raised NameError. Import locally to keep the fix
    # self-contained within this function.
    import json
    import re

    # --- Build the judging prompt ---
    # NOTE(review): the JSON template below appears to have lost its
    # placeholder tokens (e.g. "<float>", "<sentence>") — confirm against
    # the intended prompt.
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": , "Feedback": ""}}

    Description: "{caption}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # --- Run the judge model ---
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Drop the prompt tokens so only the generated continuation is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- Parse the JSON verdict; fall back gracefully on malformed output ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback
f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' /'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / 
image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + 
post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[612:]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + _ = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') diff --git a/qwen_real.py b/qwen_real.py new file mode 100644 index 0000000000000000000000000000000000000000..af0a724b6f36a84d4003c1f538dd585ba64632f4 --- /dev/null +++ b/qwen_real.py @@ -0,0 +1,449 @@ +import os +import sys +import 
def clean_question(q: str) -> str:
    """Strip "<image N>" placeholders from a question and collapse whitespace.

    Non-string inputs are coerced with str() first, so the function is safe
    to call on arbitrary annotation values.
    """
    text = q if isinstance(q, str) else str(q)
    # Remove placeholders such as "<image 1>" (any spacing, any case).
    text = re.sub(r"<\s*image\s*\d+\s*>", "", text, flags=re.IGNORECASE)
    # Normalise runs of whitespace to single spaces and trim the ends.
    return re.sub(r"\s+", " ", text).strip()
total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multi-modal caption refinement. Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides complementary information about the same visual content: " + f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. 
" + f"- The line drawing highlights object outlines, shapes, and fine structures. " + f"- The edge map emphasizes boundaries and contours. " + f"- The depth map reveals spatial distances, perspective, and 3D relationships. " + f"- The normal map shows surface orientation and geometric curvature. " + f"- The albedo map presents true surface color without illumination or shadows. " + f"- The segmentation map divides the scene into semantic regions and object categories. " + f"- The human pose map indicates body orientation, structure, and articulation. " + f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. " + f"Scene description: \"{prompt}\" " + f"Now, based on both the multimodal visual information and the given scene description, " + f"analyze the scene carefully to answer a question. " + f"Your analysis should proceed in two stages:\n\n" + f"**Stage 1 — Modality-wise Observation:**\n" + f"For each provided modality image, analyze what specific visual information it contributes " + f"based on the above definitions. Describe what can be directly observed from each modality, " + f"such as color, shape, structure, spatial depth, or object positions. " + f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow question: " + f"Question: \"{question}\" " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", help="Optional negative prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", help="Prompt text for 
generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./qwen_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def 
@torch.inference_mode()
def text_refine(root, model, processor, prompt, iter_num, vqa_id, max_length=300):
    """Refine a coarse caption using every modality image found under `root`.

    Builds a multimodal chat message, runs the VLM, writes the refined caption
    to <output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt and returns it.
    NOTE(review): relies on module-global `args` set in the __main__ block.
    """
    messages = build_multimodal_message(root, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated text is decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # mkdir(parents=True) also creates args.output_dir, so no separate makedirs.
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, max_length=300):
    """Answer `question` using every modality image found under `root`.

    Writes the answer to <output_dir>/<vqa_id>/vqa_answer/caption.txt and
    returns it. Relies on module-global `args`.
    """
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / 'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the Jodi pipeline for `prompt`, post-process every modality output,
    and save each modality plus a merged grid under
    <output_dir>/<image_id>/iteration_<iter_num>. Returns the save directory.
    NOTE(review): relies on module-globals `args` and `post_processors`.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # One post-processor per modality: RGB output plus each condition map.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    import json  # FIX: json.load is used below but json was never imported in c2i.py

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    for sample in annotations:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        max_length = 1024
        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, question, image_id, max_length)


def clean_question(q: str) -> str:
    """Strip "<image N>" placeholders and collapse whitespace in a question.
    (First definition of the qwen_vqa_Agricultur.py module in this diff.)"""
    if not isinstance(q, str):
        q = str(q)
    # Remove "<image 1>"-style placeholders, case-insensitively.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Collapse runs of whitespace and trim.
    q = re.sub(r"\s+", " ", q).strip()
    return q
def dump_image(image, save_root):
    """Save a PIL image as JPEG under `save_root` and return the file path.

    NOTE(review): every call writes the same "input.jpg", so successive
    samples overwrite each other — confirm this is intended.
    """
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile several images on a grid and save the composite.

    Args:
        image_paths: list of image file paths.
        save_path: destination path of the merged image.
        images_per_row: images per grid row (default: all on one row).
        image_format: PIL save format name.

    Returns:
        The save path.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # All tiles are squashed to a common square; note this ignores aspect ratio.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # FIX: the canvas height was previously sum(first-row heights) * rows,
    # i.e. images_per_row times too tall — after the resize above each row
    # is exactly target_size pixels high.
    canvas = Image.new("RGB", (target_size * images_per_row, target_size * rows))
    for idx, img in enumerate(images):
        row, col = divmod(idx, images_per_row)
        canvas.paste(img, (col * target_size, row * target_size))

    parent = os.path.dirname(save_path)
    if parent:  # FIX: os.makedirs("") raises when save_path has no directory part
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + options_list = ast.literal_eval(options) + option_text = "\n".join([f"{chr(65+i)}. {opt}" for i, opt in enumerate(options_list)]) + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides complementary information about the same visual content: " + f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. " + f"- The line drawing highlights object outlines, shapes, and fine structures. " + f"- The edge map emphasizes boundaries and contours. " + f"- The depth map reveals spatial distances, perspective, and 3D relationships. " + f"- The normal map shows surface orientation and geometric curvature. " + f"- The albedo map presents true surface color without illumination or shadows. " + f"- The segmentation map divides the scene into semantic regions and object categories. " + f"- The human pose map indicates body orientation, structure, and articulation. 
" + f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. " + f"Scene description: \"{prompt}\" " + f"Scientific Subfield: \"{subfield}\" " + f"Now, based on both the multimodal visual information and the given scene description, " + f"analyze the scene carefully to answer a question. " + f"Your analysis should proceed in two stages:\n\n" + f"**Stage 1 — Modality-wise Observation:**\n" + f"For each provided modality image, analyze what specific visual information it contributes " + f"based on the above definitions. Describe what can be directly observed from each modality, " + f"such as color, shape, structure, spatial depth, or object positions. " + f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow multiple-choice question: " + f"Question: \"{question}\" " + f"Options: \"{option_text}\" " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/MMMU/Agriculture/validation-00000-of-00001.parquet", help="Prompt text for generation.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + 
parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./qwen_Agricultur_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, vqa_id, question, option, max_length=300): + + options_list = ast.literal_eval(option) + option_text="\n".join([f"{chr(65+i)}.{opt}" for i, opt in enumerate(options_list)]) + + question = clean_question(question) + + text_prompt = ( + f"Analyze the given image and answer the following question." 
+ f"Question: \"{question}\" \n" + f"Options: \"{option_text}\" " + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": text_prompt}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path 
= Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, options, subfield, vqa_id, max_length=300): + messages = build_vqa_message(root, prompt, question, options, subfield) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, subfield): + + print(f"🚀 Generating with prompt: {prompt}") + prompt = f'{subfield} image,' + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + 
# Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + dataset = load_dataset( + "parquet", + data_files=args.data_path, + split="train") + + for sample in dataset: + + image_keys = [f"image_{i}" for i in range(1, 8)] + num_images = sum(1 for key in image_keys if key in sample and isinstance(sample[key], type(sample["image_1"])) and sample[key] is not None) + + if num_images > 1: + continue + + image = sample["image_1"] + image_path = dump_image(image, args.temp_dir) + question = clean_question(sample["question"]) + image_id = sample["id"] + options = sample["options"] + field = sample["subfield"] + + max_length = 1024 + + #input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, image_id, question, options, 
max_length) + diff --git a/qwen_vqa_Art.py b/qwen_vqa_Art.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea6c158fba23fa98ccc0c212ba9d815c11fad0e --- /dev/null +++ b/qwen_vqa_Art.py @@ -0,0 +1,471 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re + +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + 
def build_vqa_message(root, prompt, question, options, subfield):
    """Build a Qwen3-VL chat message for multiple-choice VQA over all
    modality images found under `root` (qwen_vqa_Art.py copy).

    Args:
        root: directory containing image.* / annotation_*.{png,jpg,jpeg}.
        prompt: scene description injected into the instruction.
        question: multiple-choice question text.
        options: string repr of a Python list of options (parsed with ast).
        subfield: scientific subfield tag inserted into the prompt.

    Returns:
        A single-element `messages` list in Qwen chat format.
    """
    # Modality file stems mapped to human-readable names; insertion order
    # fixes the order of the attached images.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Single filesystem scan: first existing extension per modality, plus the
    # readable name in the same pass (previously present_modalities re-scanned
    # every extension for every modality).
    available, present_modalities = [], []
    for name, readable in readable_map.items():
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append(str(path))
                present_modalities.append(readable)
                break

    options_list = ast.literal_eval(options)
    option_text = "\n".join(f"{chr(65 + i)}. {opt}" for i, opt in enumerate(options_list))

    # NOTE(review): the instruction announces "two stages" but only defines
    # Stage 1 — confirm whether a Stage 2 paragraph was dropped.
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides complementary information about the same visual content: "
        f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing highlights object outlines, shapes, and fine structures. "
        f"- The edge map emphasizes boundaries and contours. "
        f"- The depth map reveals spatial distances, perspective, and 3D relationships. "
        f"- The normal map shows surface orientation and geometric curvature. "
        f"- The albedo map presents true surface color without illumination or shadows. "
        f"- The segmentation map divides the scene into semantic regions and object categories. "
        f"- The human pose map indicates body orientation, structure, and articulation. "
        f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. "
        f"Scene description: \"{prompt}\" "
        f"Scientific Subfield: \"{subfield}\" "
        f"Now, based on both the multimodal visual information and the given scene description, "
        f"analyze the scene carefully to answer a question. "
        f"Your analysis should proceed in two stages:\n\n"
        f"**Stage 1 — Modality-wise Observation:**\n"
        f"For each provided modality image, analyze what specific visual information it contributes "
        f"based on the above definitions. Describe what can be directly observed from each modality, "
        f"such as color, shape, structure, spatial depth, or object positions. "
        f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow multiple-choice question: "
        f"Question: \"{question}\" "
        f"Options: \"{option_text}\" "
        # FIX: dropped the original trailing `" ".join([""] * len(available))`,
        # which only appended len(available)-1 spaces.
    )

    # Qwen3-VL message format: image entries first, then the text instruction.
    messages = [
        {
            "role": "user",
            "content": [{"type": "image", "image": path} for path in available]
            + [{"type": "text", "text": text_prompt}],
        }
    ]
    return messages
" + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/MMMU/Art/validation-00000-of-00001.parquet", help="Prompt text for generation.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./qwen_Art_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# 
------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, vqa_id, question, option, max_length=300): + + options_list = ast.literal_eval(option) + option_text="\n".join([f"{chr(65+i)}.{opt}" for i, opt in enumerate(options_list)]) + + question = clean_question(question) + + text_prompt = ( + f"Analyze the given image and answer the following question." + f"Question: \"{question}\" \n" + f"Options: \"{option_text}\" " + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": text_prompt}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + 
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, options, subfield, vqa_id, max_length=300): + messages = build_vqa_message(root, prompt, question, options, subfield) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, subfield): + + print(f"🚀 Generating with prompt: {prompt}") + prompt = f'{subfield} image,' + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + 
guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + dataset = load_dataset( + "parquet", + data_files=args.data_path, + split="train") + + for sample in dataset: + + image_keys = [f"image_{i}" for i in range(1, 8)] + num_images = sum(1 for key in image_keys if key in sample and isinstance(sample[key], type(sample["image_1"])) and sample[key] is not None) + + if num_images > 1: + continue + + image = sample["image_1"] + 
def clean_question(q: str) -> str:
    """Normalize a question string.

    Removes ``<image N>``-style placeholders (case-insensitive, tolerant of
    internal spacing) and collapses runs of whitespace to single spaces.
    Non-string inputs are stringified first.
    """
    text = q if isinstance(q, str) else str(q)
    # Strip placeholders such as "<image 1>" / "< IMAGE 12 >".
    text = re.sub(r"<\s*image\s*\d+\s*>", "", text, flags=re.IGNORECASE)
    # Collapse leftover whitespace into single spaces.
    return re.sub(r"\s+", " ", text).strip()

def dump_image(image, save_root):
    """Save a PIL image as JPEG under ``save_root`` and return its path.

    Always writes to the fixed name ``input.jpg``, overwriting any previous
    dump in the same directory.
    """
    os.makedirs(save_root, exist_ok=True)
    target = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(target, format="JPEG", quality=95)
    return target
def build_vqa_message(root, prompt, question, options, subfield):
    """
    Build a Qwen3-VL chat message asking a multiple-choice question about a
    scene, grounded in every modality image found under ``root``.

    Args:
        root: Directory scanned for ``<modality>.png/.jpg/.jpeg`` files.
        prompt: Scene description injected into the instruction text.
        question: The question text (expected to be pre-cleaned).
        options: String repr of a Python list of option texts, e.g. "['a', 'b']".
        subfield: Scientific subfield label injected into the instruction.

    Returns:
        A one-element ``messages`` list (single user turn) whose content is one
        image entry per detected modality followed by one text entry.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal", "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # --- detect which modality files exist under root ---
    available = []
    for name in modality_names:  # first matching extension wins (.png preferred)
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append(str(path))
                break
    # --- human-readable names for each modality, used in the prompt ---
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map", "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # assumes `options` is the repr of a list — TODO confirm against dataset schema
    options_list = ast.literal_eval(options)
    option_text = "\n".join([f"{chr(65+i)}. {opt}" for i, opt in enumerate(options_list)])
    # Readable labels, in canonical modality order, for the detected files.
    present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])]
    # --- assemble the instruction text ---
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides complementary information about the same visual content: "
        f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing highlights object outlines, shapes, and fine structures. "
        f"- The edge map emphasizes boundaries and contours. "
        f"- The depth map reveals spatial distances, perspective, and 3D relationships. "
        f"- The normal map shows surface orientation and geometric curvature. "
        f"- The albedo map presents true surface color without illumination or shadows. "
        f"- The segmentation map divides the scene into semantic regions and object categories. "
        f"- The human pose map indicates body orientation, structure, and articulation. "
        f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. "
        f"Scene description: \"{prompt}\" "
        f"Scientific Subfield: \"{subfield}\" "
        f"Now, based on both the multimodal visual information and the given scene description, "
        f"analyze the scene carefully to answer a question. "
        f"Your analysis should proceed in two stages:\n\n"
        f"**Stage 1 — Modality-wise Observation:**\n"
        f"For each provided modality image, analyze what specific visual information it contributes "
        f"based on the above definitions. Describe what can be directly observed from each modality, "
        f"such as color, shape, structure, spatial depth, or object positions. "
        f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow multiple-choice question: "
        f"Question: \"{question}\" "
        f"Options: \"{option_text}\" "
        # NOTE(review): joining N empty strings yields N-1 spaces ("" for N<=1);
        # this appends only whitespace padding — looks like a leftover
        # placeholder for per-image tokens. Confirm it is intentional.
        + " ".join([""] * len(available))
    )

    # --- wrap into the Qwen3-VL messages format: images first, then text ---
    messages = [
        {
            "role": "user",
            "content": [{"type": "image", "image": path} for path in available]
            + [{"type": "text", "text": text_prompt}],
        }
    ]
    return messages
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/MMMU/Art_Theory/validation-00000-of-00001.parquet", help="Prompt text for generation.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + 
parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./qwen_Art_theory_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, vqa_id, question, option, max_length=300): + + options_list = ast.literal_eval(option) + option_text="\n".join([f"{chr(65+i)}.{opt}" for i, opt in enumerate(options_list)]) + + question = clean_question(question) + + text_prompt = ( + f"Analyze the given image and answer the following question." 
+ f"Question: \"{question}\" \n" + f"Options: \"{option_text}\" " + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": text_prompt}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path 
= Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, options, subfield, vqa_id, max_length=300): + messages = build_vqa_message(root, prompt, question, options, subfield) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, subfield): + + print(f"🚀 Generating with prompt: {prompt}") + prompt = f'{subfield} image,' + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + 
# Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + dataset = load_dataset( + "parquet", + data_files=args.data_path, + split="train") + + for sample in dataset: + + image_keys = [f"image_{i}" for i in range(1, 8)] + num_images = sum(1 for key in image_keys if key in sample and isinstance(sample[key], type(sample["image_1"])) and sample[key] is not None) + + if num_images > 1: + continue + + image = sample["image_1"] + image_path = dump_image(image, args.temp_dir) + question = clean_question(sample["question"]) + image_id = sample["id"] + options = sample["options"] + field = sample["subfield"] + + max_length = 1024 + + #input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, image_id, question, options, 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile several images into one grid image and save it.

    Every input is resized to a square of ``min(1024, first image's width)``,
    then pasted left-to-right, top-to-bottom into a grid with
    ``images_per_row`` columns.

    Args:
        image_paths: Non-empty list of image file paths.
        save_path: Destination path (directories are created if needed).
        images_per_row: Columns in the grid; defaults to all images in one row.
        image_format: PIL save format name (case-insensitive).

    Returns:
        The ``save_path`` that was written.

    Raises:
        ValueError: If ``image_paths`` is empty.
    """
    from PIL import Image

    if not image_paths:
        raise ValueError("image_paths must not be empty")

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Normalize every tile to the same square size.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUGFIX: the canvas height was sum(heights[:images_per_row]) * rows,
    # which over-allocates by a factor of images_per_row (large black bands,
    # especially in the default one-row case). After the resize above all
    # tiles are target_size tall, so the grid is exactly rows * target_size.
    new_im = Image.new("RGB", (target_size * images_per_row, target_size * rows))

    for idx, img in enumerate(images):
        row, col = divmod(idx, images_per_row)
        new_im.paste(img, (col * target_size, row * target_size))

    # BUGFIX: os.makedirs(os.path.dirname(save_path)) crashed with
    # FileNotFoundError when save_path is a bare filename (dirname == "").
    parent = os.path.dirname(str(save_path))
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
{opt}" for i, opt in enumerate(options_list)]) + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides complementary information about the same visual content: " + f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. " + f"- The line drawing highlights object outlines, shapes, and fine structures. " + f"- The edge map emphasizes boundaries and contours. " + f"- The depth map reveals spatial distances, perspective, and 3D relationships. " + f"- The normal map shows surface orientation and geometric curvature. " + f"- The albedo map presents true surface color without illumination or shadows. " + f"- The segmentation map divides the scene into semantic regions and object categories. " + f"- The human pose map indicates body orientation, structure, and articulation. " + f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. " + f"Scene description: \"{prompt}\" " + f"Scientific Subfield: \"{subfield}\" " + f"Now, based on both the multimodal visual information and the given scene description, " + f"analyze the scene carefully to answer a question. " + f"Your analysis should proceed in two stages:\n\n" + f"**Stage 1 — Modality-wise Observation:**\n" + f"For each provided modality image, analyze what specific visual information it contributes " + f"based on the above definitions. Describe what can be directly observed from each modality, " + f"such as color, shape, structure, spatial depth, or object positions. 
" + f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow multiple-choice question: " + f"Question: \"{question}\" " + f"Options: \"{option_text}\" " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. 
" + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/MMMU/Design/validation-00000-of-00001.parquet", help="Prompt text for generation.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./qwen_Design_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# 
@torch.inference_mode()
def init_i2t(model, processor, image_path, vqa_id, question, option, max_length=300):
    """Generate an initial answer for a single-image multiple-choice question.

    Builds a one-image chat message, runs the Qwen3-VL model, and writes the
    decoded answer to ``<args.output_dir>/<vqa_id>/caption.txt``.

    Args:
        model: Loaded Qwen3-VL generation model.
        processor: Matching AutoProcessor (chat template + tokenizer).
        image_path: Path to the input image file.
        vqa_id: Sample id; used as the per-sample output subdirectory name.
        question: Raw question text (``<image N>`` placeholders are stripped).
        option: String repr of a Python list of option texts, e.g. "['a', 'b']".
        max_length: Maximum number of new tokens to generate.

    Returns:
        str: The raw decoded answer text.

    Relies on the module-level ``args`` namespace for ``output_dir``.
    """
    # assumes `option` is the repr of a list — TODO confirm against dataset schema
    options_list = ast.literal_eval(option)
    # No space after the option letter ("A.opt") — kept verbatim: the prompt
    # text is model-facing behavior and must not be silently reformatted.
    option_text = "\n".join(f"{chr(65 + i)}.{opt}" for i, opt in enumerate(options_list))

    question = clean_question(question)

    text_prompt = (
        "Analyze the given image and answer the following question."
        f"Question: \"{question}\" \n"
        f"Options: \"{option_text}\" "
    )

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": text_prompt},
            ],
        }
    ]

    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Drop the prompt prefix from each sequence so only the answer is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # mkdir(parents=True) creates args.output_dir as well, so the previous
    # separate os.makedirs(args.output_dir) call was redundant and is removed.
    save_dir = Path(args.output_dir) / vqa_id
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"  # save_dir is already a Path
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+    ]
+    output_text = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+    print(output_text)
+
+    # Persist the refined caption for this iteration.
+    os.makedirs(args.output_dir, exist_ok=True)
+    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
+    save_dir.mkdir(parents=True, exist_ok=True)
+    caption_path = Path(save_dir) / f"caption.txt"
+    with open(caption_path, "w", encoding="utf-8") as f:
+        f.write(output_text[0].strip())
+
+    return output_text[0]
+
+@torch.inference_mode()
+def vqa(root, model, processor, prompt, question, options, subfield, vqa_id, max_length=300):
+    """Answer a multiple-choice question using all modality images found under `root`.
+
+    Delegates message construction to `build_vqa_message`, generates with
+    Qwen3-VL, and writes the first decoded answer to
+    <args.output_dir>/<vqa_id>/caption.txt.
+
+    NOTE(review): this is the same file that `init_i2t` writes for the same
+    `vqa_id`, so the earlier answer is silently overwritten — confirm intended.
+    """
+    messages = build_vqa_message(root, prompt, question, options, subfield)
+    print(messages)
+    inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True,
+        return_dict=True,
+        return_tensors="pt"
+    )
+    inputs = inputs.to(model.device)
+    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
+    # Keep only the tokens generated after each prompt before decoding.
+    generated_ids_trimmed = [
+        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
+    output_text = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+    print(output_text)
+    os.makedirs(args.output_dir, exist_ok=True)
+    save_dir = Path(args.output_dir) / vqa_id
+    save_dir.mkdir(parents=True, exist_ok=True)
+    caption_path = Path(save_dir) / f"caption.txt"
+    with open(caption_path, "w", encoding="utf-8") as f:
+        f.write(output_text[0].strip())
+    return output_text[0]
+
+@torch.inference_mode()
+def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, subfield):
+    """Re-generate all modality images with the Jodi pipeline from a refined prompt.
+
+    Prepends the subfield tag to the prompt, runs `pipe` in t2i mode
+    conditioned on `images`/`role`, and (below) post-processes and saves one
+    PNG per modality. Reads sampling settings from the module-global `args`.
+    """
+
+    print(f"🚀 Generating with prompt: {prompt}")
+    # Bias generation toward the scientific subfield, e.g. "Design image, ...".
+    prompt = f'{subfield} image,' + ' ' + prompt
+    outputs = pipe(
+        images=images,
+        role=role,
+        prompt=prompt,
+        negative_prompt=args.negative_prompt,
+        height=height,
+        width=width,
+        num_inference_steps=args.steps,
+
guidance_scale=args.guidance_scale,
+        num_images_per_prompt=1,
+        generator=generator,
+        task='t2i'
+    )
+
+    # Apply post-processing for each modality (RGB output + every condition map).
+    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
+    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
+    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]
+
+    # --------------------------
+    # Save results
+    # --------------------------
+    os.makedirs(args.output_dir, exist_ok=True)
+
+    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
+    save_dir.mkdir(parents=True, exist_ok=True)
+
+    # One PNG per modality, named after its entry in `modality_names`.
+    for idx, img in enumerate(results):
+        name = modality_names[idx]
+        save_path = save_dir / f"{name}.png"
+        img.save(save_path)
+        print(f"💾 Saved {name} → {save_path}")
+
+    # Also write a side-by-side montage of all modalities for quick inspection.
+    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
+    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
+
+    print(f"\n✅ All results saved in: {save_dir}\n")
+    return save_dir
+
+
+# ------------------------------
+# Entry Point
+# ------------------------------
+if __name__ == "__main__":
+    args = get_parser().parse_args()
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    print(f"✅ Using device: {device}")
+
+    processor = AutoProcessor.from_pretrained(
+        args.model_name_or_path,
+    )
+
+    # bf16 + FlashAttention-2 — requires a compatible GPU at runtime.
+    model = Qwen3VLForConditionalGeneration.from_pretrained(
+        args.text_model_path,
+        attn_implementation="flash_attention_2",
+        dtype=(torch.bfloat16),
+    ).to(device)
+
+    torch.manual_seed(args.seed)
+    generator = torch.Generator(device=device).manual_seed(args.seed)
+
+    # NOTE(review): `load_dataset`, `dump_image` and `clean_question` are not
+    # visible in this file's import block — confirm they are imported/defined,
+    # otherwise this raises NameError at runtime.
+    dataset = load_dataset(
+        "parquet",
+        data_files=args.data_path,
+        split="train")
+
+    for sample in dataset:
+
+        # Skip MMMU samples that reference more than one image; only
+        # single-image questions are handled below.
+        # NOTE(review): the isinstance(type(sample["image_1"])) check is
+        # fragile when image_1 itself is None — verify against the dataset.
+        image_keys = [f"image_{i}" for i in range(1, 8)]
+        num_images = sum(1 for key in image_keys if key in sample and isinstance(sample[key], type(sample["image_1"])) and sample[key] is not None)
+
+        if num_images > 1:
+            continue
+
+        image = sample["image_1"]
+
image_path = dump_image(image, args.temp_dir) + question = clean_question(sample["question"]) + image_id = sample["id"] + options = sample["options"] + field = sample["subfield"] + + max_length = 1024 + + #input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, image_id, question, options, max_length) + diff --git a/qwen_vqa_Literature.py b/qwen_vqa_Literature.py new file mode 100644 index 0000000000000000000000000000000000000000..aa6fd0ed56a64acdc0a3ce5073ff6bb830940d45 --- /dev/null +++ b/qwen_vqa_Literature.py @@ -0,0 +1,471 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re + +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, 
save_path, images_per_row=None, image_format="png"):
+    """Tile several images into one grid image and save it.
+
+    Args:
+        image_paths: list of image file paths.
+        save_path: destination path (including the file name).
+        images_per_row: images per row (default: all on one row).
+        image_format: output format passed to PIL.
+    """
+    from PIL import Image
+    import io
+
+    # Load every tile as RGB.
+    images = [Image.open(p).convert("RGB") for p in image_paths]
+
+    if images_per_row is None:
+        images_per_row = len(images)
+
+    # Force every tile square, capped at 1024, keyed off the first image's width.
+    target_size = min(1024, images[0].size[0])
+    images = [img.resize((target_size, target_size)) for img in images]
+
+    # Stitch the tiles into a grid.
+    widths, heights = zip(*(img.size for img in images))
+    max_width = max(widths)
+    rows = (len(images) + images_per_row - 1) // images_per_row
+    # NOTE(review): after the resize above all heights are equal, so this
+    # allocates images_per_row times more height than the pasting loop below
+    # actually uses — the extra area stays black; confirm whether intended.
+    total_height = sum(heights[:images_per_row]) * rows
+
+    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
+    y_offset = 0
+    for i in range(0, len(images), images_per_row):
+        row_imgs = images[i:i+images_per_row]
+        x_offset = 0
+        for img in row_imgs:
+            new_im.paste(img, (x_offset, y_offset))
+            x_offset += max_width
+        y_offset += heights[0]
+
+    # NOTE(review): os.makedirs("") raises if save_path has no directory part;
+    # callers here always pass a directory-qualified path — confirm before reuse.
+    os.makedirs(os.path.dirname(save_path), exist_ok=True)
+    new_im.save(save_path, format=image_format.upper())
+    print(f"🧩 Saved merged image → {save_path}")
+    return save_path
+
+
+def build_vqa_message(root, prompt, question, options, subfield):
+    """
+    Build Qwen3-VL message for multi-modal caption refinement. Automatically detects available modalities under root.
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + options_list = ast.literal_eval(options) + option_text = "\n".join([f"{chr(65+i)}. {opt}" for i, opt in enumerate(options_list)]) + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides complementary information about the same visual content: " + f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. " + f"- The line drawing highlights object outlines, shapes, and fine structures. " + f"- The edge map emphasizes boundaries and contours. " + f"- The depth map reveals spatial distances, perspective, and 3D relationships. " + f"- The normal map shows surface orientation and geometric curvature. " + f"- The albedo map presents true surface color without illumination or shadows. " + f"- The segmentation map divides the scene into semantic regions and object categories. " + f"- The human pose map indicates body orientation, structure, and articulation. 
" + f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. " + f"Scene description: \"{prompt}\" " + f"Scientific Subfield: \"{subfield}\" " + f"Now, based on both the multimodal visual information and the given scene description, " + f"analyze the scene carefully to answer a question. " + f"Your analysis should proceed in two stages:\n\n" + f"**Stage 1 — Modality-wise Observation:**\n" + f"For each provided modality image, analyze what specific visual information it contributes " + f"based on the above definitions. Describe what can be directly observed from each modality, " + f"such as color, shape, structure, spatial depth, or object positions. " + f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow multiple-choice question: " + f"Question: \"{question}\" " + f"Options: \"{option_text}\" " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/MMMU/Literature/validation-00000-of-00001.parquet", help="Prompt text for generation.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + 
parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./qwen_Literature_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, vqa_id, question, option, max_length=300): + + options_list = ast.literal_eval(option) + option_text="\n".join([f"{chr(65+i)}.{opt}" for i, opt in enumerate(options_list)]) + + question = clean_question(question) + + text_prompt = ( + f"Analyze the given image and answer the following question." 
+ f"Question: \"{question}\" \n" + f"Options: \"{option_text}\" " + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": text_prompt}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path 
= Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, options, subfield, vqa_id, max_length=300): + messages = build_vqa_message(root, prompt, question, options, subfield) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, subfield): + + print(f"🚀 Generating with prompt: {prompt}") + prompt = f'{subfield} image,' + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + 
# Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + dataset = load_dataset( + "parquet", + data_files=args.data_path, + split="train") + + for sample in dataset: + + image_keys = [f"image_{i}" for i in range(1, 8)] + num_images = sum(1 for key in image_keys if key in sample and isinstance(sample[key], type(sample["image_1"])) and sample[key] is not None) + + if num_images > 1: + continue + + image = sample["image_1"] + image_path = dump_image(image, args.temp_dir) + question = clean_question(sample["question"]) + image_id = sample["id"] + options = sample["options"] + field = sample["subfield"] + + max_length = 1024 + + #input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, image_id, question, options, 
max_length) + diff --git a/t2i.py b/t2i.py new file mode 100644 index 0000000000000000000000000000000000000000..4b8d9d1aa83ec7fdca12e83a2e299215403e10cf --- /dev/null +++ b/t2i.py @@ -0,0 +1,357 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, 
(x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. 
" + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, 
default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./demo_t2i_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, pipe, iter_num, post_processors, modality_names, generator): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {args.prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=args.prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" 
+ img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(prompt, root, iter_num, modality_names, generator): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {args.prompt}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = 
[post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + 
post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, pipe, 0, post_processors, modality_names, generator) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + for step in range(1, args.iters): + prompt = text_refine(save_dir,model, processor, prompt, step, max_length) + max_length += 100 + save_dir = image_refine(prompt, save_dir, step, modality_names, generator) + + diff --git a/test_i2t_coco.py b/test_i2t_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..743479607595a89e20c3155dfd6f8a194b166324 --- /dev/null +++ b/test_i2t_coco.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import 
Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + 
parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + 
inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + 
concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, 
"r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[:123]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_coco1.py b/test_i2t_coco1.py new file mode 100644 index 0000000000000000000000000000000000000000..dff933ce3573d80c186e5d63d78cb3d2d6feaf9d --- /dev/null +++ b/test_i2t_coco1.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, 
Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + 
parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + 
inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + 
concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, 
"r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[123:246]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_coco2.py b/test_i2t_coco2.py new file mode 100644 index 0000000000000000000000000000000000000000..14e6a9abb0a89f6351ae7d564e62776d0940bd60 --- /dev/null +++ b/test_i2t_coco2.py @@ -0,0 +1,457 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import 
AutoProcessor, Trainer
from pathlib import Path
import itertools
import re
import json  # BUG FIX: json.load / json.loads are used below but json was never imported

def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination path (including the file name).
        images_per_row: number of images per row (default: all in one row).
        image_format: format name passed to PIL's ``Image.save``.

    Returns:
        The ``save_path`` that was written.
    """
    from PIL import Image

    # Load and normalise everything to RGB.
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize every image to the same square size (capped at 1024px),
    # so each grid cell has identical dimensions.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: height was previously sum(heights[:images_per_row]) * rows,
    # which over-allocates the canvas by a factor of images_per_row — after
    # the uniform resize above every row is exactly one image tall.
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path

def build_multimodal_message(root, coarse_caption="a generic scene", feedback=''):
    """
    Build a Qwen3-VL chat message for multi-modal caption refinement.

    Scans ``root`` for the known modality files (image / lineart / edge /
    depth / normal / albedo / segmentation / openpose, trying .png, .jpg,
    .jpeg in that order) and builds one user message containing every
    available modality image plus a refinement instruction that folds in
    the evaluator ``feedback`` and the ``coarse_caption``.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modality files that actually exist under root.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append(str(path))
                break

    # Human-readable names used inside the prompt text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }
    present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])]

    # Refinement instruction for the VLM.
    text_prompt = (
        f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides distinct types of visual information that together describe the same subject: "
        f"- The RGB image provides color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing reveals detailed structural outlines, shapes, and proportions. "
        f"- The edge map highlights object boundaries and contours. "
        f"- The depth map shows spatial distance, perspective, and 3D depth relationships. "
        f"- The normal map captures fine surface orientation, curvature, and geometric details. "
        f"- The albedo map shows true surface colors without lighting or shadow effects. "
        f"- The segmentation map provides semantic regions and object boundaries for scene composition. "
        f"- The human pose map shows body structure, orientation, and posture of subjects. "
        f"For each provided modality image, analyze it according to the above definitions and describe "
        f"the specific visual information it contributes in this particular case. "
        f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. "
        f"Do NOT describe each modality separately or mention modality names. "
        f"Focus on merging their information into a single coherent image description. "
        #f"the subject’s appearance, lighting, form, and spatial depth. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Refine the coarse caption into a more detailed and accurate image description. "
        f"Coarse caption: '{coarse_caption}' "
        + " ".join([""] * len(available))
    )

    # Assemble the Qwen3-VL message: all modality images first, then the text.
    messages = [
        {
            "role": "user",
            "content": [{"type": "image", "image": path} for path in available]
                       + [{"type": "text", "text": text_prompt}],
        }
    ]
    return messages

# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.")
    parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.")
parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./example_coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + #print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + + +@torch.inference_mode() +def evaluate_caption(image_path, model, processor, caption, max_length=256): + """ + Evaluate how well the generated caption truthfully describes the given image. 
+ """ + eval_prompt = f""" + You are an image–caption alignment evaluator and factuality advisor. + Given one RGB image and a textual caption, evaluate how well the caption + truthfully and comprehensively describes what is visually shown. + + Caption: "{caption}" + + ## Evaluation focus + - Describe whether all **objects, attributes, and relations** mentioned in the caption are actually visible. + - The caption should only include what is clearly seen in the image — no imaginary or hallucinated content. + - The caption should also cover the **main visible objects** and their essential attributes (color, count, relative position) if possible. + - If the caption adds nonexistent objects or attributes, reduce the score sharply (<0.6). + - If the caption omits minor details but remains overall faithful, keep a moderate score (~0.8–0.9). + - If the caption perfectly matches and fully reflects the visual scene, score near 1.0. + + ## Feedback instruction + Provide **one short constructive feedback sentence** to improve the caption. + - Focus on what should be *added, adjusted, or rephrased* for truthfulness. + - Do NOT mention errors or missing things directly (avoid "not", "no", "missing", "wrong", "fail"). + - Start with a verb such as "Add", "Replace", "Adjust", "Rephrase", "Include", "Describe". + - Example: + - If the caption says "a cat and a dog" but only a cat is visible → "Remove the dog and describe only the cat." + - If the caption omits a visible red car → "Add the red car on the right side of the road." + - If the color or quantity is inaccurate → "Replace with the correct color and number as seen." 
+ + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + print(f'eval:{messages}') + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + #print(f" → Overall={score:.3f}") + #print(f"💡 Feedback: {feedback}") + return score, feedback + + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + print(f'refine message:{messages}') + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + #print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + 
f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + #print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + #print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + #print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = 
JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[246:369]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, 
max_length) + + score, feedback = evaluate_caption(image_path, model, processor, prompt) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_name, max_length) + score, feedback = evaluate_caption(image_path, model, processor, prompt) + + diff --git a/test_i2t_coco3.py b/test_i2t_coco3.py new file mode 100644 index 0000000000000000000000000000000000000000..6b8141b1021b6634f855ccc0b08530b153364c29 --- /dev/null +++ b/test_i2t_coco3.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = 
min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the 
same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, 
max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / 
f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + 
dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[369:492]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = 
init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_coco4.py b/test_i2t_coco4.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ac60ac7918ed30cc0116271606993daa4601a2 --- /dev/null +++ b/test_i2t_coco4.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + 
widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, 
max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / 
f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + 
dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[492:615]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = 
init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_coco5.py b/test_i2t_coco5.py new file mode 100644 index 0000000000000000000000000000000000000000..f586e5ad4adf9ce28755164e2d5ec02d8b1e42a9 --- /dev/null +++ b/test_i2t_coco5.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + 
widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, 
max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / 
f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + 
dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[615:738]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = 
init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_coco6.py b/test_i2t_coco6.py new file mode 100644 index 0000000000000000000000000000000000000000..56b9a15510a374a53164cc06e7d2a5867d536d0e --- /dev/null +++ b/test_i2t_coco6.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + 
widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, 
max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / 
f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + 
dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[738:861]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = 
init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_coco7.py b/test_i2t_coco7.py new file mode 100644 index 0000000000000000000000000000000000000000..f138650e4555140e219012b27439fa4aab74e768 --- /dev/null +++ b/test_i2t_coco7.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + 
widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/COCO_Karpathy/karpathy_test.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./coco_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, 
max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / 
f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + 
dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + save_image_names = os.listdir("/home/efs/mjw/mjw/code/Jodi/coco_i2t_outputs/val2014") + image_names = [item["image_path"] for item in data][4021:] + + for image_name in image_names[861:]: + + if image_name in save_image_names: + print(f'already got {image_name} in ', f'our {save_image_names}') + + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, 
processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_nocaps.py b/test_i2t_nocaps.py new file mode 100644 index 0000000000000000000000000000000000000000..aa16e05bd5f95d42be387db5c458b5c481ecadb4 --- /dev/null +++ b/test_i2t_nocaps.py @@ -0,0 +1,368 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights 
= zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
" + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/images", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/captions.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./nocaps_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, 
iter_num, name, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = 
Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + 
attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + image_names = [item["image_name"] for item in data][:750] + + for image_name in image_names: + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, 
modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_nocaps1.py b/test_i2t_nocaps1.py new file mode 100644 index 0000000000000000000000000000000000000000..7f21c6fcca04b4173a31b8e4c770a6ee426a0a59 --- /dev/null +++ b/test_i2t_nocaps1.py @@ -0,0 +1,368 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width 
* images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. 
" + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. 
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for JODI iterative image->text inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    # BUG FIX: the help strings below said "Prompt text for generation." (copy-paste error).
    parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/images", help="Root directory of input images.")
    parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/captions.json", help="JSON file listing image names.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help said "Number of inference steps." for --iters as well.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./nocaps_i2t_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------

def _chat_generate(model, processor, messages, max_length):
    """Run one chat-template generation round and return the decoded reply.

    Shared by init_i2t and text_refine (the original duplicated this logic).
    """
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated text is decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    return output_text[0]


def _save_caption(name, iter_num, caption):
    """Write *caption* to <args.output_dir>/<name>/iteration_<iter_num>/caption.txt.

    NOTE(review): relies on module-global ``args`` being set in ``__main__``.
    mkdir(parents=True) also creates args.output_dir, so the original's extra
    os.makedirs call was redundant and has been dropped.
    """
    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(caption.strip())
    return save_dir


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, name, max_length=300):
    """Produce the initial caption for *image_path*, save it, and return it."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]
    caption = _chat_generate(model, processor, messages, max_length)
    _save_caption(name, iter_num, caption)
    return caption


@torch.inference_mode()
def text_refine(root, model, processor, prompt, iter_num, name, max_length=300):
    """Refine *prompt* using the modality images found under *root*.

    Builds a multi-modal Qwen3-VL message (build_multimodal_message), generates
    a refined caption, persists it, and returns it.
    """
    messages = build_multimodal_message(root, prompt)
    caption = _chat_generate(model, processor, messages, max_length)
    _save_caption(name, iter_num, caption)
    return caption
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name):
    """Generate the RGB image plus all annotation modalities from *prompt*.

    Saves one PNG per modality under <args.output_dir>/<name>/iteration_<iter_num>/
    plus a merged contact sheet, and returns the save directory.
    NOTE(review): relies on module-globals ``args`` and ``post_processors``.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i'
    )

    # Post-process each modality, then flatten to a list of PIL RGB images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    # BUG FIX: the original reused the *name* parameter as loop variable,
    # shadowing the sample name for the rest of the function.
    for idx, img in enumerate(results):
        modality = modality_names[idx]
        save_path = save_dir / f"{modality}.png"
        img.save(save_path)
        print(f"💾 Saved {modality} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{m}.png" for m in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    # BUG FIX: c2i.py calls json.load below but never imported json at file top;
    # import it locally here so the script actually runs.
    import json

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per output head: index 0 is the RGB image,
    # the rest follow pipe.config.conditions order.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            # Unknown condition: fall back to the identity image post-processor.
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)
    # (removed unused `import glob` from the original)

    with open(args.json_path, "r") as f:
        data = json.load(f)

    # NOTE(review): hard-coded shard [750:1500] — presumably for splitting the
    # dataset across parallel runs; confirm before changing.
    image_names = [item["image_name"] for item in data][750:1500]

    for image_name in image_names:
        image_path = os.path.join(args.image_root, image_name)
        image = Image.open(image_path).convert("RGB")
        width, height = image.size

        # Only slot 0 (the RGB image) is provided; all condition slots are free.
        control_images = [image] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        max_length = 1024
        prompt = init_i2t(model, processor, image_path, 0, image_name, max_length)

        # Alternate image generation and caption refinement for args.iters rounds,
        # letting the caption grow a little each round.
        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_name)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length)
modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_nocaps2.py b/test_i2t_nocaps2.py new file mode 100644 index 0000000000000000000000000000000000000000..5c5c4c899177beae9be0ec46d17dc7850b3880c7 --- /dev/null +++ b/test_i2t_nocaps2.py @@ -0,0 +1,448 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +import re +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the images at *image_paths* into one grid image saved at *save_path*.

    All tiles are resized to a common square size (min(1024, first image's
    width)); defaults to a single row. Returns *save_path*.
    """
    from PIL import Image

    if not image_paths:  # robustness: original crashed with IndexError on []
        raise ValueError("image_paths must not be empty")

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Normalize every tile to the same square size.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: the original allocated sum(heights[:images_per_row]) * rows,
    # i.e. images_per_row * rows tile-heights — leaving blank bands whenever
    # more than one row was used. One row is exactly one tile high.
    canvas = Image.new("RGB", (target_size * images_per_row, target_size * rows))

    y_offset = 0
    for start in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[start:start + images_per_row]:
            canvas.paste(img, (x_offset, y_offset))
            x_offset += target_size
        y_offset += target_size

    parent = os.path.dirname(str(save_path))
    if parent:  # BUG FIX: os.makedirs("") raises when save_path has no directory part
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message that refines *coarse_caption*.

    Scans *root* for known modality images (first match among .png/.jpg/.jpeg
    per modality), and composes one user message containing every found image
    followed by a refinement instruction that incorporates *feedback*.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Collect the first existing file per modality and its human-readable label
    # in one pass (the original re-derived present_modalities afterwards via
    # path-string membership tests over `available`).
    available = []
    present_modalities = []
    for mod in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{mod}{ext}"
            if path.exists():
                available.append(str(path))
                present_modalities.append(readable_map[mod])
                break

    text_prompt = (
        f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides distinct types of visual information that together describe the same subject: "
        f"- The RGB image provides color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing reveals detailed structural outlines, shapes, and proportions. "
        f"- The edge map highlights object boundaries and contours. "
        f"- The depth map shows spatial distance, perspective, and 3D depth relationships. "
        f"- The normal map captures fine surface orientation, curvature, and geometric details. "
        f"- The albedo map shows true surface colors without lighting or shadow effects. "
        f"- The segmentation map provides semantic regions and object boundaries for scene composition. "
        f"- The human pose map shows body structure, orientation, and posture of subjects. "
        f"For each provided modality image, analyze it according to the above definitions and describe "
        f"the specific visual information it contributes in this particular case. "
        f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. "
        f"Do NOT describe each modality separately or mention modality names. "
        f"Focus on merging their information into a single coherent image description. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Refine the coarse caption into a more detailed and accurate image description. "
        f"Coarse caption: '{coarse_caption}' "
        # Kept for byte-identical prompt output; this only appends spaces
        # (one fewer than len(available)) and is a candidate for removal.
        + " ".join([""] * len(available))
    )

    messages = [
        {
            "role": "user",
            "content": [{"type": "image", "image": path} for path in available]
            + [{"type": "text", "text": text_prompt}],
        }
    ]
    return messages


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for JODI iterative image->text inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    # BUG FIX: the help strings below said "Prompt text for generation." (copy-paste error).
    parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/images", help="Root directory of input images.")
    parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/captions.json", help="JSON file listing image names.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help said "Number of inference steps." for --iters as well.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./example_nocaps_i2t_outputs", help="Directory to save results.")
    return parser
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, name, max_length=300):
    """Produce the initial caption for *image_path*, save it, and return it.

    NOTE(review): relies on module-global ``args`` set in ``__main__``.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated text is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # mkdir(parents=True) also creates args.output_dir; the original's extra
    # os.makedirs call was redundant.
    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def parse_eval_response(text):
    """Extract (score, feedback) from the evaluator's raw reply *text*.

    Finds the first JSON object in the reply; returns (0.0, stripped reply)
    when no parsable JSON with the expected keys is present.
    BUG FIX: the original called re.search(...).group(0) unconditionally,
    raising AttributeError on no match, masked by a broad `except Exception`.
    """
    match = re.search(r"\{.*\}", text, re.S)
    if match:
        try:
            data = json.loads(match.group(0))
            return float(data.get("Consistency", 0)), data.get("Feedback", "")
        except (json.JSONDecodeError, TypeError, ValueError):
            pass
    return 0.0, text.strip()


@torch.inference_mode()
def evaluate_caption(image_path, model, processor, caption, max_length=256):
    """
    Evaluate how well the generated caption truthfully describes the given image.

    Returns (score, feedback): a float consistency score in [0, 1] and one
    short feedback sentence from the evaluator model.
    """
    # NOTE(review): the JSON-template placeholders below were garbled in the
    # source (angle-bracket text had been stripped); reconstructed — confirm.
    eval_prompt = f"""
    You are an image–caption alignment evaluator and factuality advisor.
    Given one RGB image and a textual caption, evaluate how well the caption
    truthfully and comprehensively describes what is visually shown.

    Caption: "{caption}"

    ## Evaluation focus
    - Describe whether all **objects, attributes, and relations** mentioned in the caption are actually visible.
    - The caption should only include what is clearly seen in the image — no imaginary or hallucinated content.
    - The caption should also cover the **main visible objects** and their essential attributes (color, count, relative position) if possible.
    - If the caption adds nonexistent objects or attributes, reduce the score sharply (<0.6).
    - If the caption omits minor details but remains overall faithful, keep a moderate score (~0.8–0.9).
    - If the caption perfectly matches and fully reflects the visual scene, score near 1.0.

    ## Feedback instruction
    Provide **one short constructive feedback sentence** to improve the caption.
    - Focus on what should be *added, adjusted, or rephrased* for truthfulness.
    - Do NOT mention errors or missing things directly (avoid "not", "no", "missing", "wrong", "fail").
    - Start with a verb such as "Add", "Replace", "Adjust", "Rephrase", "Include", "Describe".
    - Example:
      - If the caption says "a cat and a dog" but only a cat is visible → "Remove the dog and describe only the cat."
      - If the caption omits a visible red car → "Add the red car on the right side of the road."
      - If the color or quantity is inaccurate → "Replace with the correct color and number as seen."

    Return JSON only:
    {{
      "Consistency": <score between 0 and 1>,
      "Feedback": "<one short feedback sentence>"
    }}
    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    score, feedback = parse_eval_response(text)

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback
@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, name, max_length=300):
    """Refine *prompt* using the modality images under *root* and evaluator *feedback*.

    Saves the refined caption (caption.txt) and the feedback (feed.txt) under
    <args.output_dir>/<name>/iteration_<iter_num>/ and returns the caption.
    NOTE(review): relies on module-global ``args`` set in ``__main__``.
    """
    messages = build_multimodal_message(root, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated text is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # mkdir(parents=True) also creates args.output_dir; the original's extra
    # os.makedirs call and double Path() wrapping were redundant.
    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    with open(save_dir / "feed.txt", "w", encoding="utf-8") as f:
        f.write(feedback.strip())

    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name):
    """Generate the RGB image plus all annotation modalities from *prompt*.

    Saves one PNG per modality plus a merged contact sheet under
    <args.output_dir>/<name>/iteration_<iter_num>/ and returns that directory.
    NOTE(review): relies on module-globals ``args`` and ``post_processors``.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each output head, then flatten to a list of PIL RGB images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    # BUG FIX: the original reused the *name* parameter as loop variable,
    # shadowing the sample name for the rest of the function.
    for idx, img in enumerate(results):
        modality = modality_names[idx]
        save_path = save_dir / f"{modality}.png"
        img.save(save_path)
        print(f"💾 Saved {modality} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{m}.png" for m in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + image_names = [item["image_name"] for item in data] + + for image_name in image_names[97:]: + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + score, feedback = evaluate_caption(image_path, model, processor, prompt) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_name, max_length) + score, feedback = evaluate_caption(image_path, model, processor, prompt) + + diff --git a/test_i2t_nocaps3.py b/test_i2t_nocaps3.py new file mode 100644 index 0000000000000000000000000000000000000000..8f682af16510c45eb3f11721687b8111a3d1f429 --- /dev/null +++ b/test_i2t_nocaps3.py @@ -0,0 +1,368 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the images at *image_paths* into one grid image saved at *save_path*.

    All tiles are resized to a common square size (min(1024, first image's
    width)); defaults to a single row. Returns *save_path*.
    """
    from PIL import Image

    if not image_paths:  # robustness: original crashed with IndexError on []
        raise ValueError("image_paths must not be empty")

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Normalize every tile to the same square size.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: the original allocated sum(heights[:images_per_row]) * rows,
    # i.e. images_per_row * rows tile-heights — leaving blank bands whenever
    # more than one row was used. One row is exactly one tile high.
    canvas = Image.new("RGB", (target_size * images_per_row, target_size * rows))

    y_offset = 0
    for start in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[start:start + images_per_row]:
            canvas.paste(img, (x_offset, y_offset))
            x_offset += target_size
        y_offset += target_size

    parent = os.path.dirname(str(save_path))
    if parent:  # BUG FIX: os.makedirs("") raises when save_path has no directory part
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message that refines *coarse_caption*.

    Scans *root* for known modality images (first match among .png/.jpg/.jpeg
    per modality), and composes one user message containing every found image
    followed by a refinement instruction.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Collect the first existing file per modality and its human-readable label
    # in one pass (the original re-derived present_modalities afterwards via
    # path-string membership tests over `available`).
    available = []
    present_modalities = []
    for mod in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{mod}{ext}"
            if path.exists():
                available.append(str(path))
                present_modalities.append(readable_map[mod])
                break

    text_prompt = (
        f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides distinct types of visual information that together describe the same subject: "
        f"- The RGB image provides color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing reveals detailed structural outlines, shapes, and proportions. "
        f"- The edge map highlights object boundaries and contours. "
        f"- The depth map shows spatial distance, perspective, and 3D depth relationships. "
        f"- The normal map captures fine surface orientation, curvature, and geometric details. "
        f"- The albedo map shows true surface colors without lighting or shadow effects. "
        f"- The segmentation map provides semantic regions and object boundaries for scene composition. "
        f"- The human pose map shows body structure, orientation, and posture of subjects. "
        f"For each provided modality image, analyze it according to the above definitions and describe "
        f"the specific visual information it contributes in this particular case. "
        f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. "
        f"Do NOT describe each modality separately or mention modality names. "
        f"Focus on merging their information into a single coherent image description. "
        f"Refine the coarse caption into a more detailed and accurate image description. "
        f"Coarse caption: '{coarse_caption}' "
        # Kept for byte-identical prompt output; this only appends spaces
        # (one fewer than len(available)) and is a candidate for removal.
        + " ".join([""] * len(available))
    )

    messages = [
        {
            "role": "user",
            "content": [{"type": "image", "image": path} for path in available]
            + [{"type": "text", "text": text_prompt}],
        }
    ]
    return messages
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for JODI iterative image->text inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.")
    # BUG FIX: the help strings below said "Prompt text for generation." (copy-paste error).
    parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/images", help="Root directory of input images.")
    parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/captions.json", help="JSON file listing image names.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help said "Number of inference steps." for --iters as well.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./nocaps_i2t_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------

def _chat_generate(model, processor, messages, max_length):
    """Run one chat-template generation round and return the decoded reply.

    Shared by init_i2t and text_refine (the original duplicated this logic).
    """
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated text is decoded.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    return output_text[0]


def _save_caption(name, iter_num, caption):
    """Write *caption* to <args.output_dir>/<name>/iteration_<iter_num>/caption.txt.

    NOTE(review): relies on module-global ``args`` being set in ``__main__``.
    mkdir(parents=True) also creates args.output_dir, so the original's extra
    os.makedirs call was redundant and has been dropped.
    """
    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(caption.strip())
    return save_dir


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, name, max_length=300):
    """Produce the initial caption for *image_path*, save it, and return it."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]
    caption = _chat_generate(model, processor, messages, max_length)
    _save_caption(name, iter_num, caption)
    return caption


@torch.inference_mode()
def text_refine(root, model, processor, prompt, iter_num, name, max_length=300):
    """Refine *prompt* using the modality images under *root*; save and return it."""
    messages = build_multimodal_message(root, prompt)
    caption = _chat_generate(model, processor, messages, max_length)
    _save_caption(name, iter_num, caption)
    return caption


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name):
    """Generate the RGB image plus all annotation modalities from *prompt*.

    Saves one PNG per modality plus a merged contact sheet under
    <args.output_dir>/<name>/iteration_<iter_num>/ and returns that directory.
    NOTE(review): relies on module-globals ``args`` and ``post_processors``.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i'
    )

    # Post-process each output head, then flatten to a list of PIL RGB images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    # BUG FIX: the original reused the *name* parameter as loop variable,
    # shadowing the sample name for the rest of the function.
    for idx, img in enumerate(results):
        modality = modality_names[idx]
        save_path = save_dir / f"{modality}.png"
        img.save(save_path)
        print(f"💾 Saved {modality} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{m}.png" for m in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + image_names = [item["image_name"] for item in data][2250:3000] + + for image_name in image_names: + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_nocaps4.py b/test_i2t_nocaps4.py new file mode 100644 index 0000000000000000000000000000000000000000..7a4321b6a4c89bc417aa97228b5dcb0ac2740c1b --- /dev/null +++ b/test_i2t_nocaps4.py @@ -0,0 +1,368 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 
保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/images", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/captions.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative 
prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./nocaps_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + 
return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / 
f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = 
args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + image_names = [item["image_name"] for item in data][3000:3750] + + for image_name in image_names: + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_i2t_nocaps5.py b/test_i2t_nocaps5.py new file mode 100644 index 0000000000000000000000000000000000000000..a41f094b7f8da88301d9aa87b5ab2ece001775b9 --- /dev/null +++ b/test_i2t_nocaps5.py @@ -0,0 +1,368 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import json +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 
保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_root", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/images", help="Prompt text for generation.") + parser.add_argument("--json_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/NoCaps_hf_validation/captions.json", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative 
prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./nocaps_i2t_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, name, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, name, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + 
return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / name / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, name): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / name/ f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / 
f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_root = args.image_root + json_path = 
args.json_path + + with open(json_path, "r") as f: + data = json.load(f) + + image_names = [item["image_name"] for item in data][3750:] + + for image_name in image_names: + image_path = os.path.join(image_root, image_name) + image = Image.open(image_path).convert("RGB") + width, height = image.size + + control_images = [image] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + prompt = init_i2t(model, processor, image_path, 0, image_name, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, image_name) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, image_name, max_length) + + diff --git a/test_pope.py b/test_pope.py new file mode 100644 index 0000000000000000000000000000000000000000..9569ec148317e31a5cca4c33bd972ccd23fab0df --- /dev/null +++ b/test_pope.py @@ -0,0 +1,858 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +import torch.nn.functional as F +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style 
question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). + """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + #text_prompt = ( + # f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + #f"The following caption describes the image in detail: '{prompt}'. " + # f"Question:{question}" + #) + + text_prompt = ( + f"Answer the question using ONLY visual evidence from the images, including: {', '.join(present_modalities)}. " + f"Do NOT rely on prior knowledge or assumptions. " + f"Carefully inspect all visible objects and count them precisely. 
" + f"If objects appear similar or are located at different heights or positions, " + f"they MUST be counted separately if they are distinct and not connected. " + f"Cross-check all modalities (RGB, lines, edges, depth, segmentation) " + f"to ensure you do not merge distinct objects into one. " + f"Your answer MUST strictly follow what is visible, even if it seems unusual. " + f"Just response yes or no. " + f"Now answer the question:\n{question}\n") + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." 
+ # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", 
type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", + help="Optional negative prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=5, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--tmp", type=str, default="/home/efs/mjw/mjw/code/Jodi/pope_tmp") + parser.add_argument("--output_dir", type=str, default="./vqa_pope_output", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(f'vqa messages:{messages}') + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + 
output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + #print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + #print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, question, answer, max_length=256): + # --- 构造 Qwen 输入 --- + question = clean_eval_question(question) + eval_prompt = f""" + You are a VQA answer evaluator. + Given an image, a question, and a proposed answer, + score how correct the answer is according to the image evidence. 
+ Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved + to make the answer more accurate or grounded in the image. + Return JSON strictly: + {{"AnswerScore": , "Feedback": ""}} + + Question: "{question}" + Answer: "{answer}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + print(f'eval_message:{messages}') + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + #print(f'out_ids.logits:{out_ids.logit}') + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + #print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + +@torch.inference_mode() +def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256): + """ + Evaluate VQA answer correctness using all available modalities (not just RGB). + This reduces model bias and improves visual grounding reliability. 
+ """ + + # 检查存在的模态文件 + modality_names = [ + "image", "annotation_lineart", "annotation_edge", + "annotation_depth", "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", "annotation_openpose" + ] + + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # 构造 prompt + eval_prompt = f""" + You are a multimodal visual reasoning evaluator. + + You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. + Your task is to judge **how correct and visually grounded** the given answer is for the question, + based purely on visual evidence from all modalities. + + Follow this process: + 1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors). + 2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities. + 3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence. + 4. Otherwise, directly evaluate how accurate the free-form answer is. + 5. Penalize any parts that contradict the image, or ignore modalities. 
+ + Return JSON strictly: + {{ + "AnswerScore": , + "Feedback": "" + }} + + Question: "{question}" + Answer: "{answer}" + """ + + # 构建内容序列(模态+图像) + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + content.append({"type": "text", "text": eval_prompt}) + + messages = [{"role": "user", "content": content}] + + print(f'eval message:{messages}') + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_dict=True, return_tensors="pt" + ).to(model.device) + + outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True) + #print(out_ids) + out_ids = outs['sequences'] + scores = outs['scores'] + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + gen_start = inputs["input_ids"].shape[1] + gen_ids = out_ids[:, gen_start:] + #gen_ids = out_ids[:, gen_start:] + gen_text = processor.tokenizer.decode(gen_ids[0], skip_special_tokens=False) + num_match = re.search(r"AnswerScore\"\s*:\s*([0-9\.]+)", gen_text) + conf = 0.0 + if num_match: + num_text = num_match.group(1) + num_ids = processor.tokenizer.encode(num_text, add_special_tokens=False) + num_str = processor.tokenizer.decode(num_ids) + gen_id_list = gen_ids[0].tolist() + match_positions = [] + for i in range(len(gen_id_list) - len(num_ids) + 1): + if gen_id_list[i:i+len(num_ids)] == num_ids: + match_positions = list(range(i, i+len(num_ids))) + break + + if match_positions: + probs = [] + for pos in match_positions: + step_prob = 
F.softmax(scores[pos], dim=-1) + token_id = gen_ids[0, pos] + probs.append(step_prob[0, token_id]) + conf = torch.stack(probs).mean().item() + + #print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + #print(f"📊 [Confidence(AnswerScore)] {conf:.4f}") + + return score, feedback + + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300): + question = clean_prompt_question(question) + messages = build_multimodal_message(root, question, prompt, feedback) + print(f'refine message:{messages}') + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + #print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + feedback_path = Path(save_dir) / f"feedback.txt" + with open(feedback_path, "w", encoding="utf-8") as f: + f.write(feedback.strip()) + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(f'vqa messages:{messages}') + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids 
= model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + #print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") 
+ return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + #with open(args.json, "r", encoding="utf-8") as f: + # annotations = json.load(f) + + dataset = load_dataset("lmms-lab/POPE", split="test") + subset = dataset.select(range(4500,len(dataset))) + + for sample in subset: + #image_path = os.path.join(args.data_path, sample["image"]) + #image_id = 
sample["image"].split('.')[0] + image_path = os.path.join(args.tmp, sample["image_source"]+'.jpg') + + print(type(sample["image"])) + + image_id = sample["id"] + image = sample["image"].convert("RGB") + image.save(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/test_real1.py b/test_real1.py new file mode 100644 index 0000000000000000000000000000000000000000..facec285d72f06b479b43a10e238a4122ed5e4e0 --- 
/dev/null +++ b/test_real1.py @@ -0,0 +1,817 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). + """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 
等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] 
+ x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"The following caption describes the image in detail: '{prompt}'. " + f"Question:{question}" + f"Just response Yes or No" + ) + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #content.append({"type": "text", "text": text_prompt}) + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. 
" + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + #content.append({"type": "text", "text": text_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/POPEv2/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/POPEv2/annotations.json", + help="Optional negative prompt.") + 
parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_popev2_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, question, answer, max_length=256): + # --- 构造 Qwen 输入 --- + question = clean_eval_question(question) + eval_prompt = f""" + You are a VQA answer evaluator. + Given an image, a question, and a proposed answer, + score how correct the answer is according to the image evidence. + Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved + to make the answer more accurate or grounded in the image. 
+ Return JSON strictly: + {{"AnswerScore": , "Feedback": ""}} + + Question: "{question}" + Answer: "{answer}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + #print(f'out_ids.logits:{out_ids.logit}') + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + +@torch.inference_mode() +def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256): + """ + Evaluate VQA answer correctness using all available modalities (not just RGB). + This reduces model bias and improves visual grounding reliability. 
+ """ + + # 检查存在的模态文件 + modality_names = [ + "image", "annotation_lineart", "annotation_edge", + "annotation_depth", "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", "annotation_openpose" + ] + + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # 构造 prompt + eval_prompt = f""" + You are a multimodal visual reasoning evaluator. + + You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. + Your task is to judge **how correct and visually grounded** the given answer is for the question, + based purely on visual evidence from all modalities. + + Follow this process: + 1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors). + 2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities. + 3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence. + 4. Otherwise, directly evaluate how accurate the free-form answer is. + 5. Penalize any parts that contradict the image, or ignore modalities. 
+ + Return JSON strictly: + {{ + "AnswerScore": , + "Feedback": "" + }} + + Question: "{question}" + Answer: "{answer}" + """ + + # 构建内容序列(模态+图像) + content = [] + #content.append({"type": "text", "text": eval_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + content.append({"type": "text", "text": eval_prompt}) + + messages = [{"role": "user", "content": content}] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_dict=True, return_tensors="pt" + ).to(model.device) + + outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True) + #print(out_ids) + out_ids = outs['sequences'] + scores = outs['scores'] + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300): + question = clean_prompt_question(question) + messages = build_multimodal_message(root, question, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, 
generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + feedback_path = Path(save_dir) / f"feedback.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + with open(feedback_path, "w", encoding="utf-8") as f: + f.write(feedback.strip()) + return output_text[0] + + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + 
num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + #attn_implementation="sdpa", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif 
condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations: + + out_names = os.listdir(args.output_dir) + + image_path = os.path.join(args.data_path, sample["image_name"].split('/')[-1]) + image_id = sample["image_name"].split('/')[-1].split('.')[0] + + if image_id in out_names: + print(f'this {image_id} is exist.') + continue + + image = Image.open(image_path) + question = sample["query"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, 
modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/test_real2.py b/test_real2.py new file mode 100644 index 0000000000000000000000000000000000000000..e2a61d6d8be44fb1fa6094e8817dc5b00340025b --- /dev/null +++ b/test_real2.py @@ -0,0 +1,857 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +import torch.nn.functional as F +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for 
evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). + """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + #text_prompt = ( + # f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + #f"The following caption describes the image in detail: '{prompt}'. " + # f"Question:{question}" + #) + + text_prompt = ( + f"Answer the question using ONLY visual evidence from the images, including: {', '.join(present_modalities)}. " + f"Do NOT rely on prior knowledge or assumptions. " + f"Carefully inspect all visible objects and count them precisely. 
" + f"If objects appear similar or are located at different heights or positions, " + f"they MUST be counted separately if they are distinct and not connected. " + f"Cross-check all modalities (RGB, lines, edges, depth, segmentation) " + f"to ensure you do not merge distinct objects into one. " + f"Your answer MUST strictly follow what is visible, even if it seems unusual. " + f"Just response yes or no. " + f"Now answer the question:\n{question}\n") + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." 
def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes.

    Used to prime the VLM so it understands the role of each auxiliary
    image (depth, edge, pose, ...) placed before it in the prompt.
    Falls back to a generic description for unknown modality names.
    """
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI parser for the POPE VQA refinement pipeline.

    Defaults are unchanged; only the help texts were corrected (several
    were copy-pasted from unrelated arguments).
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the Qwen3-VL model used for captioning/VQA.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the JODI diffusion checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the AutoProcessor.")
    parser.add_argument("--data_path",
                        type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing dataset images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the dataset annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question used when running a single example.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=5, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--tmp", type=str, default="/home/efs/mjw/mjw/code/Jodi/pope_tmp")
    parser.add_argument("--output_dir", type=str, default="./vqa_pope_output", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer `question` about a single image with the VLM.

    Saves the decoded answer to `<output_dir>/<vqa_id>/caption.txt`
    (relies on the module-level `args` namespace) and returns it.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                # Fixed: original said "Answer the follow question:... based on the ."
                # — the trailing "<image>" placeholder had been stripped.
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }
    ]

    print(f'vqa messages:{messages}')

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens from each output sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce the initial coarse caption for an image.

    Saves it to `<output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt`
    and returns the caption string.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True, return_dict=True, return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    os.makedirs(args.output_dir, exist_ok=True)
    # Fixed: `Path / vqa_id` raises TypeError when the dataset id is an int.
    save_dir = Path(args.output_dir) / str(vqa_id) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score how well `answer` matches the image for `question`.

    Returns (score, feedback); falls back to (0.0, raw_text) when the
    model output cannot be parsed as JSON.
    """
    # Local imports: c2i.py does not import json/re at module level.
    import json
    import re

    # NOTE(review): clean_eval_question is defined in test_real3.py, not in
    # this file — calling it here raises NameError unless it is imported.
    # TODO: import or inline clean_eval_question.
    question = clean_eval_question(question)
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.
    Return JSON strictly:
    {{"AnswerScore": , "Feedback": ""}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    print(f'eval_message:{messages}')

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; on any failure return the raw text
    # as feedback with a zero score.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    return score, feedback


@torch.inference_mode()
def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256):
    """Evaluate VQA answer correctness using all available modalities under `root`.

    Returns (score, feedback). A confidence over the AnswerScore tokens is
    also computed from the generation scores but is currently not returned.
    """
    import json
    import re

    # Probe which modality images exist under `root`.
    modality_names = [
        "image", "annotation_lineart", "annotation_edge",
        "annotation_depth", "annotation_normal", "annotation_albedo",
        "annotation_seg_12colors", "annotation_openpose",
    ]

    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    eval_prompt = f"""
    You are a multimodal visual reasoning evaluator.

    You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
    Your task is to judge **how correct and visually grounded** the given answer is for the question,
    based purely on visual evidence from all modalities.

    Follow this process:
    1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
    2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
    3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
    4. Otherwise, directly evaluate how accurate the free-form answer is.
    5. Penalize any parts that contradict the image, or ignore modalities.

    Return JSON strictly:
    {{
      "AnswerScore": ,
      "Feedback": ""
    }}

    Question: "{question}"
    Answer: "{answer}"
    """

    # Interleave a modality tag before each image so the model can tell them apart.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": eval_prompt})

    messages = [{"role": "user", "content": content}]

    print(f'eval message:{messages}')

    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt",
    ).to(model.device)

    outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True)
    out_ids = outs['sequences']
    scores = outs['scores']
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    # --- Confidence over the AnswerScore digits (diagnostic only) ---
    gen_start = inputs["input_ids"].shape[1]
    gen_ids = out_ids[:, gen_start:]
    gen_text = processor.tokenizer.decode(gen_ids[0], skip_special_tokens=False)
    num_match = re.search(r"AnswerScore\"\s*:\s*([0-9\.]+)", gen_text)
    conf = 0.0
    if num_match:
        num_text = num_match.group(1)
        num_ids = processor.tokenizer.encode(num_text, add_special_tokens=False)
        gen_id_list = gen_ids[0].tolist()
        # Locate the score's token span inside the generated sequence.
        match_positions = []
        for i in range(len(gen_id_list) - len(num_ids) + 1):
            if gen_id_list[i:i + len(num_ids)] == num_ids:
                match_positions = list(range(i, i + len(num_ids)))
                break

        if match_positions:
            probs = []
            for pos in match_positions:
                # Fixed: original used F.softmax but torch.nn.functional was
                # never imported in this file (NameError at runtime).
                step_prob = torch.softmax(scores[pos], dim=-1)
                token_id = gen_ids[0, pos]
                probs.append(step_prob[0, token_id])
            conf = torch.stack(probs).mean().item()

    # `conf` is intentionally not returned to keep the caller-visible
    # (score, feedback) contract unchanged.
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption `prompt` using all modalities under `root` plus feedback.

    Saves both the feedback and the refined caption under
    `<output_dir>/<vqa_id>/iteration_<iter_num>/` and returns the caption.
    """
    question = clean_prompt_question(question)
    messages = build_multimodal_message(root, question, prompt, feedback)
    print(f'refine message:{messages}')
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    os.makedirs(args.output_dir, exist_ok=True)
    # Fixed: `Path / vqa_id` raises TypeError when the dataset id is an int.
    save_dir = Path(args.output_dir) / str(vqa_id) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    feedback_path = save_dir / "feedback.txt"
    with open(feedback_path, "w", encoding="utf-8") as f:
        f.write(feedback.strip())
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]
@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer `question` using the multimodal evidence under `root` and caption `prompt`.

    Saves the answer to `<output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt`.
    NOTE(review): relies on build_vqa_message, which is not visible in this
    file's shown sections — confirm it is defined/imported in c2i.py.
    """
    messages = build_vqa_message(root, prompt, question)
    print(f'vqa messages:{messages}')
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    os.makedirs(args.output_dir, exist_ok=True)
    # Fixed: `Path / vqa_id` raises TypeError when the dataset id is an int.
    save_dir = Path(args.output_dir) / str(vqa_id) / f'iteration_{step}' / 'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the JODI pipeline to (re)generate all modality images from `prompt`.

    Saves one PNG per modality plus a merged contact sheet under
    `<output_dir>/<image_id>/iteration_<iter_num>/` and returns that directory.
    Uses the module-level `args` and `post_processors` globals.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process the RGB output plus one output per condition, then
    # flatten to a list of PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / str(image_id) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    # Local import: c2i.py never imports `datasets` at module level,
    # so load_dataset below was a NameError in the original.
    from datasets import load_dataset

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per pipeline output: RGB first, then each condition.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            # Unknown condition: fall back to the identity image post-processor.
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    dataset = load_dataset("lmms-lab/POPE", split="test")

    for sample in dataset:
        # Fixed: create the scratch dir before saving into it.
        os.makedirs(args.tmp, exist_ok=True)
        image_path = os.path.join(args.tmp, sample["image_source"] + '.jpg')

        # Fixed: normalize the id to str once — POPE ids may be ints, and
        # Path(...) / int raises TypeError.
        image_id = str(sample["id"])
        image = sample["image"].convert("RGB")
        image.save(image_path)
        question = sample["question"]

        # RGB image drives generation; condition slots are generated (role 0).
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_result, best_score = '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        # Iteration 0: caption + answer + self-evaluation on the raw image.
        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        result = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, question, result)

        if score >= best_score:
            best_result, best_score = result, score

        # Refinement loop: regenerate modalities, refine caption, re-answer.
        for step in range(1, args.iters):
            generator = torch.Generator(device=device).manual_seed(args.seed)
            save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names,
                                    generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result)

            if score >= best_score:
                best_result, best_score = result, score

        os.makedirs(args.output_dir, exist_ok=True)
        save_dir = Path(args.output_dir) / image_id / 'iteration_best' / 'vqa_answer'
        save_dir.mkdir(parents=True, exist_ok=True)
        caption_path = save_dir / "caption.txt"
        with open(caption_path, "w", encoding="utf-8") as f:
            f.write(best_result)
        print(best_result)
def clean_eval_question(q: str) -> str:
    """Clean VQA-style question text for evaluation.

    - If lettered options (A-Z) exist, keep text up to the end of the last
      option's content (trailing "Please answer ..." style instructions are cut).
    - Otherwise, keep text up to the first '?' (inclusive).
    Whitespace is normalized in all cases.
    """
    if not isinstance(q, str):
        q = str(q)

    # Drop "<image N>" placeholders.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)

    # Option markers in several spellings: A. / A) / (A) / A: / A -
    # NOTE: IGNORECASE also admits lowercase "a." — kept for compatibility.
    option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)"
    matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE))

    if matches:
        # Keep through the last option marker plus its content, trimming any
        # trailing answering instructions.
        last_match = matches[-1]
        tail = q[last_match.end():]
        tail_cut = re.split(
            r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)",
            tail, flags=re.IGNORECASE,
        )[0]
        q = q[:last_match.end()] + tail_cut
    else:
        # No options: keep only up to and including the question mark.
        match_qmark = re.search(r"\?", q)
        if match_qmark:
            q = q[:match_qmark.end()]
        else:
            q = q.split("\n")[0]  # fallback: first line

    # Normalize whitespace.
    q = re.sub(r"\n+", " ", q)
    q = re.sub(r"\s+", " ", q).strip()
    return q


def clean_prompt_question(q: str) -> str:
    """Clean VQA-style question text, keeping only the stem up to the first '?'."""
    if not isinstance(q, str):
        q = str(q)

    # Drop "<image N>" placeholders.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)

    # Keep up to and including the first question mark; otherwise first line.
    match = re.search(r"^(.*?\?)", q)
    if match:
        q = match.group(1)
    else:
        q = q.split("\n")[0]

    q = re.sub(r"\s+", " ", q).strip()
    return q


def dump_image(image, save_root):
    """Save a PIL image as `<save_root>/input.jpg` (RGB JPEG) and return the path."""
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file path (including filename).
        images_per_row: images per row (default: all in one row).
        image_format: save format passed to PIL.
    Returns the save path.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize everything to a common square size (capped at 1024).
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # Fixed: canvas height was sum(heights[:images_per_row]) * rows, which
    # over-allocates by a factor of images_per_row (all rows have equal
    # height after the resize above), leaving a large blank band.
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    # Fixed: os.makedirs('') raises when save_path has no directory part.
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL message for single-image or multimodal VQA.

    If `root` is an image file, a single-image message is built; if it is a
    directory, every recognized modality image found in it is tagged and
    interleaved so the model can distinguish RGB, edge, depth, etc.
    """
    root_path = Path(root)

    # ---------- single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        image_path = str(root)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path},
                    # Fixed garbled prompt ("... based on the .").
                    {"type": "text", "text": f"Answer the following question: {question} based on the image."},
                ],
            }
        ]
        return messages

    # ---------- multimodal-directory case ----------
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        # "annotation_openpose",
    ]

    # Probe which modality files exist.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        # "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The following caption describes the image in detail: '{prompt}'. "
        f"Question:{question}"
    )

    # Tag each image with its modality before the main instruction.
    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})

    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages
def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL message for multimodal caption refinement.

    Each available modality image under `root` is preceded by a text tag
    explaining what it provides, so the model can reason over them jointly
    and refine `coarse_caption` toward answering `question`.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        # "annotation_openpose",
    ]

    # Probe which modality files exist.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        # "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. "
        f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues "
        f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, "
        f"while maintaining faithfulness to the original visual content. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Tag each image with its modality and role before the main instruction.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})

    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes.

    Falls back to a generic description for unknown modality names.
    """
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI parser for the RealWorldQA refinement pipeline.

    Defaults are unchanged; only the help texts were corrected (several
    were copy-pasted from unrelated arguments).
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the Qwen3-VL model used for captioning/VQA.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the JODI diffusion checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the AutoProcessor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing dataset images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the dataset annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Scratch directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question used when running a single example.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Generate an initial caption ("image-to-text") for *image_path*.

    The caption is produced by the Qwen VL model, printed, and written to
    ``<args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt``.

    Args:
        model: loaded Qwen VL generation model.
        processor: matching ``AutoProcessor`` (chat template + tokenizer).
        image_path: path of the input RGB image.
        iter_num: refinement-iteration index used in the save path.
        vqa_id: per-sample identifier used in the save path.
        max_length: ``max_new_tokens`` budget for generation.

    Returns:
        The generated caption string.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Keep only the newly generated tokens (drop the echoed prompt).
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")

    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score a proposed VQA *answer* against the image evidence.

    Asks the VL model to return strict JSON with an ``AnswerScore`` and a
    one-sentence ``Feedback``; falls back to score 0.0 with the raw model
    text as feedback when the JSON cannot be parsed.

    Returns:
        Tuple ``(score, feedback)``.
    """
    # Bug fix: this module does not import json/re at the top level, so the
    # original body raised NameError at runtime. Import them locally.
    import json
    import re

    # NOTE(review): clean_eval_question must be defined/imported in this
    # module; it is not visible in this file's import block — confirm.
    question = clean_eval_question(question)
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.
    Return JSON strictly:
    {{"AnswerScore": , "Feedback": ""}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; on any failure keep the raw text
    # as feedback so the caller still gets a usable signal.
    score, feedback = 0.0, text.strip()
    match = re.search(r"\{.*\}", text, re.S)
    if match is not None:
        try:
            data = json.loads(match.group(0))
            score = float(data.get("AnswerScore", 0))
            feedback = data.get("Feedback", "")
        except Exception:
            pass  # keep the raw-text fallback

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback
@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption for one sample using all modality images under *root*.

    The refined caption is printed, saved to
    ``<args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt`` and returned.
    """
    # NOTE(review): clean_prompt_question / build_multimodal_message must be
    # in scope in this module — they are not visible in its import block.
    question_stem = clean_prompt_question(question)
    chat = build_multimodal_message(root, question_stem, prompt, feedback)

    model_inputs = processor.apply_chat_template(
        chat,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    sequences = model.generate(**model_inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the new completion is decoded.
    new_tokens = [seq[len(src):] for src, seq in zip(model_inputs.input_ids, sequences)]
    decoded = processor.batch_decode(
        new_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(decoded)

    target_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    target_dir.mkdir(parents=True, exist_ok=True)
    with open(target_dir / "caption.txt", "w", encoding="utf-8") as fh:
        fh.write(decoded[0].strip())
    return decoded[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer *question* from the modality images under *root* (or one image file).

    The answer is printed and saved to
    ``<args.output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt``.
    """
    chat = build_vqa_message(root, prompt, question)
    print(chat)

    model_inputs = processor.apply_chat_template(
        chat,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    sequences = model.generate(**model_inputs, max_new_tokens=max_length)
    new_tokens = [seq[len(src):] for src, seq in zip(model_inputs.input_ids, sequences)]
    decoded = processor.batch_decode(
        new_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(decoded)

    answer_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
    answer_dir.mkdir(parents=True, exist_ok=True)
    with open(answer_dir / "caption.txt", "w", encoding="utf-8") as fh:
        fh.write(decoded[0].strip())
    return decoded[0]
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the Jodi pipeline on *prompt* and save every modality output.

    Generates 1 + ``pipe.num_conditions`` aligned outputs (RGB image plus one
    per condition), post-processes each with the module-level
    ``post_processors`` list, and writes each modality plus a merged grid to
    ``<args.output_dir>/<image_id>/iteration_<iter_num>/``.

    Returns:
        The ``Path`` of the directory the results were written to.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # Post-process each modality, then flatten to one PIL image per modality.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    # Bug fix: json is used below but never imported at this file's top level.
    import json

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per generated modality (RGB first).
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # This shard of the dataset (other scripts handle other slices).
    for sample in annotations[306:459]:
        image_path = os.path.join(args.data_path, sample["image"])
        # Bug fix: split('.')[0] truncates dotted basenames ("a.b.jpg" → "a");
        # splitext removes only the extension.
        image_id = os.path.splitext(sample["image"])[0]
        image = Image.open(image_path)
        question = sample["question"]

        # RGB is the given condition; all other modalities are generated.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        # NOTE(review): vqa_id=100 makes every sample overwrite the same
        # "<output_dir>/100/caption.txt" — confirm this is intended.
        result = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, question, result)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, question, result)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length)
        print(f'result:{result}')
def clean_eval_question(q: str) -> str:
    """Normalize a VQA question for the evaluator prompt.

    - Strips ``<image N>`` placeholders.
    - If lettered options (A-Z) exist, keeps text up to the last option plus
      the option's own text, cutting trailing boilerplate such as
      "Please answer ...".
    - Otherwise keeps text up to the first '?' (inclusive), falling back to
      the first line when there is no question mark.
    - Collapses all whitespace runs to single spaces.
    """
    if not isinstance(q, str):
        q = str(q)

    # Remove "<image 1>"-style placeholders.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)

    # Multiple-choice options such as "A. ", "(B) ", "C: ". Bug fix: matched
    # case-SENSITIVELY now — with re.IGNORECASE the pattern also hit ordinary
    # lowercase words ("i.e. ", "count. ") and mis-truncated the question.
    option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)"
    matches = list(re.finditer(option_pattern, q))

    if matches:
        last_match = matches[-1]
        # Keep the last option's own text but cut any trailing instruction.
        tail = q[last_match.end():]
        tail_cut = re.split(
            r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)",
            tail,
            flags=re.IGNORECASE,
        )[0]
        q = q[:last_match.end()] + tail_cut
    else:
        # No options → keep only the question stem (through the first '?').
        match_qmark = re.search(r"\?", q)
        if match_qmark:
            q = q[:match_qmark.end()]
        else:
            q = q.split("\n")[0]  # fallback: first line

    # Collapse newlines and whitespace runs.
    q = re.sub(r"\n+", " ", q)
    q = re.sub(r"\s+", " ", q).strip()
    return q
def clean_prompt_question(q: str) -> str:
    """Return only the question stem of *q*, up to and including the first '?'.

    ``<image N>`` placeholders are stripped and whitespace is collapsed. When
    there is no question mark at all, the first line is returned instead.
    """
    if not isinstance(q, str):
        q = str(q)

    # Remove "<image 1>"-style placeholders.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)

    # Keep everything up to the first '?'. Bug fix: re.S lets the stem span
    # newlines; without it a '?' on a later line was never found.
    match = re.search(r"^(.*?\?)", q, flags=re.S)
    if match:
        q = match.group(1)
    else:
        q = q.split("\n")[0]  # no question mark → keep the first line

    return re.sub(r"\s+", " ", q).strip()


def dump_image(image, save_root):
    """Save *image* as ``<save_root>/input.jpg`` (RGB JPEG) and return the path."""
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the images at *image_paths* into one grid image and save it.

    Every tile is resized to a common square size (≤ 1024, taken from the
    first image's width) before tiling.

    Args:
        image_paths: list of image file paths.
        save_path: destination file (parent directories are created as needed).
        images_per_row: tiles per row; defaults to all images on one row.
        image_format: PIL format name for saving.

    Returns:
        *save_path*.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if not images:
        raise ValueError("image_paths must not be empty")

    if images_per_row is None:
        images_per_row = len(images)

    # Resize all tiles to the same square size.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # Bug fix: the canvas needs one tile height per row. The previous formula
    # (sum of a full row's heights × rows) inflated the canvas by a factor of
    # images_per_row, leaving a large black band below the grid.
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    # Bug fix: os.path.dirname('') would make makedirs fail for bare filenames.
    parent = os.path.dirname(str(save_path))
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
def build_vqa_message(root, prompt, question):
    """Assemble the Qwen chat message for VQA over *root*.

    *root* may be a single image file (plain single-image VQA) or a directory
    holding per-modality renderings; in the directory case every available
    modality image is preceded by a short text tag naming it, so the model
    can tell RGB, edge, depth, etc. apart. Returns the ``messages`` list
    expected by ``processor.apply_chat_template``.
    """
    root_path = Path(root)

    # Single image file → plain one-image VQA message.
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        image_path = str(root)
        return [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path},
                    {"type": "text", "text": f"Answer the follow question:{question} based on the ."},
                ],
            }
        ]

    # Directory of modality renderings.
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    # Collect the modality files that actually exist under root.
    available = []
    for name in modality_names:
        candidates = (Path(root) / f"{name}{ext}" for ext in [".png", ".jpg", ".jpeg"])
        found = next((c for c in candidates if c.exists()), None)
        if found is not None:
            available.append((name, str(found)))

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The following caption describes the image in detail: '{prompt}'. "
        f"Question:{question}"
    )

    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        # Anchor each image with its modality name before the image itself.
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]
def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""):
    """Build the Qwen chat message used for caption refinement.

    Scans *root* for the known modality renderings, prefixes each found image
    with a short text tag naming its modality and what it contributes, and
    appends the refinement instruction (question + feedback + coarse caption)
    at the end. Returns the ``messages`` list for
    ``processor.apply_chat_template``.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }

    # Collect the modality files that actually exist under root (first
    # matching extension wins).
    available = []
    for name in modality_names:
        candidates = (Path(root) / f"{name}{ext}" for ext in [".png", ".jpg", ".jpeg"])
        found = next((c for c in candidates if c.exists()), None)
        if found is not None:
            available.append((name, str(found)))

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. "
        f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues "
        f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, "
        f"while maintaining faithfulness to the original visual content. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Tag every image with its modality name and role, then append the task.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """One-phrase description of what each modality contributes (prompt text)."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for the JODI VQA refinement script."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    # Model checkpoints and configuration files.
    parser.add_argument("--text_model_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default="hf://VIPL-GENUN/Jodi/Jodi.pth",
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Path to model checkpoint.")
    # Dataset locations.
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Prompt text for generation.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Optional negative prompt.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Prompt text for generation.")
    # Generation settings.
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer *question* directly from the single RGB image at *image_path*.

    The answer is generated by the Qwen VL model, printed, and written to
    ``<args.output_dir>/<vqa_id>/caption.txt``.

    Returns:
        The generated answer string.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                # Fixed prompt: the original read "Answer the follow
                # question:... based on the ." (truncated/ungrammatical).
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }
    ]

    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Keep only the newly generated tokens (drop the echoed prompt).
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")

    return output_text[0]


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Generate an initial caption for *image_path* and save it.

    Written to ``<args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt``.

    Returns:
        The generated caption string.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")

    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score a proposed VQA *answer* against the image evidence.

    Asks the VL model to return strict JSON with an ``AnswerScore`` and a
    one-sentence ``Feedback``; falls back to score 0.0 with the raw model
    text as feedback when the JSON cannot be parsed.

    Returns:
        Tuple ``(score, feedback)``.
    """
    question = clean_eval_question(question)
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.
    Return JSON strictly:
    {{"AnswerScore": , "Feedback": ""}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; on any failure keep the raw text
    # as feedback so the caller still gets a usable signal.
    score, feedback = 0.0, text.strip()
    match = re.search(r"\{.*\}", text, re.S)
    if match is not None:
        try:
            data = json.loads(match.group(0))
            score = float(data.get("AnswerScore", 0))
            feedback = data.get("Feedback", "")
        except Exception:
            pass  # keep the raw-text fallback

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback
@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption for one sample using all modality images under *root*.

    The refined caption is printed, saved to
    ``<args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt`` and returned.
    """
    stem = clean_prompt_question(question)
    chat = build_multimodal_message(root, stem, prompt, feedback)

    batch = processor.apply_chat_template(
        chat,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    sequences = model.generate(**batch, max_new_tokens=max_length)
    # Drop the echoed prompt tokens before decoding.
    completions = [seq[len(src):] for src, seq in zip(batch.input_ids, sequences)]
    texts = processor.batch_decode(
        completions, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(texts)

    out_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    out_dir.mkdir(parents=True, exist_ok=True)
    with open(out_dir / "caption.txt", "w", encoding="utf-8") as fh:
        fh.write(texts[0].strip())
    return texts[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer *question* from the modality images under *root* (or one image file).

    The answer is printed and saved to
    ``<args.output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt``.
    """
    chat = build_vqa_message(root, prompt, question)
    print(chat)

    batch = processor.apply_chat_template(
        chat,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    sequences = model.generate(**batch, max_new_tokens=max_length)
    completions = [seq[len(src):] for src, seq in zip(batch.input_ids, sequences)]
    texts = processor.batch_decode(
        completions, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(texts)

    out_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
    out_dir.mkdir(parents=True, exist_ok=True)
    with open(out_dir / "caption.txt", "w", encoding="utf-8") as fh:
        fh.write(texts[0].strip())
    return texts[0]
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the Jodi pipeline on *prompt* and save every modality output.

    Generates 1 + ``pipe.num_conditions`` aligned outputs (RGB image plus one
    per condition), post-processes each with the module-level
    ``post_processors`` list, and writes each modality plus a merged grid to
    ``<args.output_dir>/<image_id>/iteration_<iter_num>/``.

    Returns:
        The ``Path`` of the directory the results were written to.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # Post-process each modality, then flatten to one PIL image per modality.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per generated modality (RGB first).
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # This shard of the dataset (other scripts handle other slices).
    for sample in annotations[459:612]:
        image_path = os.path.join(args.data_path, sample["image"])
        # Bug fix: split('.')[0] truncates dotted basenames ("a.b.jpg" → "a");
        # splitext removes only the extension.
        image_id = os.path.splitext(sample["image"])[0]
        image = Image.open(image_path)
        question = sample["question"]

        # RGB is the given condition; all other modalities are generated.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        # NOTE(review): vqa_id=100 makes every sample overwrite the same
        # "<output_dir>/100/caption.txt" — confirm this is intended.
        result = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, question, result)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, question, result)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length)
        print(f'result:{result}')
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The following caption describes the image in detail: '{prompt}'. 
" + f"Question:{question}" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. 
" + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. 
Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------
+def get_parser():
+    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
+    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
+                        help="Path to the Qwen3-VL text model checkpoint.")
+    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
+    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
+                        help="Path to the Jodi model checkpoint.")
+    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
+                        help="Name or path of the model whose processor is loaded.")
+    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
+                        help="Root directory containing the benchmark images.")
+    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
+                        help="Path to the JSON annotation file.")
+    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
+                        help="Directory for temporary files.")
+    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
+    parser.add_argument("--question", type=str, default="how many cars in this image?",
+                        help="Fallback question used when none is supplied.")
+    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
+    parser.add_argument("--iters", type=int, default=10, help="Number of caption/image refinement iterations.")
+    parser.add_argument("--guidance_scale", type=float, default=4.5)
+    parser.add_argument("--seed", type=int, default=42)
+    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
+    return parser
+
+
+# ------------------------------
+# Main Inference Function
+# ------------------------------
+
+
+@torch.inference_mode()
+def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+
"image": image_path,
+                },
+                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
+            ],
+        }
+    ]
+
+    print(messages)
+
+    inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True,
+        return_dict=True,
+        return_tensors="pt"
+    )
+    inputs = inputs.to(model.device)
+
+    # Inference: Generation of the output
+    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
+    generated_ids_trimmed = [
+        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+    ]
+    output_text = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+    print(output_text)
+
+    os.makedirs(args.output_dir, exist_ok=True)
+    save_dir = Path(args.output_dir) / str(vqa_id)
+    save_dir.mkdir(parents=True, exist_ok=True)
+    caption_path = Path(save_dir) / f"caption.txt"
+    with open(caption_path, "w", encoding="utf-8") as f:
+        f.write(output_text[0].strip())
+
+    return output_text[0]
+
+
+@torch.inference_mode()
+def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+                    "image": image_path,
+                },
+                {"type": "text", "text": f"Describe this image."},
+            ],
+        }
+    ]
+
+    inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True, return_dict=True, return_tensors="pt"
+    )
+    inputs = inputs.to(model.device)
+
+    # Inference: Generation of the output
+    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
+    generated_ids_trimmed = [
+        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+    ]
+    output_text = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+    print(output_text)
+
+    os.makedirs(args.output_dir, exist_ok=True)
+    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
+    save_dir.mkdir(parents=True,
exist_ok=True)
+    caption_path = Path(save_dir) / f"caption.txt"
+    with open(caption_path, "w", encoding="utf-8") as f:
+        f.write(output_text[0].strip())
+
+    return output_text[0]
+
+
+@torch.inference_mode()
+def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
+    # --- 构造 Qwen 输入 ---
+    question = clean_eval_question(question)
+    eval_prompt = f"""
+    You are a VQA answer evaluator.
+    Given an image, a question, and a proposed answer,
+    score how correct the answer is according to the image evidence.
+    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
+    to make the answer more accurate or grounded in the image.
+    Return JSON strictly:
+    {{"AnswerScore": <numeric score>, "Feedback": "<one short feedback sentence>"}}
+
+    Question: "{question}"
+    Answer: "{answer}"
+
+    """
+
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "image", "image": image_path},
+                {"type": "text", "text": eval_prompt},
+            ],
+        }
+    ]
+
+    # --- 推理 ---
+    inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True,
+        return_dict=True,
+        return_tensors="pt"
+    ).to(model.device)
+
+    out_ids = model.generate(**inputs, max_new_tokens=max_length)
+    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
+    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]
+
+    # --- 解析输出 ---
+    try:
+        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
+        score = float(data.get("AnswerScore", 0))
+        feedback = data.get("Feedback", "")
+    except Exception:
+        score, feedback = 0.0, text.strip()
+
+    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
+    return score, feedback
+
+
+@torch.inference_mode()
+def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
+    question = clean_prompt_question(question)
+    messages = build_multimodal_message(root, question, prompt, feedback)
+    inputs =
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, 
modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + 
+ # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[612:]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, 
control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') + diff --git a/test_real_amber.py b/test_real_amber.py new file mode 100644 index 0000000000000000000000000000000000000000..b6f819d92748db9b769b82f59067c846c8c4988b --- /dev/null +++ b/test_real_amber.py @@ -0,0 +1,810 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). 
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The following caption describes the image in detail: '{prompt}'. 
" + f"Question:{question}" + f"Just response yes or no" + ) + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #content.append({"type": "text", "text": text_prompt}) + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. 
" + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + #content.append({"type": "text", "text": text_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/image", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/merged.json", + help="Optional negative prompt.") + 
parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=5, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_amber_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, question, answer, max_length=256): + # --- 构造 Qwen 输入 --- + question = clean_eval_question(question) + eval_prompt = f""" + You are a VQA answer evaluator. + Given an image, a question, and a proposed answer, + score how correct the answer is according to the image evidence. + Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved + to make the answer more accurate or grounded in the image. 
+ Return JSON strictly: + {{"AnswerScore": , "Feedback": ""}} + + Question: "{question}" + Answer: "{answer}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + #print(f'out_ids.logits:{out_ids.logit}') + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + +@torch.inference_mode() +def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256): + """ + Evaluate VQA answer correctness using all available modalities (not just RGB). + This reduces model bias and improves visual grounding reliability. 
+ """ + + # 检查存在的模态文件 + modality_names = [ + "image", "annotation_lineart", "annotation_edge", + "annotation_depth", "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", "annotation_openpose" + ] + + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # 构造 prompt + eval_prompt = f""" + You are a multimodal visual reasoning evaluator. + + You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. + Your task is to judge **how correct and visually grounded** the given answer is for the question, + based purely on visual evidence from all modalities. + + Follow this process: + 1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors). + 2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities. + 3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence. + 4. Otherwise, directly evaluate how accurate the free-form answer is. + 5. Penalize any parts that contradict the image, or ignore modalities. 
+ + Return JSON strictly: + {{ + "AnswerScore": , + "Feedback": "" + }} + + Question: "{question}" + Answer: "{answer}" + """ + + # 构建内容序列(模态+图像) + content = [] + #content.append({"type": "text", "text": eval_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + content.append({"type": "text", "text": eval_prompt}) + + messages = [{"role": "user", "content": content}] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_dict=True, return_tensors="pt" + ).to(model.device) + + outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True) + #print(out_ids) + out_ids = outs['sequences'] + scores = outs['scores'] + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300): + question = clean_prompt_question(question) + messages = build_multimodal_message(root, question, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, 
generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + feedback_path = Path(save_dir) / f"feedback.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + with open(feedback_path, "w", encoding="utf-8") as f: + f.write(feedback.strip()) + return output_text[0] + + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + 
num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + #attn_implementation="sdpa", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif 
condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[:3432]: + + image_path = os.path.join(args.data_path, sample["image"]) + image_id = str(sample["id"]) + image = Image.open(image_path) + question = sample["query"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length) 
+ result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/test_real_amber1.py b/test_real_amber1.py new file mode 100644 index 0000000000000000000000000000000000000000..5fa87771d158ac22f0381ef5de1364cc9e6a04f1 --- /dev/null +++ b/test_real_amber1.py @@ -0,0 +1,810 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). 
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The following caption describes the image in detail: '{prompt}'. 
" + f"Question:{question}" + f"Just response yes or no" + ) + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #content.append({"type": "text", "text": text_prompt}) + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. 
" + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + #content.append({"type": "text", "text": text_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes.

    Used to prefix each modality image in prompts so the VLM knows how to
    interpret it. Unknown names fall back to a generic description.
    """
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for the JODI + Qwen-VL VQA loop.

    FIX: several help strings were copy-pasted from unrelated options
    (e.g. --data_path was described as "Prompt text for generation.");
    they now describe what each option actually is. Defaults unchanged.
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to the Qwen VLM checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Model name or path used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/image",
                        help="Path to the image directory.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/merged.json",
                        help="Path to the annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback VQA question.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=5, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_amber_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer `question` about a single image and save the answer to disk.

    Writes the decoded answer to <args.output_dir>/<vqa_id>/caption.txt
    (relies on the module-level `args` assigned in __main__).
    FIX: the prompt was the truncated sentence
    "Answer the follow question:{question} based on the ." — now grammatical.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }
    ]

    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens from each sequence before decoding.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce an initial free-form caption for the image.

    The caption is written to
    <args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt
    (uses the module-level `args` assigned in __main__).
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens from each sequence before decoding.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score how well `answer` matches the image for `question`.

    Returns (score, feedback); falls back to (0.0, raw model reply) when
    the model does not return parseable JSON.
    FIX: json/re are imported locally because they are not visibly imported
    at the top of this file (would raise NameError at parse time below).
    NOTE(review): clean_eval_question is not defined in the visible part of
    this file — confirm it exists at module level or import it.
    """
    import json
    import re

    question = clean_eval_question(question)
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.
    Return JSON strictly:
    {{"AnswerScore": , "Feedback": ""}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; anything else becomes raw feedback.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback
@torch.inference_mode()
def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256):
    """Score `answer` for `question` using every modality image under `root`.

    Returns (score, feedback); falls back to (0.0, raw model reply) when
    the model does not return parseable JSON.
    FIX: json/re are imported locally because they are not visibly imported
    at the top of this file; the unused output_scores/return_dict_in_generate
    plumbing was removed (the `scores` value was never used).
    """
    import json
    import re

    modality_names = [
        "image", "annotation_lineart", "annotation_edge",
        "annotation_depth", "annotation_normal", "annotation_albedo",
        "annotation_seg_12colors", "annotation_openpose",
    ]

    # Keep the first existing file per modality (extension priority png > jpg > jpeg).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    eval_prompt = f"""
    You are a multimodal visual reasoning evaluator.

    You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
    Your task is to judge **how correct and visually grounded** the given answer is for the question,
    based purely on visual evidence from all modalities.

    Follow this process:
    1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
    2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
    3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
    4. Otherwise, directly evaluate how accurate the free-form answer is.
    5. Penalize any parts that contradict the image, or ignore modalities.

    Return JSON strictly:
    {{
    "AnswerScore": ,
    "Feedback": ""
    }}

    Question: "{question}"
    Answer: "{answer}"
    """

    # Tag each modality image before it so the model knows what it is seeing.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": eval_prompt})

    messages = [{"role": "user", "content": content}]

    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; anything else becomes raw feedback.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine `prompt` using the modality images in `root` plus evaluator feedback.

    Writes caption.txt and feedback.txt under
    <args.output_dir>/<vqa_id>/iteration_<iter_num> and returns the refined text.
    NOTE(review): clean_prompt_question and a 4-argument
    build_multimodal_message(root, question, prompt, feedback) are not defined
    in the visible part of this file (the visible build_multimodal_message
    takes two arguments) — confirm the module-level definitions.
    """
    question = clean_prompt_question(question)
    messages = build_multimodal_message(root, question, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    feedback_path = save_dir / "feedback.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    with open(feedback_path, "w", encoding="utf-8") as f:
        f.write(feedback.strip())
    return output_text[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer `question` using all modalities under `root` and the refined caption.

    Writes the answer to
    <args.output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt.
    NOTE(review): build_vqa_message is not defined in the visible part of this
    file — confirm it exists at module level.
    """
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Run the Jodi pipeline to (re)generate all modality images and save them.

    Returns the directory containing the per-modality PNGs plus a merged
    preview. Uses the module-level `args` and `post_processors`.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Post-process each channel (RGB + one per condition), then convert to PIL.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
if __name__ == "__main__":
    import json  # FIX: json is used below (json.load) but not visibly imported at file top

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per output channel (RGB first, then one per condition).
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # NOTE(review): hard-coded shard [3432:6864] — presumably one of several
    # parallel workers; confirm before reuse.
    for sample in annotations[3432:6864]:

        image_path = os.path.join(args.data_path, sample["image"])
        image_id = str(sample["id"])
        image = Image.open(image_path)
        question = sample["query"]

        # RGB image is provided (role=1); every condition slot is generated (role=0).
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions

        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_result, best_score = '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        # Initial caption + answer + score from the RGB image alone.
        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        result = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, question, result)

        if score >= best_score:
            best_result, best_score = result, score

        # Iterative refinement: regenerate modalities, refine caption, re-answer.
        for step in range(1, args.iters):
            generator = torch.Generator(device=device).manual_seed(args.seed)
            save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names,
                                    generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, question, feedback,
                                 step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result)

            if score >= best_score:
                best_result, best_score = result, score

        os.makedirs(args.output_dir, exist_ok=True)
        save_dir = Path(args.output_dir) / image_id / 'iteration_best' / 'vqa_answer'
        save_dir.mkdir(parents=True, exist_ok=True)
        caption_path = save_dir / "caption.txt"
        with open(caption_path, "w", encoding="utf-8") as f:
            f.write(best_result)
        print(best_result)


# ---------------------------------------------------------------------------
# test_real_amber2.py (second file added by the same patch)
# FIX: the original header imported `re`, `PIL.Image`, and `pathlib.Path`
# twice each; duplicates removed and imports grouped stdlib / third-party /
# project (the sys.path tweak must stay before the project imports).
# ---------------------------------------------------------------------------
import os
import sys
import argparse
import ast
import itertools
import json
import re
from pathlib import Path
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image
from datasets import load_dataset

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration,
)
from transformers import AutoProcessor, Trainer
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL message for multimodal caption refinement.

    Each available modality image under `root` is tagged with its name and a
    short functional description so the model can reason over them jointly;
    the refinement instruction (question, coarse caption, feedback) goes last.
    FIX: removed the large block of commented-out alternative prompt text.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    # Keep the first existing file per modality.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Main refinement instruction (kept as one string for the final content item).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. "
        f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues "
        f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, "
        f"while maintaining faithfulness to the original visual content. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Tag each modality image with its name and functional description.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Overall task instruction goes last.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes.

    Unknown names fall back to a generic description.
    """
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for the JODI + Qwen-VL VQA loop.

    FIX: several help strings were copy-pasted from unrelated options;
    they now describe what each option actually is. Defaults unchanged.
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to the Qwen VLM checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Model name or path used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/image",
                        help="Path to the image directory.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/merged.json",
                        help="Path to the annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback VQA question.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=5, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_amber_outputs", help="Directory to save results.")
    return parser
# ------------------------------
# Main Inference Function
# ------------------------------


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer `question` about a single image and save the answer to disk.

    Writes the decoded answer to <args.output_dir>/<vqa_id>/caption.txt
    (relies on the module-level `args` assigned in __main__).
    FIX: the prompt was the truncated sentence
    "Answer the follow question:{question} based on the ." — now grammatical.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }
    ]

    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens from each sequence before decoding.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce an initial free-form caption for the image.

    The caption is written to
    <args.output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score how well `answer` matches the image for `question`.

    Returns (score, feedback); falls back to (0.0, raw model reply) when the
    model does not return parseable JSON.
    """
    question = clean_eval_question(question)
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.
    Return JSON strictly:
    {{"AnswerScore": , "Feedback": ""}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; anything else becomes raw feedback.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback
+ Return JSON strictly: + {{"AnswerScore": , "Feedback": ""}} + + Question: "{question}" + Answer: "{answer}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + #print(f'out_ids.logits:{out_ids.logit}') + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + +@torch.inference_mode() +def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256): + """ + Evaluate VQA answer correctness using all available modalities (not just RGB). + This reduces model bias and improves visual grounding reliability. 
    """

    # Modality files that may exist under `root`.
    modality_names = [
        "image", "annotation_lineart", "annotation_edge",
        "annotation_depth", "annotation_normal", "annotation_albedo",
        "annotation_seg_12colors", "annotation_openpose"
    ]

    # Collect (name, path) for each modality file found; first extension wins.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality labels used in the prompt text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Build the evaluator prompt.
    eval_prompt = f"""
    You are a multimodal visual reasoning evaluator.

    You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
    Your task is to judge **how correct and visually grounded** the given answer is for the question,
    based purely on visual evidence from all modalities.

    Follow this process:
    1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
    2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
    3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
    4. Otherwise, directly evaluate how accurate the free-form answer is.
    5. Penalize any parts that contradict the image, or ignore modalities.

    Return JSON strictly:
    {{
    "AnswerScore": ,
    "Feedback": ""
    }}

    Question: "{question}"
    Answer: "{answer}"
    """

    # Interleave a modality tag before each image so the model can tell them apart.
    content = []
    #content.append({"type": "text", "text": eval_prompt})
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": eval_prompt})

    messages = [{"role": "user", "content": content}]

    # --- inference ---
    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt"
    ).to(model.device)

    outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True)
    #print(out_ids)
    out_ids = outs['sequences']
    # NOTE(review): `scores` is retrieved but never used below.
    scores = outs['scores']
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- parse the model's JSON reply (fall back to raw text as feedback) ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback



@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption using all modalities under `root` plus evaluator feedback.

    Saves the refined caption and the feedback under
    ``<output_dir>/<vqa_id>/iteration_<iter_num>/`` and returns the caption.
    """
    question = clean_prompt_question(question)
    messages = build_multimodal_message(root, question, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    feedback_path = Path(save_dir) / f"feedback.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    with open(feedback_path, "w", encoding="utf-8") as f:
        f.write(feedback.strip())
    return output_text[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer `question` from the modalities under `root` and the caption `prompt`.

    Writes the answer to
    ``<output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt``
    and returns it.
    """
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Regenerate the image and its annotation modalities from `prompt` via Jodi."""
    # print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    # Iterative caption-refine / VQA loop over one shard of the annotation file.
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        #attn_implementation="sdpa",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build post-processors (index 0 is the RGB image; one per condition after that).
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    # NOTE(review): `json` is used here but is not visible among this file's
    # imports — confirm it is imported earlier in the file.
    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # NOTE(review): hard-coded shard range — presumably one worker's slice of
    # the dataset; parameterize if this script is reused.
    for sample in annotations[6864:10296]:

        image_path = os.path.join(args.data_path, sample["image"])
        image_id = str(sample["id"])
        image = Image.open(image_path)
        question = sample["query"]

        # RGB image drives generation; the condition slots are left empty.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions

        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_result, best_score = '', 0.0
        max_length = 1024

        # input_img = Image.open(image_path).convert("RGB")
        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        # Iteration 0: caption + answer + score from the real image only.
        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        result = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, question, result)

        if score >= best_score:
            best_result, best_score = result, score

        # Iterations 1..iters-1: regenerate modalities, refine caption, re-answer.
        for step in range(1, args.iters):
            generator = torch.Generator(device=device).manual_seed(args.seed)
            save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width,
                                    image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id,
                                 max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result)

            if score >= best_score:
                best_result, best_score = result, score

        # Persist the best-scoring answer across all iterations.
        os.makedirs(args.output_dir, exist_ok=True)
        save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer'
        save_dir.mkdir(parents=True, exist_ok=True)
        caption_path = Path(save_dir) / f"caption.txt"
        with open(caption_path, "w", encoding="utf-8") as f:
            f.write(best_result)
        print(best_result)
diff --git a/test_real_amber3.py b/test_real_amber3.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbd7168fb68cdad33e2634c88bf52c486ae6a413
--- /dev/null
+++ b/test_real_amber3.py
@@ -0,0 +1,810 @@
import os
import sys
import argparse
from pathlib import Path
from PIL import Image
from typing import Any
import torch
import torchvision.transforms as T
from datasets import load_dataset

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"
from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration
)
from transformers import AutoProcessor, Trainer
from pathlib import Path
import itertools
import ast
import re
from PIL import Image
import json
import re


def clean_eval_question(q: str) -> str:
    """
    Clean VQA-style question text for evaluation.
    - If lettered options (A–Z) exist, keep text up to the last option.
    - Otherwise, keep text up to the first '?' (inclusive).
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The following caption describes the image in detail: '{prompt}'. 
" + f"Question:{question}" + f"Just response yes or no" + ) + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #content.append({"type": "text", "text": text_prompt}) + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. 
" + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + #content.append({"type": "text", "text": text_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/image", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/merged.json", + help="Optional negative prompt.") + 
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=5, help="Number of inference steps.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_amber_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer `question` from the raw image only; saves the answer to
    ``<output_dir>/<vqa_id>/caption.txt`` and returns it."""
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                # NOTE(review): instruction looks truncated ("based on the .")
                # — a placeholder token was probably stripped. Kept verbatim.
                {"type": "text", "text": f"Answer the follow question:{question} based on the ."},
            ],
        }
    ]

    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated tail is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Generate an initial caption for the image and save it under
    ``<output_dir>/<vqa_id>/iteration_<iter_num>/caption.txt``."""
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": f"Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    """Score how well `answer` matches the image evidence for `question`.

    Returns ``(score, feedback)``; on JSON parse failure the score is 0.0 and
    the raw model text is returned as feedback.
    """
    # Build the Qwen evaluator input.
    question = clean_eval_question(question)
    eval_prompt = f"""
    You are a VQA answer evaluator.
    Given an image, a question, and a proposed answer,
    score how correct the answer is according to the image evidence.
    Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
    to make the answer more accurate or grounded in the image.
    Return JSON strictly:
    {{"AnswerScore": , "Feedback": ""}}

    Question: "{question}"
    Answer: "{answer}"

    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # --- inference ---
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    #print(f'out_ids.logits:{out_ids.logit}')
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- parse the model's JSON reply (fall back to raw text as feedback) ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256):
    """
    Evaluate VQA answer correctness using all available modalities (not just RGB).
    This reduces model bias and improves visual grounding reliability.
    """

    # Modality files that may exist under `root`.
    modality_names = [
        "image", "annotation_lineart", "annotation_edge",
        "annotation_depth", "annotation_normal", "annotation_albedo",
        "annotation_seg_12colors", "annotation_openpose"
    ]

    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality labels.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Build the evaluator prompt.
    eval_prompt = f"""
    You are a multimodal visual reasoning evaluator.

    You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
    Your task is to judge **how correct and visually grounded** the given answer is for the question,
    based purely on visual evidence from all modalities.

    Follow this process:
    1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
    2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
    3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
    4. Otherwise, directly evaluate how accurate the free-form answer is.
    5. Penalize any parts that contradict the image, or ignore modalities.

    Return JSON strictly:
    {{
    "AnswerScore": ,
    "Feedback": ""
    }}

    Question: "{question}"
    Answer: "{answer}"
    """

    # Interleave a modality tag before each image.
    content = []
    #content.append({"type": "text", "text": eval_prompt})
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": eval_prompt})

    messages = [{"role": "user", "content": content}]

    # --- inference ---
    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True,
        return_dict=True, return_tensors="pt"
    ).to(model.device)

    outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True)
    #print(out_ids)
    out_ids = outs['sequences']
    # NOTE(review): `scores` is retrieved but never used below.
    scores = outs['scores']
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- parse the model's JSON reply (fall back to raw text as feedback) ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("AnswerScore", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
    return score, feedback



@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption using all modalities under `root` plus evaluator feedback.

    Saves the refined caption and the feedback under
    ``<output_dir>/<vqa_id>/iteration_<iter_num>/`` and returns the caption.
    """
    question = clean_prompt_question(question)
    messages = build_multimodal_message(root, question, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    feedback_path = Path(save_dir) / f"feedback.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    with open(feedback_path, "w", encoding="utf-8") as f:
        f.write(feedback.strip())
    return output_text[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer `question` from the modalities under `root` and caption `prompt`.

    Writes the answer to
    ``<output_dir>/<vqa_id>/iteration_<step>/vqa_answer/caption.txt``.
    """
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Regenerate the image and all annotation modalities from `prompt`,
    post-process each, save them plus a merged grid, and return the directory."""
    # print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        #attn_implementation="sdpa",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build post-processors (index 0 is the RGB image; one per condition after that).
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif
condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[10296:13728]: + + image_path = os.path.join(args.data_path, sample["image"]) + image_id = str(sample["id"]) + image = Image.open(image_path) + question = sample["query"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, 
max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/test_real_amber4.py b/test_real_amber4.py new file mode 100644 index 0000000000000000000000000000000000000000..97469b8a8796448c5ee7e5f9f3207cbab2ac3635 --- /dev/null +++ b/test_real_amber4.py @@ -0,0 +1,810 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). 
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The following caption describes the image in detail: '{prompt}'. 
" + f"Question:{question}" + f"Just response yes or no" + ) + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #content.append({"type": "text", "text": text_prompt}) + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. 
" + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + #content.append({"type": "text", "text": text_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/image", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/merged.json", + help="Optional negative prompt.") + 
parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=5, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_amber_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, question, answer, max_length=256): + # --- 构造 Qwen 输入 --- + question = clean_eval_question(question) + eval_prompt = f""" + You are a VQA answer evaluator. + Given an image, a question, and a proposed answer, + score how correct the answer is according to the image evidence. + Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved + to make the answer more accurate or grounded in the image. 
+ Return JSON strictly: + {{"AnswerScore": , "Feedback": ""}} + + Question: "{question}" + Answer: "{answer}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + #print(f'out_ids.logits:{out_ids.logit}') + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + +@torch.inference_mode() +def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256): + """ + Evaluate VQA answer correctness using all available modalities (not just RGB). + This reduces model bias and improves visual grounding reliability. 
+ """ + + # 检查存在的模态文件 + modality_names = [ + "image", "annotation_lineart", "annotation_edge", + "annotation_depth", "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", "annotation_openpose" + ] + + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # 构造 prompt + eval_prompt = f""" + You are a multimodal visual reasoning evaluator. + + You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. + Your task is to judge **how correct and visually grounded** the given answer is for the question, + based purely on visual evidence from all modalities. + + Follow this process: + 1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors). + 2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities. + 3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence. + 4. Otherwise, directly evaluate how accurate the free-form answer is. + 5. Penalize any parts that contradict the image, or ignore modalities. 
+ + Return JSON strictly: + {{ + "AnswerScore": , + "Feedback": "" + }} + + Question: "{question}" + Answer: "{answer}" + """ + + # 构建内容序列(模态+图像) + content = [] + #content.append({"type": "text", "text": eval_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + content.append({"type": "text", "text": eval_prompt}) + + messages = [{"role": "user", "content": content}] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_dict=True, return_tensors="pt" + ).to(model.device) + + outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True) + #print(out_ids) + out_ids = outs['sequences'] + scores = outs['scores'] + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300): + question = clean_prompt_question(question) + messages = build_multimodal_message(root, question, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, 
generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + feedback_path = Path(save_dir) / f"feedback.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + with open(feedback_path, "w", encoding="utf-8") as f: + f.write(feedback.strip()) + return output_text[0] + + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + 
num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + #attn_implementation="sdpa", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif 
condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[13728:17160]: + + image_path = os.path.join(args.data_path, sample["image"]) + image_id = str(sample["id"]) + image = Image.open(image_path) + question = sample["query"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, 
max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/test_real_amber5.py b/test_real_amber5.py new file mode 100644 index 0000000000000000000000000000000000000000..d2374e596b0b2bc9b54453c650039ae96e77f1b0 --- /dev/null +++ b/test_real_amber5.py @@ -0,0 +1,810 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +import re + + +def clean_eval_question(q: str) -> str: + """ + Clean VQA-style question text for evaluation. + - If lettered options (A–Z) exist, keep text up to the last option. + - Otherwise, keep text up to the first '?' (inclusive). 
+ """ + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 匹配所有选项(A–Z),兼容多种写法:A. / A) / (A) / A: / A - / A– ... + option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)" + matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE)) + + if matches: + # 找到最后一个选项出现位置 → 保留到该选项行的结束处 + last_match = matches[-1] + # 找到从最后一个选项开始到该段落结束(如选项内容的末尾) + tail = q[last_match.end():] + # 截断尾部任何额外提示("Please answer..." 等) + tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0] + q = q[:last_match.end()] + tail_cut + else: + # 无选项 → 只保留问句(问号前的部分) + match_qmark = re.search(r"\?", q) + if match_qmark: + q = q[:match_qmark.end()] + else: + q = q.split("\n")[0] # fallback + + # 清理多余换行与空格 + q = re.sub(r"\n+", " ", q) + q = re.sub(r"\s+", " ", q).strip() + return q + + +def clean_prompt_question(q: str) -> str: + """Clean VQA-style question text, keeping only the question stem before '?'. 
""" + if not isinstance(q, str): + q = str(q) + + # 删除 占位符 + q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + + # 截取问号之前的部分(包括问号) + match = re.search(r"^(.*?\?)", q) + if match: + q = match.group(1) + else: + # 若无问号则保留首句 + q = q.split("\n")[0] + + # 去除多余空白与换行 + q = re.sub(r"\s+", " ", q).strip() + return q + + +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. 
+ Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + text_prompt = ( + f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The following caption describes the image in detail: '{prompt}'. 
" + f"Question:{question}" + f"Just response yes or no" + ) + + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + #content.append({"type": "text", "text": text_prompt}) + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + # "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + # "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. 
" + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. " + f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues " + f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, " + f"while maintaining faithfulness to the original visual content. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. " + f"Coarse caption: '{coarse_caption}' " + ) + + # text_prompt0 = ( + # f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + # f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + # f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + # f"### Your Task:\n" + # f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. 
" + # f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + # f"### Guidelines:\n" + # f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + # f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + # f"3. Do NOT invent or assume anything not visually supported.\n" + # f"4. Avoid including any additional commentary or evaluations.\n" + # f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + # f"### Coarse Caption:\n'{coarse_caption}'\n\n" + # f"### Feedback to Incorporate:\n'{feedback}'\n\n" + # f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + # ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + #content.append({"type": "text", "text": text_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/image", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/miw/dataset/dataset/AMBER/merged.json", + help="Optional negative prompt.") + 
parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=5, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_amber_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, question, answer, max_length=256): + # --- 构造 Qwen 输入 --- + question = clean_eval_question(question) + eval_prompt = f""" + You are a VQA answer evaluator. + Given an image, a question, and a proposed answer, + score how correct the answer is according to the image evidence. + Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved + to make the answer more accurate or grounded in the image. 
+ Return JSON strictly: + {{"AnswerScore": , "Feedback": ""}} + + Question: "{question}" + Answer: "{answer}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + #print(f'out_ids.logits:{out_ids.logit}') + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + +@torch.inference_mode() +def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256): + """ + Evaluate VQA answer correctness using all available modalities (not just RGB). + This reduces model bias and improves visual grounding reliability. 
+ """ + + # 检查存在的模态文件 + modality_names = [ + "image", "annotation_lineart", "annotation_edge", + "annotation_depth", "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", "annotation_openpose" + ] + + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # 构造 prompt + eval_prompt = f""" + You are a multimodal visual reasoning evaluator. + + You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. + Your task is to judge **how correct and visually grounded** the given answer is for the question, + based purely on visual evidence from all modalities. + + Follow this process: + 1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors). + 2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities. + 3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence. + 4. Otherwise, directly evaluate how accurate the free-form answer is. + 5. Penalize any parts that contradict the image, or ignore modalities. 
+ + Return JSON strictly: + {{ + "AnswerScore": , + "Feedback": "" + }} + + Question: "{question}" + Answer: "{answer}" + """ + + # 构建内容序列(模态+图像) + content = [] + #content.append({"type": "text", "text": eval_prompt}) + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + content.append({"type": "text", "text": eval_prompt}) + + messages = [{"role": "user", "content": content}] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_dict=True, return_tensors="pt" + ).to(model.device) + + outs = model.generate(**inputs, max_new_tokens=max_length, output_scores=True, return_dict_in_generate=True) + #print(out_ids) + out_ids = outs['sequences'] + scores = outs['scores'] + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("AnswerScore", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}") + return score, feedback + + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300): + question = clean_prompt_question(question) + messages = build_multimodal_message(root, question, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, 
generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + feedback_path = Path(save_dir) / f"feedback.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + with open(feedback_path, "w", encoding="utf-8") as f: + f.write(feedback.strip()) + return output_text[0] + + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + 
num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + #attn_implementation="sdpa", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif 
condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[17160:]: + + image_path = os.path.join(args.data_path, sample["image"]) + image_id = str(sample["id"]) + image = Image.open(image_path) + question = sample["query"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_result, best_score = '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + result = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + for step in range(1, args.iters): + generator = torch.Generator(device=device).manual_seed(args.seed) + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length) 
+ result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result) + + if score >= best_score: + best_result, best_score = result, score + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(best_result) + print(best_result) + diff --git a/test_realworldqa_vqa.py b/test_realworldqa_vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..6d03a1985eb184fa431d160b76cd47235f0c2395 --- /dev/null +++ b/test_realworldqa_vqa.py @@ -0,0 +1,620 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) 
+ save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. 
+ """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png"]: + image_path = str(root_path) + text_prompt = ( + f"You are given one RGB image and a text description of the same scene.\n" + f"Scene description: \"{prompt}\"\n\n" + f"Now analyze the image carefully and answer the following question based only on what is visible.\n" + f"Do NOT guess or add details not supported by the image.\n" + f"Question: \"{question}\"\n" + "" + ) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": text_prompt}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + f"so use them only as optional references for additional context. 
" + f"Each modality provides complementary information about the same visual content:\n" + f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + f"- The edge map emphasizes boundaries and contours.\n" + f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + f"- The normal map shows surface orientation and geometric curvature.\n" + f"- The albedo map presents true surface color without illumination or shadows.\n" + f"- The segmentation map divides the scene into semantic regions and object categories.\n" + f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + f"Together, these modalities offer a unified, rich understanding of the scene.\n" + f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + + # --- 构建消息内容:在每个图像前加模态标识 --- + + + content = [] + + text_prompt = ("you are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}.\n" + f"Each modality provides a different aspect of visual information about the same scene.\n\n" + f"### Modality Information:\n" + f"- **RGB image:** shows colors, textures, lighting, and overall appearance.\n" + f"- **Line drawing:** reveals outlines, object contours, and structural details.\n" + f"- **Edge map:** highlights strong edges and object boundaries.\n" + f"- **Depth map:** encodes per-object spatial distance and perspective. " + f"For each main object, estimate its approximate physical distance from the camera or ground reference " + f"in **meters**. 
" + f"If multiple objects are visible, provide numeric distances rather than qualitative terms like " + f"'closer' or 'farther'.\n" + f"- **Normal map:** provides surface orientation and facing direction.\n" + f"- **Albedo map:** shows true surface color unaffected by lighting or shadows.\n" + f"- **Segmentation map:** divides the image into semantic regions and object categories.\n" + f"- **Human pose map:** depicts human keypoints, poses, and orientations if present.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a detailed, modality-wise visual description. " + f"For each available modality listed above, generate one corresponding description paragraph " + f"based only on what that modality shows.\n\n" + f"### Rules:\n" + f"1. Follow the order and modality names given in 'Modality Information'.\n" + f"2. Start each paragraph with the modality name (e.g., 'RGB image:').\n" + f"3. Describe only what is visible in that modality—do NOT merge or summarize multiple modalities.\n" + f"4. Use **numeric distance estimates in meters** for the depth map whenever possible.\n" + f"5. Use clear and factual language (no imagination or hallucination).\n" + #f"6. You may use the following feedback for improvement: '{feedback}'\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now, according to the 'Modality Information' above, write one detailed description for each available modality below." + ) + + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
        })
        content.append({"type": "image", "image": path})

    # Finally append the overall task instruction after all tagged images.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of a modality's role, used to help
    the model understand what information each modality image carries."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    # Unknown modality names fall back to a generic phrase instead of raising.
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    # NOTE(review): several help strings below look copy-pasted from other
    # arguments (e.g. "Prompt text for generation." on path options) — verify
    # and correct the help texts against actual usage.
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Prompt text for generation.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Optional negative
prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=41) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: 
+ f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, caption, max_length=256): + + # --- 构造 Qwen 输入 --- + eval_prompt = f""" + You are an image-text alignment evaluator. + You are given one RGB image and a description that may include references + to multiple visual modalities (e.g., depth map, normal map, segmentation map, etc.). + These terms are just analytical perspectives of the same scene — they should not reduce + the consistency score. Focus only on whether the described visual content matches what + is visible in the RGB image. + Your task: + 1. Judge how accurately the text describes what is visually present in the image. + 2. Ignore mentions of modality names (such as 'depth map' or 'normal map'). + 3. Provide a consistency score between 0.0 (completely mismatched) and 1.0 (perfect match). + 4. Provide one short feedback sentence suggesting how to make the description better aligned. + Return JSON strictly in this format: + {{"Consistency": , "Feedback": ""}} + Description: "{caption}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}") + return score, feedback + + 
+@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / 'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", 
encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + 
"annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[1:255]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = 
image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + #if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, max_length) + print(f'result:{result}') diff --git a/test_realworldqa_vqa1.py b/test_realworldqa_vqa1.py new file mode 100644 index 0000000000000000000000000000000000000000..743ee094606c9763731288364b9a0eb5c7b6f369 --- /dev/null +++ b/test_realworldqa_vqa1.py @@ -0,0 +1,669 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, 
exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. 
+ """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. 
" + #f"Each modality provides complementary information about the same visual content:\n" + #f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + #f"- The edge map emphasizes boundaries and contours.\n" + #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + #f"- The normal map shows surface orientation and geometric curvature.\n" + #f"- The albedo map presents true surface color without illumination or shadows.\n" + #f"- The segmentation map divides the scene into semantic regions and object categories.\n" + #f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + #f"Together, these modalities offer a unified, rich understanding of the scene.\n" + #f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. 
Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Scans *root* for the known modality images, tags each image with a short
    modality description, and appends the refinement instruction last.

    NOTE(review): the original c2i.py definition took only (root,
    coarse_caption), but text_refine() calls it with three arguments —
    the *feedback* parameter is restored here (default "" keeps any
    two-argument callers working); body mirrors the duplicate in
    test_realworldqa_vqa2.py.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    # Collect the modality files that actually exist under root.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary modality that provides the most reliable view of the scene. "
        f"### Your Task:\n"
        f"Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        f"of the scene, integrating information from all available modalities.\n\n"
        f"### Rules:\n"
        f"1. Describe only what is visible in the images — do NOT hallucinate.\n"
        f"3. Incorporate the following feedback into your refinement: '{feedback}'\n"
        f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now refine the caption according to the multimodal evidence below."
    )

    # Interleave a textual modality tag before each image so the model can
    # tell RGB, edge, depth, etc. apart.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})

    # The overall task instruction goes last, after all evidence.
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL VQA message from either a single image file or a
    directory of modality images.

    NOTE(review): c2i.py's vqa() calls this helper, but the module never
    defined it (NameError at runtime) — added here, mirroring the version
    in test_realworldqa_vqa2.py.
    """
    root_path = Path(root)

    # Single-image case: root points at a plain image file.
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        return [{
            "role": "user",
            "content": [
                {"type": "image", "image": str(root)},
                # NOTE(review): original text read "based on the ." — an
                # "<image>" tag was presumably stripped; restored — confirm.
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }]

    # Multi-modal folder case: gather whichever modality files exist.
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        f"Please answer the following question using visual reasoning primarily grounded in the RGB image, "
        f"Question: \"{question}\"\n"
    )

    # Tag each image with its modality name before attaching it.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """CLI arguments for the JODI + Qwen3-VL iterative caption/VQA loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Path or hub id of the Qwen3-VL checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to config file.")
    parser.add_argument("--model_path", type=str, default="hf://VIPL-GENUN/Jodi/Jodi.pth",
                        help="Path to the JODI model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Path or hub id used to load the processor.")
    # NOTE(review): several help strings below were copy-pasted from other
    # options in the original ("Prompt text for generation.",
    # "Optional negative prompt."); corrected to describe the actual argument.
    parser.add_argument("--data_path", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the annotations JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="",
                        help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback VQA question (per-sample questions come from --json).")
    parser.add_argument("--steps", type=int, default=20,
                        help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10,
                        help="Number of refinement iterations per sample.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs",
                        help="Directory to save results.")
    return parser


# ------------------------------
# Shared generation helpers
# ------------------------------
@torch.inference_mode()
def _chat_generate(model, processor, messages, max_length):
    """Run one chat-template generation and return the decoded answer string."""
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens from each generated sequence before decoding.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    return processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]


def _save_caption(save_dir, text):
    """Write *text* to save_dir/caption.txt, creating directories as needed."""
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(text.strip(), encoding="utf-8")


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer *question* about a single RGB image; save and return the answer."""
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            # NOTE(review): original text read "based on the ." — restored.
            {"type": "text", "text": f"Answer the following question: {question} based on the image."},
        ],
    }]
    print(messages)
    answer = _chat_generate(model, processor, messages, max_length)
    print(answer)
    _save_caption(Path(args.output_dir) / str(vqa_id), answer)
    return answer


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce the initial (coarse) caption for the image; save and return it."""
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            {"type": "text", "text": "Describe this image."},
        ],
    }]
    caption = _chat_generate(model, processor, messages, max_length)
    print(caption)
    _save_caption(Path(args.output_dir) / vqa_id / f"iteration_{iter_num}", caption)
    return caption


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score caption/image alignment with the VLM.

    Returns:
        (score, feedback) — score is 0.0 with the raw model text as feedback
        when the model output cannot be parsed as the expected JSON.
    """
    # NOTE(review): json and re are used here but were never imported at the
    # top of c2i.py (NameError at runtime); imported locally since the
    # module's import block is outside this section.
    import json
    import re

    # NOTE(review): the JSON template placeholders were stripped in the
    # original ('{"Consistency": , "Feedback": ""}'); restored as
    # angle-bracket placeholders — confirm against the intended prompt.
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": <float between 0 and 1>, "Feedback": "<one short sentence>"}}

    Description: "{caption}"
    """

    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            {"type": "text", "text": eval_prompt},
        ],
    }]

    text = _chat_generate(model, processor, messages, max_length)

    # Extract the first {...} span and parse it; fall back to score 0 with
    # the raw text as feedback on any parse failure.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine *prompt* using the modality images under *root*; save + return it."""
    messages = build_multimodal_message(root, prompt, feedback)
    refined = _chat_generate(model, processor, messages, max_length)
    print(refined)
    _save_caption(Path(args.output_dir) / vqa_id / f"iteration_{iter_num}", refined)
    return refined


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer *question* from the modality images (or single image) at *root*."""
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    answer = _chat_generate(model, processor, messages, max_length)
    print(answer)
    _save_caption(Path(args.output_dir) / vqa_id / f"iteration_{step}" / "vqa_answer", answer)
    return answer


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Generate all modality images from *prompt* with JODI and save them.

    Returns the directory containing the per-modality PNGs and merged grid.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task="t2i",
    )

    # Post-process the 1 + num_conditions modality outputs into PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    # NOTE(review): json was never imported at the top of c2i.py; imported
    # here since the module's import block is outside this section.
    import json

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Map each configured condition to its post-processor; index 0 is always
    # the RGB image post-processor.
    post_processors: list[Any] = [ImagePostProcessor()]
    condition_map = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "openpose": OpenposePostProcessor,
    }
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition in condition_map:
            post_processors.append(condition_map[condition]())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # This script processes the first shard of the dataset (samples 0..152).
    for sample in annotations[:153]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split(".")[0]
        image = Image.open(image_path)
        question = sample["question"]

        # The RGB image is the conditioning input (role 1); every other
        # modality is generated (role 0).
        control_images = [image.convert("RGB")] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = "", "", 0.0
        max_length = 1024

        width, height = image.size
        print(f"ori width:{width}", f"ori height:{height}")

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        for step in range(1, args.iters):
            # Re-seed each iteration so every refinement step is reproducible.
            generator = torch.Generator(device=device).manual_seed(args.seed)
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        # Answer once more from the best-scoring iteration's outputs.
        result = vqa(best_dir, model, processor, best_caption, question, image_id, "best", max_length)
        print(f"result:{result}")
import os
import sys
import argparse
import ast
import itertools
import json
import re
from pathlib import Path
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image
from datasets import load_dataset

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration,
)
from transformers import AutoProcessor, Trainer


def clean_question(q: str) -> str:
    """Normalize a question string: drop image placeholders, collapse whitespace."""
    if not isinstance(q, str):
        q = str(q)
    # NOTE(review): in the original this re.sub line had been folded into the
    # comment above it (a placeholder tag in the comment was stripped), so
    # "<imageN>" placeholders were never removed; restored as executable code.
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    q = re.sub(r"\s+", " ", q).strip()
    return q


def dump_image(image, save_root):
    """Save *image* as save_root/input.jpg (JPEG, quality 95); return the path."""
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile the images at *image_paths* into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file (parent directories are created as needed).
        images_per_row: tiles per row (default: all images in one row).
        image_format: PIL save format name.

    Returns:
        save_path.
    """
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Every tile is resized to the same square size before tiling.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    rows = (len(images) + images_per_row - 1) // images_per_row
    # FIX(review): the original sized the canvas as
    # sum(heights[:images_per_row]) * rows, over-allocating the height by a
    # factor of images_per_row — each row is exactly one tile high.
    canvas_w = target_size * images_per_row
    canvas_h = target_size * rows

    grid = Image.new("RGB", (canvas_w, canvas_h))
    for idx, img in enumerate(images):
        col, row = idx % images_per_row, idx // images_per_row
        grid.paste(img, (col * target_size, row * target_size))

    parent = os.path.dirname(str(save_path))
    if parent:  # avoid os.makedirs('') when saving into the current directory
        os.makedirs(parent, exist_ok=True)
    grid.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_vqa_message(root, prompt, question):
    """Build a Qwen3-VL message for single-image or multi-modal VQA.

    Each modality image is preceded by a textual tag so the model can
    distinguish RGB, edge, depth, normal, etc.
    """
    root_path = Path(root)

    # ---- Single-image case: root points at a plain image file. ----
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        return [{
            "role": "user",
            "content": [
                {"type": "image", "image": str(root)},
                # NOTE(review): original text read "based on the ." — an
                # "<image>" tag was presumably stripped; restored — confirm.
                {"type": "text", "text": f"Answer the following question: {question} based on the image."},
            ],
        }]

    # ---- Multi-modal folder case: gather whichever modality files exist. ----
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        f"Please answer the following question using visual reasoning primarily grounded in the RGB image, "
        f"Question: \"{question}\"\n"
    )

    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        # Explicitly tag each image's modality before attaching it.
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """Build a Qwen3-VL message for multi-modal caption refinement.

    Binds each image to its modality name so the model can reason over the
    modalities correctly and refine the caption faithfully.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    # Collect the modality files that actually exist under root.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    # NOTE(review): the original also built an unused alternative prompt
    # (text_prompt0); removed as dead code.
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary modality that provides the most reliable view of the scene. "
        f"### Your Task:\n"
        f"Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        f"of the scene, integrating information from all available modalities.\n\n"
        f"### Rules:\n"
        f"1. Describe only what is visible in the images — do NOT hallucinate.\n"
        f"3. Incorporate the following feedback into your refinement: '{feedback}'\n"
        f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now refine the caption according to the multimodal evidence below."
    )

    # Interleave a modality tag (with a one-line description) before each image.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})

    # The overall task instruction goes last, after all evidence.
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """CLI arguments for the JODI + Qwen3-VL iterative caption/VQA loop."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Path or hub id of the Qwen3-VL checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to config file.")
    parser.add_argument("--model_path", type=str, default="hf://VIPL-GENUN/Jodi/Jodi.pth",
                        help="Path to the JODI model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default="Qwen/Qwen3-VL-8B-Instruct",
                        help="Path or hub id used to load the processor.")
    # NOTE(review): several help strings below were copy-pasted from other
    # options in the original; corrected to describe the actual argument.
    parser.add_argument("--data_path", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the RealWorldQA images.")
    parser.add_argument("--json", type=str,
                        default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the annotations JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="",
                        help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback VQA question (per-sample questions come from --json).")
    parser.add_argument("--steps", type=int, default=20,
                        help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10,
                        help="Number of refinement iterations per sample.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs",
                        help="Directory to save results.")
    return parser


# ------------------------------
# Shared generation helpers
# ------------------------------
@torch.inference_mode()
def _chat_generate(model, processor, messages, max_length):
    """Run one chat-template generation and return the decoded answer string."""
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens from each generated sequence before decoding.
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    return processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]


def _save_caption(save_dir, text):
    """Write *text* to save_dir/caption.txt, creating directories as needed."""
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(text.strip(), encoding="utf-8")


@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer *question* about a single RGB image; save and return the answer."""
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            # NOTE(review): original text read "based on the ." — restored.
            {"type": "text", "text": f"Answer the following question: {question} based on the image."},
        ],
    }]
    print(messages)
    answer = _chat_generate(model, processor, messages, max_length)
    print(answer)
    _save_caption(Path(args.output_dir) / str(vqa_id), answer)
    return answer


@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Produce the initial (coarse) caption for the image; save and return it."""
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            {"type": "text", "text": "Describe this image."},
        ],
    }]
    caption = _chat_generate(model, processor, messages, max_length)
    print(caption)
    _save_caption(Path(args.output_dir) / vqa_id / f"iteration_{iter_num}", caption)
    return caption


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score caption/image alignment with the VLM.

    Returns:
        (score, feedback) — score is 0.0 with the raw model text as feedback
        when the model output cannot be parsed as the expected JSON.
    """
    # NOTE(review): the JSON template placeholders were stripped in the
    # original ('{"Consistency": , "Feedback": ""}'); restored as
    # angle-bracket placeholders — confirm against the intended prompt.
    eval_prompt = f"""
    You are an image-text alignment evaluator.
    Given one RGB image and a description, score how well the text matches
    the visual evidence in the image. Then provide one short feedback
    sentence suggesting how to make the description better aligned.

    Return JSON strictly:
    {{"Consistency": <float between 0 and 1>, "Feedback": "<one short sentence>"}}

    Description: "{caption}"
    """

    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            {"type": "text", "text": eval_prompt},
        ],
    }]

    text = _chat_generate(model, processor, messages, max_length)

    # Extract the first {...} span and parse it; fall back to score 0 with
    # the raw text as feedback on any parse failure.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine *prompt* using the modality images under *root*; save + return it."""
    messages = build_multimodal_message(root, prompt, feedback)
    refined = _chat_generate(model, processor, messages, max_length)
    print(refined)
    _save_caption(Path(args.output_dir) / vqa_id / f"iteration_{iter_num}", refined)
    return refined


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer *question* from the modality images (or single image) at *root*."""
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    answer = _chat_generate(model, processor, messages, max_length)
    print(answer)
    _save_caption(Path(args.output_dir) / vqa_id / f"iteration_{step}" / "vqa_answer", answer)
    return answer


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Generate all modality images from *prompt* with JODI and save them.

    Returns the directory containing the per-modality PNGs and merged grid.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task="t2i",
    )

    # Post-process the 1 + num_conditions modality outputs into PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Map each configured condition to its post-processor; index 0 is always
    # the RGB image post-processor.
    post_processors: list[Any] = [ImagePostProcessor()]
    condition_map = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "openpose": OpenposePostProcessor,
    }
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition in condition_map:
            post_processors.append(condition_map[condition]())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)

    # This script processes the second shard of the dataset (samples 153..305).
    for sample in annotations[153:306]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split(".")[0]
        image = Image.open(image_path)
        question = sample["question"]

        # The RGB image is the conditioning input (role 1); every other
        # modality is generated (role 0).
        control_images = [image.convert("RGB")] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = "", "", 0.0
        max_length = 1024

        width, height = image.size
        print(f"ori width:{width}", f"ori height:{height}")

        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, 100, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        for step in range(1, args.iters):
            # FIX(review): re-seed the generator each iteration for
            # reproducibility, matching the c2i.py variant of this loop.
            generator = torch.Generator(device=device).manual_seed(args.seed)
            save_dir = image_refine(prompt, control_images, role, pipe, step,
                                    modality_names, generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir

        # Answer once more from the best-scoring iteration's outputs.
        result = vqa(best_dir, model, processor, best_caption, question, image_id, "best", max_length)
        print(f"result:{result}")
将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. 
+ """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. 
" + #f"Each modality provides complementary information about the same visual content:\n" + #f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + #f"- The edge map emphasizes boundaries and contours.\n" + #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + #f"- The normal map shows surface orientation and geometric curvature.\n" + #f"- The albedo map presents true surface color without illumination or shadows.\n" + #f"- The segmentation map divides the scene into semantic regions and object categories.\n" + #f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + #f"Together, these modalities offer a unified, rich understanding of the scene.\n" + #f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. 
Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", + help="Optional negative 
prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with 
open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, caption, max_length=256): + + # --- 构造 Qwen 输入 --- + eval_prompt = f""" + You are an image-text alignment evaluator. + Given one RGB image and a description, score how well the text matches + the visual evidence in the image. Then provide one short feedback + sentence suggesting how to make the description better aligned. 
+ + Return JSON strictly: + {{"Consistency": , "Feedback": ""}} + + Description: "{caption}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}") + return score, feedback + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + 
f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' /'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / 
image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + 
post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[306:459]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + _ = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') diff --git a/test_realworldqa_vqa4.py b/test_realworldqa_vqa4.py new file mode 100644 index 0000000000000000000000000000000000000000..b8d5678a518cefb3dfb4d3efc9e5458a2670e69b --- /dev/null +++ b/test_realworldqa_vqa4.py @@ -0,0 
+1,668 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 
1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. + """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", 
+ } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. " + #f"Each modality provides complementary information about the same visual content:\n" + #f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + #f"- The edge map emphasizes boundaries and contours.\n" + #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + #f"- The normal map shows surface orientation and geometric curvature.\n" + #f"- The albedo map presents true surface color without illumination or shadows.\n" + #f"- The segmentation map divides the scene into semantic regions and object categories.\n" + #f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + #f"Together, these modalities offer a unified, rich understanding of the scene.\n" + #f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": 
text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. + """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) 
serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. 
Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", + help="Optional negative prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + 
"image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, 
exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, caption, max_length=256): + + # --- 构造 Qwen 输入 --- + eval_prompt = f""" + You are an image-text alignment evaluator. + Given one RGB image and a description, score how well the text matches + the visual evidence in the image. Then provide one short feedback + sentence suggesting how to make the description better aligned. + + Return JSON strictly: + {{"Consistency": , "Feedback": ""}} + + Description: "{caption}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}") + return score, feedback + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = 
model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' /'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, 
+ num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == 
"edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[459:612]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + _ = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, 
question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') diff --git a/test_realworldqa_vqa5.py b/test_realworldqa_vqa5.py new file mode 100644 index 0000000000000000000000000000000000000000..fd64c8a90e4ad3e02f4655dd9e60425fb7dc7705 --- /dev/null +++ b/test_realworldqa_vqa5.py @@ -0,0 +1,668 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +from datasets import load_dataset +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import ast +import re +from PIL import Image +import json +def clean_question(q: str) -> str: + if not isinstance(q, str): + q = str(q) + # 删除 等占位符 q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE) + # 再清理多余空白 + q = re.sub(r"\s+", " ", q).strip() + return q +def dump_image(image, save_root): + os.makedirs(save_root, exist_ok=True) + save_path = os.path.join(save_root, "input.jpg") + image.convert("RGB").save(save_path, format="JPEG", quality=95) + return save_path + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ 
将多个图像拼接成一张大图并保存。 + Args: image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multimodal or single-image VQA. + Now explicitly tags each modality image before feeding into Qwen3-VL, + so that the model can distinguish RGB, edge, depth, normal, etc. 
+ """ + + root_path = Path(root) + + # ---------- 单图像情况 ---------- + if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]: + image_path = str(root) + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + return messages + + # ---------- 多模态文件夹情况 ---------- + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # 检查存在的模态文件 + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + + + # 可读名称映射 + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # ---------- 指令文本 ---------- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary and most reliable modality that truly represents the scene. " + #f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, " + #f"so use them only as optional references for additional context. 
" + #f"Each modality provides complementary information about the same visual content:\n" + #f"- The line drawing highlights object outlines, shapes, and fine structures.\n" + #f"- The edge map emphasizes boundaries and contours.\n" + #f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n" + #f"- The normal map shows surface orientation and geometric curvature.\n" + #f"- The albedo map presents true surface color without illumination or shadows.\n" + #f"- The segmentation map divides the scene into semantic regions and object categories.\n" + #f"- The human pose map indicates body orientation, structure, and articulation.\n\n" + #f"Together, these modalities offer a unified, rich understanding of the scene.\n" + #f"Scene description: \"{prompt}\"\n\n" + f"Please answer the following question using visual reasoning primarily grounded in the RGB image, " + #f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n" + #f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n" + f"Question: \"{question}\"\n" + ) + + # ---------- 构建内容序列(模态锚定) ---------- + content = [] + print(f'available:{available}') + for name, path in available: + readable = readable_map.get(name, "visual input") + # 在每张图像前显式标注模态类型 + content.append({"type": "text", "text": f"This is the {readable}."}) + content.append({"type": "image", "image": path}) + + # 最后加入主指令 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + + + +def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Explicitly binds each image to its modality name (RGB, edge, depth, etc.) + so Qwen3-VL can reason over them correctly and refine the caption faithfully. 
+ """ + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + #"annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + #"annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** is the primary modality that provides the most reliable view of the scene. " + #f"Other modalities (depth, normal, edge, segmentation, etc.) serve as structural or semantic references.\n\n" + #f"Each modality provides distinct complementary information:\n" + #f"- The line drawing highlights structure and contours.\n" + #f"- The edge map emphasizes object boundaries.\n" + #f"- The depth map shows spatial distance and perspective.\n" + #f"- The normal map captures surface orientation and geometry.\n" + #f"- The albedo map shows intrinsic surface color.\n" + #f"- The segmentation map reveals semantic regions.\n" + #f"- The human pose map indicates body structure and articulation.\n\n" + f"### Your Task:\n" + f"Refine the coarse caption into a more accurate, realistic, and visually grounded description " + f"of the scene, integrating information from all available modalities.\n\n" + f"### Rules:\n" + f"1. Describe only what is visible in the images — do NOT hallucinate.\n" + #f"2. 
Use the RGB image as your main reference, and use other modalities to verify geometric or structural details.\n" + f"3. Incorporate the following feedback into your refinement: '{feedback}'\n" + f"4. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"Now refine the caption according to the multimodal evidence below." + ) + + text_prompt0 = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"The **RGB image** provides the most accurate and realistic appearance of the scene, " + f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n" + f"### Your Task:\n" + f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. " + f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n" + f"### Guidelines:\n" + f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n" + f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n" + f"3. Do NOT invent or assume anything not visually supported.\n" + f"4. Avoid including any additional commentary or evaluations.\n" + f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n" + f"### Coarse Caption:\n'{coarse_caption}'\n\n" + f"### Feedback to Incorporate:\n'{feedback}'\n\n" + f"Now produce the final refined caption describing the scene based on the multimodal evidence below." + ) + + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." 
+ }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images", + help="Prompt text for generation.") + parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json", + help="Optional negative 
prompt.") + parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp", + help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", + help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + + +@torch.inference_mode() +def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Answer the follow question:{question} based on the ."}, + ], + } + ] + + print(messages) + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / str(vqa_id) + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with 
open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": f"Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, caption, max_length=256): + + # --- 构造 Qwen 输入 --- + eval_prompt = f""" + You are an image-text alignment evaluator. + Given one RGB image and a description, score how well the text matches + the visual evidence in the image. Then provide one short feedback + sentence suggesting how to make the description better aligned. 
+ + Return JSON strictly: + {{"Consistency": , "Feedback": ""}} + + Description: "{caption}" + + """ + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + except Exception: + score, feedback = 0.0, text.strip() + + print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}") + return score, feedback + + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300): + messages = build_multimodal_message(root, prompt, feedback) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + 
f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300): + messages = build_vqa_message(root, prompt, question) + print(messages) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' /'vqa_answer' + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id): + # print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / 
image_id / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + 
post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + with open(args.json, "r", encoding="utf-8") as f: + annotations = json.load(f) + + for sample in annotations[612:]: + image_path = os.path.join(args.data_path, sample["image"]) + image_id = sample["image"].split('.')[0] + image = Image.open(image_path) + question = sample["question"] + + control_images = [image.convert('RGB')] + [None] * pipe.num_conditions + + role = [1] + [0] * pipe.num_conditions + print(role) + + best_dir, best_caption, best_score = '', '', 0.0 + max_length = 1024 + + # input_img = Image.open(image_path).convert("RGB") + width, height = image.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, image_id, max_length) + _ = vqa_i2t(model, processor, image_path, question, 100, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = image_path + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width, + image_id) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length) + result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length) + score, feedback = evaluate_consistency(image_path, model, processor, prompt) + + if score >= best_score: + best_caption, best_score = prompt, score + best_dir = save_dir + + result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length) + print(f'result:{result}') diff --git a/test_t2i.py b/test_t2i.py new file mode 100644 index 0000000000000000000000000000000000000000..0dc25ce6b337269bbbf6b8bf4384be92df3a0e05 --- /dev/null +++ b/test_t2i.py @@ -0,0 +1,371 @@ +import os +import sys +import 
argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + 
return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. 
" + f"- The human pose map shows body structure, orientation, and posture of subjects. " + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, 
default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return 
save_dir + +def text_refine(root, model, processor, caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in 
results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition 
== "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][:3750] + kinds = [v["category"] for v in data.values() if "category" in v][:3750] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i1.py b/test_t2i1.py new file mode 100644 index 0000000000000000000000000000000000000000..e76895cfb0a8772ba38d0ccb6ac67d1933bd02d0 --- /dev/null +++ b/test_t2i1.py @@ -0,0 +1,372 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( 
+ Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # 
-------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + 
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][7031:7500] + kinds = [v["category"] for v in data.values() if "category" in v][7031:7500] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 7031 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i2.py b/test_t2i2.py new file mode 100644 index 0000000000000000000000000000000000000000..72828e03f262693b2f702c8774b7c1e831223374 --- /dev/null +++ b/test_t2i2.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save 
results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    # Write each modality image, then a merged contact sheet for inspection.
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    # Caption-refinement VLM (Qwen3-VL) and its processor.
    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    # JODI joint-generation pipeline.
    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    # Fixed modality order: index 0 is the RGB image, the rest are annotations.
    # Must match the order of post_processors built below.
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per configured condition, in pipeline order.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][7500:11250] + kinds = [v["category"] for v in data.values() if "category" in v][7500:11250] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 7500 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i3.py b/test_t2i3.py new file mode 100644 index 0000000000000000000000000000000000000000..16378b3e10aff1d258d774eb105cf0719590dca8 --- /dev/null +++ b/test_t2i3.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save 
results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + 
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][11250:15000] + kinds = [v["category"] for v in data.values() if "category" in v][11250:15000] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 11250 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i4.py b/test_t2i4.py new file mode 100644 index 0000000000000000000000000000000000000000..79c6d5b9d36cd63f2c7a2773b631932130252f35 --- /dev/null +++ b/test_t2i4.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save 
results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + 
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][15000:18750] + kinds = [v["category"] for v in data.values() if "category" in v][15000:18750] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 15000 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i5.py b/test_t2i5.py new file mode 100644 index 0000000000000000000000000000000000000000..cd06720418cd133f3012f0333ac45b22aa8cb24b --- /dev/null +++ b/test_t2i5.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save 
results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + 
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][18750:22500] + kinds = [v["category"] for v in data.values() if "category" in v][18750:22500] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 18750 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i6.py b/test_t2i6.py new file mode 100644 index 0000000000000000000000000000000000000000..57fe61109d6b195c507e8aa0caa05ffaad41d138 --- /dev/null +++ b/test_t2i6.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save 
results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + 
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][22500:26250] + kinds = [v["category"] for v in data.values() if "category" in v][22500:26250] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 22500 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i7.py b/test_t2i7.py new file mode 100644 index 0000000000000000000000000000000000000000..c3f89954d0f049319711c18ba626294b32858ffb --- /dev/null +++ b/test_t2i7.py @@ -0,0 +1,373 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + 
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
caption, prompt, iter_num, index, max_length=300): + text = caption + ' ' + prompt + messages = build_multimodal_message(root, text) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index): + + control_images = [] + for name in modality_names: + control_images.append(Image.open(os.path.join(root, name+'.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=control_images, + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save 
results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + 
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import json + with open("/home/efs/mjw/mjw/code/Jodi/meta_data.json", "r") as f: + data = json.load(f) + + #prompts = [item["prompt"] for item in data if "prompt" in item] + prompts = [v["prompt"] for v in data.values() if "prompt" in v][26250:] + kinds = [v["category"] for v in data.values() if "category" in v][26250:] + + for i, (text, kind) in enumerate(zip(prompts, kinds)): + + i = i + 26250 + + caption = kind + ' image. ' + text + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, i) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, i, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, i) + + diff --git a/test_t2i_dpg.py b/test_t2i_dpg.py new file mode 100644 index 0000000000000000000000000000000000000000..82351bbecf3e56f4d329fa2c1724d7f0b06ad044 --- /dev/null +++ b/test_t2i_dpg.py @@ -0,0 +1,376 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers 
def build_multimodal_message(root, coarse_caption="a generic scene"):
    """Assemble a Qwen3-VL chat message for multi-modal caption refinement.

    Scans *root* for the known modality images (first matching extension
    wins: .png, then .jpg, then .jpeg) and builds a single user message
    containing every found image followed by one instruction text asking
    the model to fuse all modalities into a refined caption.

    Args:
        root: Directory that may contain the per-modality image files.
        coarse_caption: Rough caption the model should refine.

    Returns:
        A one-element list holding the user message dict (Qwen chat format).
    """
    expected_modalities = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]
    human_names = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Collect the first existing file per modality, keeping modality order.
    found_names = []
    available = []
    for mod in expected_modalities:
        hit = next(
            (Path(root) / f"{mod}{ext}"
             for ext in (".png", ".jpg", ".jpeg")
             if (Path(root) / f"{mod}{ext}").exists()),
            None,
        )
        if hit is not None:
            found_names.append(mod)
            available.append(str(hit))

    present_modalities = [human_names[mod] for mod in found_names]

    # Instruction text — wording kept verbatim from the original prompt.
    text_prompt = (
        f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Each modality provides distinct types of visual information that together describe the same subject: "
        f"- The RGB image provides color, texture, lighting, and the overall visual appearance. "
        f"- The line drawing reveals detailed structural outlines, shapes, and proportions. "
        f"- The edge map highlights object boundaries and contours. "
        f"- The depth map shows spatial distance, perspective, and 3D depth relationships. "
        f"- The normal map captures fine surface orientation, curvature, and geometric details. "
        f"- The albedo map shows true surface colors without lighting or shadow effects. "
        f"- The segmentation map provides semantic regions and object boundaries for scene composition. "
        f"- The human pose map shows body structure, orientation, and posture of subjects. "
        f"For each provided modality image, analyze it according to the above definitions and describe "
        f"the specific visual information it contributes in this particular case. "
        f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. "
        f"Do NOT describe each modality separately or mention modality names. "
        f"Focus on merging their information into a single coherent image description. "
        f"Refine the coarse caption into a more detailed and accurate image description. "
        f"Coarse caption: '{coarse_caption}' "
        + " ".join([""] * len(available))
    )

    # One user turn: all found images first, then the single text instruction.
    image_entries = [{"type": "image", "image": p} for p in available]
    return [{"role": "user",
             "content": image_entries + [{"type": "text", "text": text_prompt}]}]
parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index): + + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + +def text_refine(root, model, processor, 
def image_refine(caption, prompt, root, iter_num, modality_names, generator, index):
    """Regenerate all modalities conditioned on the previous iteration's outputs.

    Loads every modality image found in *root* as a control input, runs the
    pipeline in 't2i' mode with the refined prompt, and saves the new images
    (plus a merged preview) under <output_dir>/<index>/iteration_<iter_num>/.

    NOTE(review): relies on the module-level `pipe`, `args` and
    `post_processors` globals defined in the __main__ block — confirm this
    is intentional before reusing the function elsewhere.

    Returns:
        The pathlib.Path of the directory the images were saved to.
    """
    control_images = [
        Image.open(os.path.join(root, f"{name}.png")).convert("RGB")
        for name in modality_names
    ]

    print(f"🚀 Generating with prompt: {caption}")
    # NOTE(review): the generation prompt prepends the global --prompt value
    # (default "cat.") to the refined prompt — verify this is intended.
    prompt = args.prompt + ' ' + prompt
    n_channels = 1 + pipe.num_conditions
    raw = pipe(
        images=control_images,
        role=[0] * n_channels,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i'
    )

    # Post-process every channel, then flatten to a list of RGB PIL images.
    processed = [post_processors[ch](raw[ch]) for ch in range(n_channels)]
    stacked = torch.stack(processed, dim=1).reshape(-1, 3, args.height, args.width)
    pil_images = [T.ToPILImage()(t).convert("RGB") for t in stacked.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for slot, img in enumerate(pil_images):
        name = modality_names[slot]
        out_path = save_dir / f"{name}.png"
        img.save(out_path)
        print(f"💾 Saved {name} → {out_path}")

    concatenate_images([save_dir / f"{name}.png" for name in modality_names],
                       save_dir / f"merged_iteration_{iter_num}.png")

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import pandas as pd + + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for _, row in df.iterrows(): + + caption = row["text"] + + if caption not in cap_list: + cap_list.append(caption) + else: + continue + + name = str(row["item_id"]) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name) + + save_dir = init_dir + prompt = args.prompt + max_length = 1024 + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, step, name, max_length) + max_length += 100 + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name) + + diff --git a/test_t2i_dpg1.py b/test_t2i_dpg1.py new file mode 100644 index 0000000000000000000000000000000000000000..87da96f2eed3470f180f295e011c05666883d01a --- /dev/null +++ b/test_t2i_dpg1.py @@ -0,0 +1,634 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + 
def extract_main_objects(prompt: str):
    """Pull the main object nouns out of a caption.

    Strategy:
      1. Prefer the noun phrase right after a linking word such as
         "of"/"with"/"showing"/"featuring"/"containing"
         (e.g. "photo of a bottle and a refrigerator" -> bottle, refrigerator).
      2. Otherwise fall back to POS-tagging the whole caption.
      3. Drop media words like "photo"/"image", dedupe while keeping order.

    Returns:
        A list of lowercase noun strings; empty list for non-string input.
    """
    if not isinstance(prompt, str):
        return []

    text = prompt.strip().lower()

    def _nouns(fragment):
        # Keep tokens tagged as any NN* variant (NN, NNS, NNP, NNPS).
        return [tok for tok, tag in pos_tag(word_tokenize(fragment)) if tag.startswith("NN")]

    # Step 1: noun phrase after a linking preposition, if present.
    candidates = []
    hit = re.search(r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)", text)
    if hit:
        candidates = _nouns(hit.group(1))

    # Step 2: generic fallback over the whole caption.
    if not candidates:
        candidates = _nouns(text)

    # Step 3: filter common media/medium words.
    media_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }
    # Step 4: order-preserving dedupe.
    return list(dict.fromkeys(tok for tok in candidates if tok not in media_words))
in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. 
" + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, 
default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + 
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/description alignment with the VLM and get corrective feedback.

    Asks the model to rate how well the image at *image_path* matches
    *ori_prompt* (focusing on the main objects extracted from it) and to
    answer with a JSON payload {"Consistency": float, "Feedback": str}.

    Changes vs. the original: removed the large commented-out multi-axis
    prompt and the unused `number` local, made the JSON extraction explicit
    instead of relying on an AttributeError from `re.search(...).group(0)`,
    narrowed the bare `except Exception`, and rewrote the few-shot examples
    so they obey the prompt's own "no negative words" rule (they previously
    used "is not gray", contradicting the instruction).

    Args:
        image_path: Path of the RGB image to judge.
        model / processor: Qwen VL model and its processor.
        prompt: Current refined prompt (kept for caller compatibility;
            the query itself is built from *ori_prompt*).
        ori_prompt: Original user description the image must satisfy.
        max_length: Max new tokens for the evaluator's answer.

    Returns:
        (score, feedback): score as float (0.0 when parsing fails),
        feedback string (raw model text when JSON parsing fails).
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visually missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6).
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant should be purple, say: "Change the elephant's color to purple."
    - If the car should be red, say: "Change the car's color to red."
    - If three objects are required but only two appear, say: "Add one more object to make three."

    Return JSON only:
    {{
      "Consistency": ,
      "Feedback": ""
    }}
    Description: "{ori_prompt}"
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # Tokenize the multimodal chat turn and move it onto the model's device.
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the echoed prompt tokens, keep only the generated continuation.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the JSON block; fall back to (0.0, raw text) on any malformed answer.
    score, feedback = 0.0, text.strip()
    json_match = re.search(r"\{.*\}", text, re.S)
    if json_match is not None:
        try:
            data = json.loads(json_match.group(0))
            score = float(data.get("Consistency", 0))
            feedback = data.get("Feedback", "")
        except (json.JSONDecodeError, TypeError, ValueError):
            score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback
def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Rewrite *raw_prompt* into a visually enriched prompt with the Qwen VL model.

    Text-only chat turn: the instruction asks the model to add concrete
    visual detail (color, lighting, materials, spatial relations) without
    changing the scene or inventing new objects.

    Args:
        model: Loaded Qwen VL generation model.
        processor: Matching AutoProcessor (provides apply_chat_template / batch_decode).
        raw_prompt: The user's original prompt to be enhanced.
        max_length: Max new tokens for the rewritten prompt.

    Returns:
        The model's rewritten prompt as a single string.
    """
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    # Tokenize the chat turn; returns a dict of tensors.
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the echoed prompt tokens from each sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    # Batch size is 1, so return the single decoded string.
    return output_text[0]
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list) < 50: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + 
f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg10.py b/test_t2i_dpg10.py new file mode 100644 index 0000000000000000000000000000000000000000..b4886b57df67e6b7a845b54110e9104c01150307 --- /dev/null +++ b/test_t2i_dpg10.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if match: + segment = 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    All tiles are resized to a common square size (min of 1024 and the first
    image's width), laid out ``images_per_row`` per row, and written to
    ``save_path``.

    Args:
        image_paths: Paths of the images to merge, in display order.
        save_path: Destination file; parent directories are created if needed.
        images_per_row: Tiles per row; defaults to all images in a single row.
        image_format: PIL format name used for saving.

    Returns:
        ``save_path``, for chaining.

    Raises:
        ValueError: If ``image_paths`` is empty.
    """
    from PIL import Image

    if not image_paths:
        raise ValueError("image_paths must contain at least one image")

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Normalize every tile to the same square size so the grid is regular.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # Canvas height is one row-height per row. (The previous formula
    # sum(heights[:images_per_row]) * rows over-allocated by a factor of
    # images_per_row, leaving large black bands below the tiles.)
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    # os.path.dirname is "" for a bare filename; makedirs("") would raise.
    parent = os.path.dirname(str(save_path))
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Scans ``root`` for the known modality images, labels each found image with
    a short description of what evidence it provides, and appends a refinement
    instruction asking the model to improve the description while following the
    original intent of ``prompt`` and incorporating evaluator ``feedback``.

    Args:
        root: Directory containing the saved modality images.
        prompt: Caption whose original intent must be preserved.
        feedback: Corrective feedback from the consistency evaluator.
        coarse_caption: Caption to be enriched with visual detail.

    Returns:
        A single-turn ``messages`` list in Qwen chat-template format.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modality files that actually exist on disk
    # (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality labels used inside the instruction text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Task instruction appended after all image inputs.
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a short explanation before each image so the model knows what
    # evidence each modality contributes.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Append the overall task instruction last.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-line explanation of what a modality contributes (for prompts)."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the command-line parser for the refinement pipeline."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # Help text fixed: this flag controls refinement iterations, not steps.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser
# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Initial text-to-image pass: generate all modalities from the prompt.

    Same generation/saving flow as ``image_refine``, except the merged contact
    sheet is saved as ``merged_iteration.png`` (no iteration suffix).

    Returns:
        ``pathlib.Path`` of the directory the images were written to.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Decode each modality with its matching post-processor, then convert the
    # stacked tensor back into PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / "merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/text alignment with the VLM and obtain corrective feedback.

    Builds an evaluation prompt anchored on the main objects extracted from
    ``ori_prompt``, asks the model for a strict-JSON verdict, and parses it.

    Args:
        image_path: Path of the RGB image to judge.
        model: Qwen3-VL generation model.
        processor: Matching processor (chat template + decode).
        prompt: Current refined prompt (kept for API compatibility; the
            evaluation itself is anchored on ``ori_prompt``).
        ori_prompt: Original user prompt the image must satisfy.
        max_length: Max new tokens for the model's reply.

    Returns:
        ``(score, feedback)``: consistency score and one corrective sentence;
        on any parsing failure, ``(0.0, raw_model_text)``.
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # NOTE: examples are phrased positively ("Change ... to ...") so they are
    # consistent with the instruction that forbids negative wording; the
    # previous examples modeled the exact phrasing the prompt prohibits.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visually missing, has an incorrect attribute (such as color, size, or position), or is only partly visible, reduce the score sharply (<0.6).
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "Change the elephant to a purple color."
    - If a car appears blue but should be red, say: "Change the car to red."
    - If one of three objects is absent, say: "Add one more object so that three are visible."

    Return JSON only:
    {{
      "Consistency": ,
      "Feedback": ""
    }}
    Description: "{ori_prompt}"
    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the JSON verdict; fall back to (0.0, raw text) on any failure
    # (no JSON object found, malformed JSON, or non-numeric score).
    try:
        match = re.search(r"\{.*\}", text, re.S)
        if match is None:
            raise ValueError("no JSON object in model output")
        data = json.loads(match.group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the generation prompt using all saved modalities plus feedback.

    Feeds every modality image found under ``root`` to the VLM together with
    the evaluator feedback, writes the refined caption to
    ``{output_dir}/{index}/sample_{num}/iteration_{iter_num}/caption.txt``,
    and returns it.

    NOTE(review): arguments are forwarded as
    ``build_multimodal_message(root, caption, feedback, prompt)`` — i.e. the
    initial caption plays the "original intent" role and the latest prompt the
    "coarse caption" role. Confirm this ordering is intentional.

    NOTE(review): uses the module-level global ``args`` for the output dir.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate and strip the echoed input tokens from each sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # Persist the refined caption next to this iteration's images.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Rewrite a raw user prompt into a visually detailed generation prompt.

    Text-only call: asks the VLM to enrich ``raw_prompt`` with concrete visual
    detail without changing its meaning, and returns the model's reply.
    """
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate and strip the echoed input tokens from each sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<450: + continue + elif len(cap_list)<500: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg11.py b/test_t2i_dpg11.py new file mode 100644 index 0000000000000000000000000000000000000000..c33d8c2cb476ea4cde30bed88a08739dd4ac5b82 --- /dev/null +++ b/test_t2i_dpg11.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
def get_parser():
    """Build the CLI argument parser for JODI inference.

    Returns:
        argparse.ArgumentParser: parser covering model paths, generation
        hyper-parameters, and output location. Flag names, types and
        defaults are unchanged from the original script.
    """
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the VLM used for prompt refinement and evaluation.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the JODI model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the processor/tokenizer.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    # BUGFIX: help text previously said "Number of inference steps." (copied
    # from --steps); this flag actually controls refinement iterations.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5,
                        help="Classifier-free guidance scale.")
    parser.add_argument("--height", type=int, default=1024, help="Output image height in pixels.")
    parser.add_argument("--width", type=int, default=1024, help="Output image width in pixels.")
    parser.add_argument("--seed", type=int, default=42, help="Base random seed.")
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs",
                        help="Directory to save results.")
    return parser
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<500: + continue + elif len(cap_list)<550: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg12.py b/test_t2i_dpg12.py new file mode 100644 index 0000000000000000000000000000000000000000..6b9d647a6327b3788c91e5309780d635c11f91a8 --- /dev/null +++ b/test_t2i_dpg12.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file path (parent directories are created).
        images_per_row: images per grid row; defaults to all in one row.
        image_format: save format name passed to PIL (e.g. "png").

    Returns:
        save_path, unchanged, for caller convenience.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize everything to a common square size so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUGFIX: the previous code computed
    #   total_height = sum(heights[:images_per_row]) * rows
    # i.e. one full row's *summed* heights times the row count, allocating a
    # canvas images_per_row times too tall (paste loop only advances one row
    # height per row, leaving black padding below). All images share the same
    # height after the resize above, so one row is exactly heights[0] tall.
    row_height = heights[0]
    total_height = rows * row_height

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += row_height

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Probes ``root`` for the known modality images, interleaves a short
    textual tag before each one that is found, and appends the overall
    refinement instruction at the end.

    Returns:
        A single-element list holding one user-role message dict.
    """
    # Human-readable label per modality; the dict's insertion order also
    # fixes the probing order (image first, then annotations).
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Detect which modality files actually exist under root (first matching
    # extension wins for each modality).
    available = []
    for modality in readable_map:
        for suffix in (".png", ".jpg", ".jpeg"):
            candidate = Path(root) / f"{modality}{suffix}"
            if candidate.exists():
                available.append((modality, str(candidate)))
                break

    present_modalities = [readable_map[m] for m, _ in available]

    # Final task instruction appended after all modality images.
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # One short text tag before each image so the model knows what each
    # auxiliary input represents, then the instruction last.
    content = []
    for modality, path in available:
        label = readable_map.get(modality, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {label}, which provides {get_modality_description(modality)}."
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]
def get_parser():
    """Create the command-line parser for JODI inference (no Gradio UI)."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    add = parser.add_argument

    # Model / config locations.
    add("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
        help="Path to model checkpoint.")
    add("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    add("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
        help="Path to model checkpoint.")
    add("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
        help="Path to model checkpoint.")

    # Generation settings.
    add("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    add("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    add("--steps", type=int, default=20, help="Number of inference steps.")
    add("--iters", type=int, default=10, help="Number of inference steps.")
    add("--guidance_scale", type=float, default=4.5)
    add("--height", type=int, default=1024)
    add("--width", type=int, default=1024)
    add("--seed", type=int, default=42)

    # Output location.
    add("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + + if len(cap_list)<550: + continue + elif len(cap_list)<600: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg13.py b/test_t2i_dpg13.py new file mode 100644 index 0000000000000000000000000000000000000000..b76765f037bb2a4fdf0b190a408a5de2e363a770 --- /dev/null +++ b/test_t2i_dpg13.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<600: + continue + elif len(cap_list)<650: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
import os
import sys
import argparse
from pathlib import Path
from PIL import Image
from typing import Any
import torch
import torchvision.transforms as T
import re
from shutil import copy

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration
)
from transformers import AutoProcessor, Trainer
import itertools
import json
import nltk

nltk.download('averaged_perceptron_tagger_eng')
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")

from nltk import word_tokenize, pos_tag


def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following a linking preposition
         ('of', 'with', 'showing', 'featuring', 'containing'),
         e.g. "photo of a bottle and a refrigerator" -> bottle, refrigerator.
      2. Fall back to generic noun extraction over the whole prompt.
      3. Filter out medium words (photo, picture, image, ...).
      4. De-duplicate while preserving first-seen order.

    Returns a (possibly empty) list of lowercase nouns; non-str input
    yields an empty list.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: core noun phrase after a linking preposition.
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        segment = match.group(1)
        tokens = word_tokenize(segment)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 2: generic noun extraction if nothing matched.
    if not candidates:
        tokens = word_tokenize(prompt)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 3: drop common medium words that name the picture, not its content.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot"
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: de-duplicate, keeping order.
    main_objects = list(dict.fromkeys(filtered))

    return main_objects


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination path (including file name).
        images_per_row: images per row (defaults to all in one row).
        image_format: format name passed to PIL's save().

    Returns:
        save_path (unchanged), for chaining.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Normalize all tiles to a common square size.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: the canvas height was sum(heights[:images_per_row]) * rows,
    # i.e. one full *row sum* per row — images_per_row times too tall,
    # leaving large black bands below the tiles. One row is only as tall
    # as its tallest tile (all tiles are target_size after the resize).
    row_height = max(heights)
    total_height = row_height * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += row_height

    # os.path.dirname may be "" for a bare file name; only create real dirs.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """
    Build Qwen3-VL message for multi-modal caption refinement.
    Automatically detects available modalities under root.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modalities that actually exist on disk under root.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable labels for each modality channel.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Task instruction appended after all the image inputs.
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a one-line label before each image so the model knows
    # which modality it is looking at.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Finally attach the overall task instruction.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """One-sentence role description for a modality, embedded in the prompt."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the command-line parser for the JODI refinement script."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    add = parser.add_argument
    add("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
        help="Path to model checkpoint.")
    add("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    add("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
        help="Path to model checkpoint.")
    add("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
        help="Path to model checkpoint.")
    add("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    add("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    add("--steps", type=int, default=20, help="Number of inference steps.")
    add("--iters", type=int, default=10, help="Number of inference steps.")
    add("--guidance_scale", type=float, default=4.5)
    add("--height", type=int, default=1024)
    add("--width", type=int, default=1024)
    add("--seed", type=int, default=42)
    add("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial unconditional T2I pass and save every modality output.

    Returns the directory holding this iteration's images.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    n_streams = 1 + pipe.num_conditions  # RGB image + one stream per condition
    outputs = pipe(
        images=[None] * n_streams,
        role=[0] * n_streams,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Post-process each stream, then flatten to one PIL image per modality.
    processed = [post_processors[k](outputs[k]) for k in range(n_streams)]
    stacked = torch.stack(processed, dim=1).reshape(-1, 3, args.height, args.width)
    pil_images = [T.ToPILImage()(t).convert("RGB") for t in stacked.unbind(0)]

    # Persist per-modality PNGs under output_dir/<index>/sample_<num>/iteration_<iter_num>.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for name, img in zip(modality_names, pil_images):
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/text alignment with the VLM; return (score, feedback).

    The model is asked for a JSON object {"Consistency": ..., "Feedback": ...};
    on any parse failure the raw decoded text becomes the feedback and the
    score falls back to 0.0.
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    number = len(main_objects)  # kept for parity with the original scoring prompt
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
    "Consistency": ,
    "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # Run the VLM and decode only the newly generated tokens.
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [full[len(prefix):] for prefix, full in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; fall back to raw text on failure.
    try:
        payload = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(payload.get("Consistency", 0))
        feedback = payload.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback
def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the working prompt with the VLM using all saved modalities.

    NOTE(review): reads the module-level `args` for output_dir — it is set in
    the __main__ block before this is called; confirm if reusing elsewhere.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens from each sequence before decoding.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [full[len(prefix):] for prefix, full in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # Persist the refined caption next to this iteration's images.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Enrich a raw user prompt into a detail-rich generation prompt."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    trimmed = [full[len(prefix):] for prefix, full in zip(inputs.input_ids, generated_ids)]
    output_text = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities from the refined prompt and save them.

    NOTE(review): relies on module-level `pipe`, `args`, and `post_processors`
    defined in the __main__ block; `caption` and `root` are currently unused
    in the active code path.
    """
    print(f"🚀 Generating with prompt: {caption}")

    n_streams = 1 + pipe.num_conditions
    outputs = pipe(
        images=[None] * n_streams,
        role=[0] * n_streams,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality stream and flatten into PIL images.
    processed = [post_processors[k](outputs[k]) for k in range(n_streams)]
    stacked = torch.stack(processed, dim=1).reshape(-1, 3, args.height, args.width)
    pil_images = [T.ToPILImage()(t).convert("RGB") for t in stacked.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for name, img in zip(modality_names, pil_images):
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    # VLM used for prompt refinement and scoring.
    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    # JODI generation pipeline.
    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Map each configured condition to its post-processor; segmentation takes
    # constructor arguments, so it is handled separately from the table.
    condition_to_processor = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "openpose": OpenposePostProcessor,
    }
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition in condition_to_processor:
            post_processors.append(condition_to_processor[condition]())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    import pandas as pd
    csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv"
    df = pd.read_csv(csv_path)
    cap_list = []

    for idx, row in df.iterrows():
        ori_caption = row["text"]

        # Skip duplicated captions; this worker handles entries 650..699 only.
        if ori_caption in cap_list:
            continue
        cap_list.append(ori_caption)

        if len(cap_list) < 650:
            continue
        elif len(cap_list) < 700:
            pass
        else:
            exit()

        name = str(row["item_id"])

        for num in range(4):
            best_score = 0
            best_dir = None
            best_caption = None

            # Fresh random seed per sample; the same seed re-seeds the
            # generator before every image_refine call.
            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            # Initial enhanced prompt and first generation.
            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

            if score >= best_score:
                best_score = score
                best_dir = save_dir
                best_caption = prompt

            # Iterative refine -> regenerate -> rescore loop, keeping the best.
            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length)
                max_length += 100
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

                if score >= best_score:
                    best_score = score
                    best_dir = save_dir
                    best_caption = prompt

            # Snapshot the best iteration's image, caption, and score.
            best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
import os
import sys
import argparse
from pathlib import Path
from PIL import Image
from typing import Any
import torch
import torchvision.transforms as T
import re
from shutil import copy

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration
)
from transformers import AutoProcessor, Trainer
import itertools
import json
import nltk

nltk.download('averaged_perceptron_tagger_eng')
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")

from nltk import word_tokenize, pos_tag


def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following a linking preposition
         ('of', 'with', 'showing', 'featuring', 'containing'),
         e.g. "photo of a bottle and a refrigerator" -> bottle, refrigerator.
      2. Fall back to generic noun extraction over the whole prompt.
      3. Filter out medium words (photo, picture, image, ...).
      4. De-duplicate while preserving first-seen order.

    Returns a (possibly empty) list of lowercase nouns; non-str input
    yields an empty list.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: core noun phrase after a linking preposition.
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        segment = match.group(1)
        tokens = word_tokenize(segment)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 2: generic noun extraction if nothing matched.
    if not candidates:
        tokens = word_tokenize(prompt)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 3: drop common medium words that name the picture, not its content.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot"
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: de-duplicate, keeping order.
    main_objects = list(dict.fromkeys(filtered))

    return main_objects


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination path (including file name).
        images_per_row: images per row (defaults to all in one row).
        image_format: format name passed to PIL's save().

    Returns:
        save_path (unchanged), for chaining.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Normalize all tiles to a common square size.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUG FIX: the canvas height was sum(heights[:images_per_row]) * rows,
    # i.e. one full *row sum* per row — images_per_row times too tall,
    # leaving large black bands below the tiles. One row is only as tall
    # as its tallest tile (all tiles are target_size after the resize).
    row_height = max(heights)
    total_height = row_height * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += row_height

    # os.path.dirname may be "" for a bare file name; only create real dirs.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """
    Build Qwen3-VL message for multi-modal caption refinement.
    Automatically detects available modalities under root.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modalities that actually exist on disk under root.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable labels for each modality channel.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Task instruction appended after all the image inputs.
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a one-line label before each image so the model knows
    # which modality it is looking at.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Finally attach the overall task instruction.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """One-sentence role description for a modality, embedded in the prompt."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the command-line parser for the JODI refinement script."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    add = parser.add_argument
    add("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
        help="Path to model checkpoint.")
    add("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    add("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
        help="Path to model checkpoint.")
    add("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
        help="Path to model checkpoint.")
    add("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    add("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    add("--steps", type=int, default=20, help="Number of inference steps.")
    add("--iters", type=int, default=10, help="Number of inference steps.")
    add("--guidance_scale", type=float, default=4.5)
    add("--height", type=int, default=1024)
    add("--width", type=int, default=1024)
    add("--seed", type=int, default=42)
    add("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial unconditional T2I pass and save every modality output.

    Returns the directory holding this iteration's images.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    n_streams = 1 + pipe.num_conditions  # RGB image + one stream per condition
    outputs = pipe(
        images=[None] * n_streams,
        role=[0] * n_streams,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Post-process each stream, then flatten to one PIL image per modality.
    processed = [post_processors[k](outputs[k]) for k in range(n_streams)]
    stacked = torch.stack(processed, dim=1).reshape(-1, 3, args.height, args.width)
    pil_images = [T.ToPILImage()(t).convert("RGB") for t in stacked.unbind(0)]

    # Persist per-modality PNGs under output_dir/<index>/sample_<num>/iteration_<iter_num>.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for name, img in zip(modality_names, pil_images):
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/text alignment with the VLM; return (score, feedback).

    The model is asked for a JSON object {"Consistency": ..., "Feedback": ...};
    on any parse failure the raw decoded text becomes the feedback and the
    score falls back to 0.0.
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    number = len(main_objects)  # kept for parity with the original scoring prompt
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
    "Consistency": ,
    "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # Run the VLM and decode only the newly generated tokens.
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [full[len(prefix):] for prefix, full in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span as JSON; fall back to raw text on failure.
    try:
        payload = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(payload.get("Consistency", 0))
        feedback = payload.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<700: + continue + elif len(cap_list)<750: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg16.py b/test_t2i_dpg16.py new file mode 100644 index 0000000000000000000000000000000000000000..1c93087fbc99d304f858d765a7ce44d89ea0c67f --- /dev/null +++ b/test_t2i_dpg16.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<750: + continue + elif len(cap_list)<800: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following 'of', 'with', 'showing',
         'featuring' or 'containing' (e.g. "photo of a bottle and a
         refrigerator" -> ["bottle", "refrigerator"]).
      2. Fall back to generic noun extraction over the whole prompt.
      3. Filter common medium words (photo, picture, image, ...).
      4. De-duplicate while preserving order.

    Returns a (possibly empty) list of lowercase noun strings.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: match the core noun phrase after a linking preposition.
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        tagged = pos_tag(word_tokenize(match.group(1)))
        candidates = [word for word, pos in tagged if pos.startswith("NN")]

    # Step 2: fall back to generic noun extraction over the full prompt.
    if not candidates:
        tagged = pos_tag(word_tokenize(prompt))
        candidates = [word for word, pos in tagged if pos.startswith("NN")]

    # Step 3: drop medium/media words that never denote scene content.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }
    filtered = [word for word in candidates if word not in filter_words]

    # Step 4: de-duplicate while keeping first-seen order.
    return list(dict.fromkeys(filtered))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid and save it.

    Args:
        image_paths: list of image file paths (str or Path).
        save_path: destination file path (including file name).
        images_per_row: images per grid row; defaults to all in one row.
        image_format: output format name passed to PIL (e.g. "png").

    Returns:
        save_path unchanged, after the merged image has been written.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Normalize every tile to the same square size (capped at 1024).
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    cell_width = max(img.size[0] for img in images)
    cell_height = max(img.size[1] for img in images)
    rows = (len(images) + images_per_row - 1) // images_per_row

    # BUG FIX: the original computed
    #   total_height = sum(heights[:images_per_row]) * rows
    # i.e. (row_width_in_heights * rows), which made the canvas
    # images_per_row times too tall and left large black bands.
    # One row contributes exactly one cell height.
    canvas = Image.new("RGB", (cell_width * images_per_row, cell_height * rows))

    y_offset = 0
    for start in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[start:start + images_per_row]:
            canvas.paste(img, (x_offset, y_offset))
            x_offset += cell_width
        y_offset += cell_height

    # Guard: dirname is "" when save_path has no directory component.
    parent = os.path.dirname(str(save_path))
    if parent:
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Scans *root* for the known modality images (png/jpg/jpeg), prefixes each
    found image with a short text tag explaining what that modality provides,
    then appends the refinement instruction referencing *prompt* (the intent
    to preserve), *feedback* (corrective hints from the evaluator) and
    *coarse_caption*.

    Returns a single-element message list in Qwen chat format.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modalities that actually exist under root
    # (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable names used both in the instruction and per-image tags.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Overall task instruction (runtime prompt text kept verbatim).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Tag every image with a one-line description of its modality so the VLM
    # can ground each input; the overall instruction comes last.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-line explanation of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
def get_parser():
    """Build the CLI argument parser for headless JODI inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help text previously duplicated the --steps description.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5, help="Classifier-free guidance scale.")
    parser.add_argument("--height", type=int, default=1024, help="Output image height in pixels.")
    parser.add_argument("--width", type=int, default=1024, help="Output image width in pixels.")
    parser.add_argument("--seed", type=int, default=42, help="Base random seed.")
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial joint text-to-image pass and save every modality.

    Generates the RGB image plus all condition maps in one joint pass,
    post-processes each modality, saves each as a PNG under
    <output_dir>/<index>/sample_<num>/iteration_<iter_num>/, and writes a
    merged contact sheet.

    Returns:
        Path of the iteration save directory.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # One post-processor per modality: RGB first, then the condition maps.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # mkdir(parents=True) also creates args.output_dir itself.
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / "merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score how well the generated image matches *ori_prompt* with the VLM.

    Builds a JSON-output evaluation prompt focused on the main objects
    extracted from *ori_prompt*, runs the VLM on the image, and parses the
    returned JSON.

    Returns:
        (score, feedback). On any parse failure score is 0.0 and the raw
        model text is returned as feedback.
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # Runtime prompt text kept verbatim from the original.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
        "Consistency": ,
        "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Robust JSON extraction. The original called re.search(...).group(0)
    # unconditionally (AttributeError on no match) inside a blanket
    # `except Exception` that also hid real errors; be explicit instead.
    score, feedback = 0.0, text.strip()
    json_match = re.search(r"\{.*\}", text, re.S)
    if json_match is not None:
        try:
            data = json.loads(json_match.group(0))
            score = float(data.get("Consistency", 0))
            feedback = data.get("Feedback", "")
        except (json.JSONDecodeError, TypeError, ValueError):
            pass  # keep the raw-text fallback

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine *prompt* with the VLM using all modality images under *root*.

    NOTE(review): relies on the module-level `args` (output_dir) set by the
    script entry point — confirm it is defined before calling.

    Returns the refined caption string (also written to caption.txt in the
    iteration directory).
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    # Generate, then strip the echoed input tokens from each sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    (save_dir / "caption.txt").write_text(output_text[0].strip(), encoding="utf-8")

    return output_text[0]
@torch.inference_mode()  # consistency: init_t2i/evaluate_consistency already disable autograd
def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities with the refined *prompt* and save them.

    NOTE(review): uses the module-level `pipe`, `args` and `post_processors`
    defined by the script entry point. *caption* is only logged; generation
    itself is conditioned on *prompt*.

    Returns:
        Path of the iteration save directory.
    """
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # One post-processor per modality: RGB first, then the condition maps.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # mkdir(parents=True) also creates args.output_dir itself.
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    # VLM used for caption refinement and image-text scoring.
    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    # Joint multi-modality diffusion pipeline.
    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    # Output order: RGB image first, then the condition maps.
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per configured condition, matching modality order.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    import pandas as pd
    csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv"
    df = pd.read_csv(csv_path)
    cap_list = []

    for idx, row in df.iterrows():
        ori_caption = row["text"]

        # DPG-bench repeats each caption once per question: keep first only.
        if ori_caption in cap_list:
            continue
        cap_list.append(ori_caption)

        # This worker handles only the [800, 850) slice of unique captions.
        if len(cap_list) < 800:
            continue
        if len(cap_list) >= 850:
            # FIX: was a bare exit() — the site-injected helper is not
            # guaranteed to exist; nothing follows the loop, so break.
            break

        name = str(row["item_id"])

        for num in range(4):
            best_score = 0
            best_dir = None
            best_caption = None

            # Fresh seed per sample, reused for every refinement pass so
            # image changes come from the prompt, not from new noise.
            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)
            if score >= best_score:
                best_score, best_dir, best_caption = score, save_dir, prompt

            # Iterative refine loop: text feedback → new prompt → regenerate.
            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length)
                max_length += 100
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)
                if score >= best_score:
                    best_score, best_dir, best_caption = score, save_dir, prompt

            # Persist the best-scoring iteration for this sample.
            best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            (best_save_dir / "caption.txt").write_text(best_caption.strip(), encoding="utf-8")
            (best_save_dir / "score.txt").write_text(str(best_score), encoding="utf-8")
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid and save it.

    Args:
        image_paths: list of image file paths (str or Path).
        save_path: destination file path (including file name).
        images_per_row: images per grid row; defaults to all in one row.
        image_format: output format name passed to PIL (e.g. "png").

    Returns:
        save_path unchanged, after the merged image has been written.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Normalize every tile to the same square size (capped at 1024).
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    cell_width = max(img.size[0] for img in images)
    cell_height = max(img.size[1] for img in images)
    rows = (len(images) + images_per_row - 1) // images_per_row

    # BUG FIX: the original computed
    #   total_height = sum(heights[:images_per_row]) * rows
    # i.e. (row_width_in_heights * rows), which made the canvas
    # images_per_row times too tall and left large black bands.
    # One row contributes exactly one cell height.
    canvas = Image.new("RGB", (cell_width * images_per_row, cell_height * rows))

    y_offset = 0
    for start in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[start:start + images_per_row]:
            canvas.paste(img, (x_offset, y_offset))
            x_offset += cell_width
        y_offset += cell_height

    # Guard: dirname is "" when save_path has no directory component.
    parent = os.path.dirname(str(save_path))
    if parent:
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Scans *root* for the known modality images (png/jpg/jpeg), prefixes each
    found image with a short text tag explaining what that modality provides,
    then appends the refinement instruction referencing *prompt* (the intent
    to preserve), *feedback* (corrective hints from the evaluator) and
    *coarse_caption*.

    Returns a single-element message list in Qwen chat format.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modalities that actually exist under root
    # (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable names used both in the instruction and per-image tags.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Overall task instruction (runtime prompt text kept verbatim).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Tag every image with a one-line description of its modality so the VLM
    # can ground each input; the overall instruction comes last.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-line explanation of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
def get_parser():
    """Build the CLI argument parser for headless JODI inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help text previously duplicated the --steps description.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5, help="Classifier-free guidance scale.")
    parser.add_argument("--height", type=int, default=1024, help="Output image height in pixels.")
    parser.add_argument("--width", type=int, default=1024, help="Output image width in pixels.")
    parser.add_argument("--seed", type=int, default=42, help="Base random seed.")
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial joint text-to-image pass and save every modality.

    Generates the RGB image plus all condition maps in one joint pass,
    post-processes each modality, saves each as a PNG under
    <output_dir>/<index>/sample_<num>/iteration_<iter_num>/, and writes a
    merged contact sheet.

    Returns:
        Path of the iteration save directory.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # One post-processor per modality: RGB first, then the condition maps.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # mkdir(parents=True) also creates args.output_dir itself.
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / "merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<850: + continue + elif len(cap_list)<900: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg19.py b/test_t2i_dpg19.py new file mode 100644 index 0000000000000000000000000000000000000000..60b49944573c8924069863f94d9592222a39e423 --- /dev/null +++ b/test_t2i_dpg19.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"):
    """
    Build Qwen3-VL message for multi-modal caption refinement.
    Automatically detects available modalities under root.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # --- Detect which modality files exist under root (first matching extension wins) ---
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # --- Human-readable label for each modality, used inside the prompt text ---
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # --- Compose the refinement instruction (interpolates prompt, feedback, coarse_caption) ---
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # --- Message content: tag every image with its modality label before attaching it ---
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # The overall task instruction goes last, after all modality images.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages

def get_modality_description(name: str) -> str:
    """One-sentence description of what each modality contributes; embedded in the prompt."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# 
# ------------------------------
def get_parser():
    """CLI arguments for the JODI + Qwen3-VL iterative refinement run."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Qwen3-VL checkpoint used for caption refinement and scoring.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the JODI model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or hub id used to load the processor.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # Fixed help text: this is the refinement-loop count, not diffusion steps.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial text-to-image JODI pass and save every modality.

    Generates all (1 + num_conditions) modalities unconditionally (role=0 for
    every slot) and writes them under
    output_dir/index/sample_{num}/iteration_{iter_num}.

    Returns:
        Path of the directory the iteration's outputs were written to.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Post-process each modality, then split back into per-modality PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    # Name the merged preview consistently with image_refine()
    # (was a placeholder-less f-string "merged_iteration.png").
    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):

    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    number = len(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"
    # --- Build the Qwen evaluation input ---
    #eval_prompt = f"""
    #You are an image–text consistency evaluator.
    #Given one RGB image and a textual description, evaluate how well the description matches
    #the visual evidence in the image across the following semantic dimensions:
    #{number} Main described objects (core subjects): {main_str}.
    #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image?
    #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate?
    #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct?
    #4. 
    # **Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent?
    #5. **Global (G)** – Does the overall scene composition and meaning match the description?
    #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)?
    #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured?
    #If any of the main objects are only partially visible, occluded, or treated as background,
    #reduce the score for Completeness and Salience.
    #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect).
    #Then provide one short feedback sentence describing which aspects could be improved.
    #Return JSON strictly:
    #{{
    # "Entity": ,
    # "Attribute": ,
    # "Relation": ,
    # "CountState": ,
    # "Global": ,
    # "Completeness": ,
    # "Salience": ,
    # "Feedback": ""
    #}}
    #Description: "{prompt}"
    #
    #"""
    # NOTE(review): the few-shot examples inside this prompt use negative phrasing
    # ("is not gray"), contradicting the "Do NOT use any negative words" rule stated
    # in the same prompt — consider aligning the examples with the rule.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
    "Consistency": ,
    "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # --- Run the VLM ---
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated answer is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]
    #print(text)
    # --- Parse the JSON answer; on any failure fall back to score 0 with the raw text as feedback ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")

        # (previously: Overall was summed manually from per-aspect scores)
        #score = e + a + r + c + g + v

    except Exception:
        score, feedback = 0.0, text.strip()

    print(
        #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]"
        f" → Overall={score:.3f}"
    )
    print(f"💡 Feedback: {feedback}")
    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine `prompt` via the VLM using all saved modalities under `root`; writes and returns the new caption."""
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = 
processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Keep only the newly generated tokens (drop the echoed prompt).
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    # NOTE(review): `args` here is the module-level global set in __main__, not a
    # parameter — confirm this coupling is intended.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]

def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """One-shot (text-only) prompt enrichment: rewrite `raw_prompt` with added visual detail."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]



def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities with the refined `prompt`; saves into iteration_{iter_num} and returns that dir."""
    #control_images = []
    #for name in modality_names:
    #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB"))

    # NOTE(review): prints `caption` but generates with `prompt`; also relies on the
    # module-level globals `pipe`, `args`, `post_processors` — confirm intended.
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<900: + continue + elif len(cap_list)<950: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg2.py b/test_t2i_dpg2.py new file mode 100644 index 0000000000000000000000000000000000000000..f469b844b9ce90c70498eeba63fc5f613670609b --- /dev/null +++ b/test_t2i_dpg2.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<50: + continue + elif len(cap_list)<100: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg20.py b/test_t2i_dpg20.py new file mode 100644 index 0000000000000000000000000000000000000000..cd2633d0b45cdd91f116a46f5594e8814c4720ed --- /dev/null +++ b/test_t2i_dpg20.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
# ------------------------------
def get_parser():
    """Build the CLI parser for the JODI DPG-bench generation script."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help text was a copy-paste of --steps; --iters is the number of
    # refinement iterations per sample, not the diffusion step count.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations per sample.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial text-to-image pass and save every modality.

    Results go to <output_dir>/<index>/sample_<num>/iteration_<iter_num>.
    Returns the directory the results were written to (a Path).
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Apply post-processing for each modality, then convert each channel to PIL.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    # FIX: include the iteration number in the merged-grid name for
    # consistency with image_refine(), which writes merged_iteration_{iter_num}.png.
    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/description alignment with the Qwen VLM; returns (score, feedback)."""
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    number = len(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"
    # --- Build the Qwen input ---
    # (older multi-dimension rubric kept below for reference)
    #eval_prompt = f"""
    #You are an image–text consistency evaluator.
    #Given one RGB image and a textual description, evaluate how well the description matches
    #the visual evidence in the image across the following semantic dimensions:
    #{number} Main described objects (core subjects): {main_str}.
    #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image?
    #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate?
    #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct?
    #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<950: + continue + elif len(cap_list)<1000: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg21.py b/test_t2i_dpg21.py new file mode 100644 index 0000000000000000000000000000000000000000..e69e6d37a160a63e0ed4c66f257f46ad4f99d7d1 --- /dev/null +++ b/test_t2i_dpg21.py @@ -0,0 +1,635 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
# ------------------------------
def get_parser():
    """Build the CLI parser for the JODI DPG-bench generation script."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUG FIX: help text was a copy-paste of --steps; --iters is the number of
    # refinement iterations per sample, not the diffusion step count.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations per sample.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial text-to-image pass and save every modality.

    Results go to <output_dir>/<index>/sample_<num>/iteration_<iter_num>.
    Returns the directory the results were written to (a Path).
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Apply post-processing for each modality, then convert each channel to PIL.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    # FIX: include the iteration number in the merged-grid name for
    # consistency with image_refine(), which writes merged_iteration_{iter_num}.png.
    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/description alignment with the Qwen VLM; returns (score, feedback)."""
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    number = len(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"
    # --- Build the Qwen input ---
    # (older multi-dimension rubric kept below for reference)
    #eval_prompt = f"""
    #You are an image–text consistency evaluator.
    #Given one RGB image and a textual description, evaluate how well the description matches
    #the visual evidence in the image across the following semantic dimensions:
    #{number} Main described objects (core subjects): {main_str}.
    #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image?
    #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate?
    #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct?
    #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<1000: + continue + else: + pass + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + 
f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg3.py b/test_t2i_dpg3.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5b3c80803db477abb893d7e0716be4670764ab --- /dev/null +++ b/test_t2i_dpg3.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if match: + segment = 
match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"): + 
""" + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. 
" + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, 
default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res 
in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. **Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. 
**Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) + Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). 
+ For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] 
for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list) <100: + continue + elif len(cap_list) <150: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg4.py b/test_t2i_dpg4.py new file mode 100644 index 0000000000000000000000000000000000000000..e4bd50fdcc27cc4f273ee6faae2a2a98d4e99b90 --- /dev/null +++ b/test_t2i_dpg4.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<150: + continue + elif len(cap_list)<200: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg5.py b/test_t2i_dpg5.py new file mode 100644 index 0000000000000000000000000000000000000000..d1695057cbac7952ae46810ee20e9079f4ce7abf --- /dev/null +++ b/test_t2i_dpg5.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<200: + continue + elif len(cap_list)<250: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg6.py b/test_t2i_dpg6.py new file mode 100644 index 0000000000000000000000000000000000000000..1b869f43c3c349cd951b7997db6c8b0589adf659 --- /dev/null +++ b/test_t2i_dpg6.py @@ -0,0 +1,637 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<250: + continue + elif len(cap_list)<300: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg7.py b/test_t2i_dpg7.py new file mode 100644 index 0000000000000000000000000000000000000000..a48b01566e25c0f7592c546a60e7eacd5b953b2a --- /dev/null +++ b/test_t2i_dpg7.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<300: + continue + elif len(cap_list)<350: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg8.py b/test_t2i_dpg8.py new file mode 100644 index 0000000000000000000000000000000000000000..00e20921070cd190190e06ca5ace2f01fa4b1ebc --- /dev/null +++ b/test_t2i_dpg8.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
+ Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). + For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + #print(text) + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = 
processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = 
modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
pandas as pd + csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv" + df = pd.read_csv(csv_path) + cap_list = [] + + for idx, row in df.iterrows(): + + ori_caption = row["text"] + + if ori_caption not in cap_list: + cap_list.append(ori_caption) + else: + continue + + if len(cap_list)<350: + continue + elif len(cap_list)<400: + pass + else: + exit() + + name = str(row["item_id"]) + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", 
encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + diff --git a/test_t2i_dpg9.py b/test_t2i_dpg9.py new file mode 100644 index 0000000000000000000000000000000000000000..6b5a7f4f5321b35a8f28b26e3422fcd0c02e825a --- /dev/null +++ b/test_t2i_dpg9.py @@ -0,0 +1,636 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools +import json +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if 
match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a 
generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. 
" + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. " + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# 
------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./dpg_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + 
num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. 
**Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. **Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) 
def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine *prompt* with the VLM using every modality image saved under *root*.

    The refined caption is written to
    <output_dir>/<index>/sample_<num>/iteration_<iter_num>/caption.txt and returned.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Drop the echoed input tokens so only the newly generated text is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Expand *raw_prompt* into a detail-rich prompt suitable for image generation."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    return output_text[0]


def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities from *prompt* and save them under iteration_<iter_num>.

    NOTE(review): relies on the module-level ``pipe``, ``args`` and
    ``post_processors`` created in ``__main__`` despite taking parameters.
    """
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality, then convert every tensor to a PIL image.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / index / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    # Fix: these names are used below / by evaluate_consistency but are
    # missing from the file-header imports, which made the script crash
    # with NameError at runtime.
    import re  # noqa: F401 -- used by evaluate_consistency
    import json  # noqa: F401 -- used by evaluate_consistency
    import pandas as pd
    from shutil import copy

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per configured condition; the RGB image slot is first.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    csv_path = "/home/efs/mjw/mjw/dataset/dataset/dpg_bench/ELLA/dpg_bench/dpg_bench.csv"
    df = pd.read_csv(csv_path)
    cap_list = []

    for idx, row in df.iterrows():
        ori_caption = row["text"]

        # Deduplicate prompts, then process only captions 400..449 (this shard).
        if ori_caption in cap_list:
            continue
        cap_list.append(ori_caption)

        if len(cap_list) < 400:
            continue
        if len(cap_list) >= 450:
            sys.exit(0)  # shard finished (was bare exit())

        name = str(row["item_id"])

        for num in range(4):
            best_score = 0
            best_dir = None
            best_caption = None

            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, name, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

            if score >= best_score:
                best_score, best_dir, best_caption = score, save_dir, prompt

            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, name, num, max_length)
                max_length += 100
                # Re-seed so every iteration starts from the same noise.
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, name, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

                if score >= best_score:
                    best_score, best_dir, best_caption = score, save_dir, prompt

            best_save_dir = Path(args.output_dir) / name / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
def extract_main_objects(prompt: str):
    """Return the main object nouns mentioned in *prompt*, deduplicated, in order.

    Strategy: prefer the noun phrase following a linking preposition
    ("photo of a bottle" -> "bottle"); otherwise fall back to every noun in
    the prompt; finally drop medium words such as "photo" or "painting".
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: nouns after "of/with/showing/featuring/containing".
    match = re.search(r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)", prompt)
    candidates = []
    if match:
        tagged = pos_tag(word_tokenize(match.group(1)))
        candidates = [word for word, tag in tagged if tag.startswith("NN")]

    # Step 2: fall back to all nouns in the whole prompt.
    if not candidates:
        tagged = pos_tag(word_tokenize(prompt))
        candidates = [word for word, tag in tagged if tag.startswith("NN")]

    # Step 3: remove words that name the medium rather than the content.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }

    # Step 4: dedupe while preserving first-seen order.
    return list(dict.fromkeys(w for w in candidates if w not in filter_words))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file path (parent directories are created).
        images_per_row: images per grid row (default: all on one row).
        image_format: output format passed to PIL.

    Returns:
        save_path.

    Raises:
        ValueError: if image_paths is empty.
    """
    if not image_paths:
        raise ValueError("image_paths must not be empty")

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Normalize everything to one square size so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    # Fix: canvas height is one target_size per row.  The previous formula
    # (sum of a full row's heights times the row count) allocated a canvas
    # images_per_row times too tall, leaving large black bands in the output.
    rows = (len(images) + images_per_row - 1) // images_per_row
    canvas = Image.new("RGB", (target_size * images_per_row, target_size * rows))

    for i, img in enumerate(images):
        col, row = i % images_per_row, i // images_per_row
        canvas.paste(img, (col * target_size, row * target_size))

    parent = os.path.dirname(str(save_path))
    if parent:  # os.makedirs("") raises FileNotFoundError
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """
    Build Qwen3-VL message for multi-modal caption refinement.
    Automatically detects available modalities under root.
    """
    # Modality key -> short human-readable label (order is the scan order).
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    # Probe the directory for each modality; first matching extension wins.
    available = []
    for modality in readable_map:
        for ext in (".png", ".jpg", ".jpeg"):
            candidate = Path(root) / f"{modality}{ext}"
            if candidate.exists():
                available.append((modality, str(candidate)))
                break

    present_modalities = [readable_map[m] for m, _ in available]

    # Main task instruction appended after all modality images.
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # A short text caption precedes each image so the model knows what it sees.
    content = []
    for modality, path in available:
        label = readable_map.get(modality, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {label}, which provides {get_modality_description(modality)}."
        })
        content.append({"type": "image", "image": path})
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]

def get_modality_description(name: str) -> str:
    """One-sentence description of what evidence each modality contributes."""
    descriptions = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return descriptions.get(name, "complementary visual evidence")
def get_parser():
    """Build the CLI for the JODI refinement loop (no Gradio UI)."""
    p = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    p.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                   help="Path to model checkpoint.")
    p.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    p.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                   help="Path to model checkpoint.")
    p.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                   help="Path to model checkpoint.")
    p.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    p.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    p.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    p.add_argument("--iters", type=int, default=10, help="Number of inference steps.")
    p.add_argument("--guidance_scale", type=float, default=4.5)
    p.add_argument("--height", type=int, default=1024)
    p.add_argument("--width", type=int, default=1024)
    p.add_argument("--seed", type=int, default=42)
    p.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return p


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial unconditioned generation pass and save every modality."""
    print(f"🚀 Generating with prompt: {prompt}")

    n_outputs = 1 + pipe.num_conditions
    outputs = pipe(
        images=[None] * n_outputs,
        role=[0] * n_outputs,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Post-process each modality and convert the tensors to PIL images.
    processed = [post_processors[i](outputs[i]) for i in range(n_outputs)]
    stacked = torch.stack(processed, dim=1).reshape(-1, 3, args.height, args.width)
    pil_images = [T.ToPILImage()(t).convert("RGB") for t in stacked.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(pil_images):
        name = modality_names[idx]
        out_path = save_dir / f"{name}.png"
        img.save(out_path)
        print(f"💾 Saved {name} → {out_path}")

    concatenate_images(
        [save_dir / f"{name}.png" for name in modality_names],
        save_dir / "merged_iteration.png",
    )

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Ask the VLM how well the image at *image_path* matches *ori_prompt*.

    Returns (score, feedback): score parsed from the model's JSON answer and a
    corrective feedback sentence for the next refinement round.  Falls back to
    (0.0, raw_text) when the reply is not parseable JSON.
    """
    import json  # imported here: the file header does not import json

    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # Fix: the original few-shot examples contradicted the rules stated right
    # above them (they used negative phrasing such as "is not gray" and did not
    # start with a verb).  They are rewritten as imperative, positive sentences,
    # and the JSON template placeholders (which had been stripped) are restored.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "Change the elephant to purple."
    - If a car appears blue but should be red, say: "Change the car to red."
    - If one of three objects is missing, say: "Add one more object to make three."

    Return JSON only:
    {{
        "Consistency": <score between 0.0 and 1.0>,
        "Feedback": "<one corrective sentence>"
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the echoed prompt tokens before decoding.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    try:
        # Grab the first {...} blob; the model may wrap the JSON in prose.
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        # Unparseable reply: surface the raw text as feedback with a zero score.
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback
def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Produce a refined caption from all saved modalities and persist it.

    Writes the result to
    <output_dir>/index_<index>/sample_<num>/iteration_<iter_num>/caption.txt.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    model_inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    full_ids = model.generate(**model_inputs, max_new_tokens=max_length)
    # Keep only the newly generated tokens (drop the echoed prompt).
    new_token_ids = [
        full[len(inp):] for inp, full in zip(model_inputs.input_ids, full_ids)
    ]
    decoded = processor.batch_decode(
        new_token_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(decoded)

    os.makedirs(args.output_dir, exist_ok=True)
    target_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    target_dir.mkdir(parents=True, exist_ok=True)
    with open(target_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(decoded[0].strip())

    return decoded[0]

def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Expand *raw_prompt* into a vivid, detail-rich generation prompt."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]
    model_inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    full_ids = model.generate(**model_inputs, max_new_tokens=max_length)
    new_token_ids = [
        full[len(inp):] for inp, full in zip(model_inputs.input_ids, full_ids)
    ]
    decoded = processor.batch_decode(
        new_token_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    return decoded[0]
def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate every modality from *prompt* and save under iteration_<iter_num>.

    NOTE(review): relies on the module-level ``pipe``, ``args`` and
    ``post_processors`` created in ``__main__`` despite taking parameters.
    """
    print(f"🚀 Generating with prompt: {caption}")

    n_outputs = 1 + pipe.num_conditions
    outputs = pipe(
        images=[None] * n_outputs,
        role=[0] * n_outputs,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality and turn the tensors into PIL images.
    processed = [post_processors[i](outputs[i]) for i in range(n_outputs)]
    stacked = torch.stack(processed, dim=1).reshape(-1, 3, args.height, args.width)
    pil_images = [T.ToPILImage()(t).convert("RGB") for t in stacked.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(pil_images):
        name = modality_names[idx]
        out_path = save_dir / f"{name}.png"
        img.save(out_path)
        print(f"💾 Saved {name} → {out_path}")

    concatenate_images(
        [save_dir / f"{name}.png" for name in modality_names],
        save_dir / f"merged_iteration_{iter_num}.png",
    )

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Map each configured condition to its post-processor; image slot is first.
    post_processors: list[Any] = [ImagePostProcessor()]
    simple_conditions = {
        "lineart": LineartPostProcessor,
        "edge": EdgePostProcessor,
        "depth": DepthPostProcessor,
        "normal": NormalPostProcessor,
        "albedo": AlbedoPostProcessor,
        "openpose": OpenposePostProcessor,
    }
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition in simple_conditions:
            post_processors.append(simple_conditions[condition]())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    import json

    with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp:
        metadatas = [json.loads(line) for line in fp][:50]

    for index, metadata in enumerate(metadatas):
        ori_caption = metadata['prompt']

        for num in range(4):
            best_score = 0
            best_dir = None
            best_caption = None

            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            save_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num)

            prompt = caption
            max_length = 1024
            score, feedback = evaluate_consistency(str(save_dir / "image.png"), model, processor, prompt, ori_caption)
            if score >= best_score:
                best_score, best_dir, best_caption = score, save_dir, prompt

            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length)
                max_length += 100
                # Re-seed so every iteration starts from the same noise.
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num)
                score, feedback = evaluate_consistency(str(save_dir / "image.png"), model, processor, prompt, ori_caption)
                if score >= best_score:
                    best_score, best_dir, best_caption = score, save_dir, prompt

            best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
def extract_main_objects(prompt: str):
    """Return the main object nouns mentioned in *prompt*, deduplicated, in order.

    Strategy: prefer the noun phrase following a linking preposition
    ("photo of a bottle" -> "bottle"); otherwise fall back to every noun in
    the prompt; finally drop medium words such as "photo" or "painting".
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: nouns after "of/with/showing/featuring/containing".
    match = re.search(r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)", prompt)
    candidates = []
    if match:
        tagged = pos_tag(word_tokenize(match.group(1)))
        candidates = [word for word, tag in tagged if tag.startswith("NN")]

    # Step 2: fall back to all nouns in the whole prompt.
    if not candidates:
        tagged = pos_tag(word_tokenize(prompt))
        candidates = [word for word, tag in tagged if tag.startswith("NN")]

    # Step 3: remove words that name the medium rather than the content.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }

    # Step 4: dedupe while preserving first-seen order.
    return list(dict.fromkeys(w for w in candidates if w not in filter_words))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file path (parent directories are created).
        images_per_row: images per grid row (default: all on one row).
        image_format: output format passed to PIL.

    Returns:
        save_path.

    Raises:
        ValueError: if image_paths is empty.
    """
    if not image_paths:
        raise ValueError("image_paths must not be empty")

    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Normalize everything to one square size so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    # Fix: canvas height is one target_size per row.  The previous formula
    # (sum of a full row's heights times the row count) allocated a canvas
    # images_per_row times too tall, leaving large black bands in the output.
    rows = (len(images) + images_per_row - 1) // images_per_row
    canvas = Image.new("RGB", (target_size * images_per_row, target_size * rows))

    for i, img in enumerate(images):
        col, row = i % images_per_row, i // images_per_row
        canvas.paste(img, (col * target_size, row * target_size))

    parent = os.path.dirname(str(save_path))
    if parent:  # os.makedirs("") raises FileNotFoundError
        os.makedirs(parent, exist_ok=True)
    canvas.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. 
" + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, 
default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./example_geneval_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- 
+ os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. **Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. 
**Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) + Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). 
+ For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, 
out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + feedback_path = Path(save_dir) / f"feed.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + with open(feedback_path, "w", encoding="utf-8") as f: + f.write(feedback.strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name 
= modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
json + + with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp: + metadatas = [json.loads(line) for line in fp][400:] + + for index, metadata in enumerate(metadatas): + #index += 50 + ori_caption = metadata['prompt'] + + for num in range(1): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + 
diff --git a/test_t2i_geneval2.py b/test_t2i_geneval2.py new file mode 100644 index 0000000000000000000000000000000000000000..981e4ece972d9f3d58d45152d4b4f5be512e69d2 --- /dev/null +++ b/test_t2i_geneval2.py @@ -0,0 +1,622 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # 
Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. 
" + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, 
default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + 
os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. **Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. 
**Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) + Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). 
+ For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, 
out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name 
= modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
json + + with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp: + metadatas = [json.loads(line) for line in fp][479:] + + for index, metadata in enumerate(metadatas): + index += 100 + ori_caption = metadata['prompt'] + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + 
diff --git a/test_t2i_geneval3.py b/test_t2i_geneval3.py new file mode 100644 index 0000000000000000000000000000000000000000..c824484ccde2b728e0d4ddcfedfa71f68143af7d --- /dev/null +++ b/test_t2i_geneval3.py @@ -0,0 +1,622 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # 
Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. 
" + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, 
default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + 
os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. **Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. 
**Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) + Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). 
+ For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, 
out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name 
= modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
json + + with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp: + metadatas = [json.loads(line) for line in fp][150:200] + + for index, metadata in enumerate(metadatas): + index += 150 + ori_caption = metadata['prompt'] + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + 
"""JODI text-to-image generation on GenEval prompts (indices 200-250) with
iterative Qwen3-VL-guided prompt refinement.

For each GenEval prompt, four samples are generated. Each sample starts from a
Qwen-refined caption, is rendered by the JODI pipeline, scored for image-text
consistency by Qwen3-VL, and then iteratively refined (text + image) for
``--iters`` rounds; the best-scoring iteration is copied to ``iteration_best``.
"""

import argparse
import json
import os
import re
import sys
from pathlib import Path
from shutil import copy
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration,
)
from transformers import AutoProcessor

import nltk

# Make sure the taggers/tokenizers used by extract_main_objects() exist locally.
nltk.download('averaged_perceptron_tagger_eng')
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")

from nltk import word_tokenize, pos_tag


def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following 'of'/'with'/'showing'/'featuring'/
         'containing' (e.g. "photo of a bottle and a refrigerator" ->
         ["bottle", "refrigerator"]).
      2. Fall back to generic noun extraction over the whole prompt.
      3. Drop common media words (photo, picture, image, ...).
      4. De-duplicate while preserving first-seen order.

    Returns:
        list[str]: lowercase noun tokens; empty list for non-string input.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: core noun phrase after a linking preposition, if present.
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        segment = match.group(1)
        tagged = pos_tag(word_tokenize(segment))
        candidates = [word for word, pos in tagged if pos.startswith("NN")]

    # Step 2: fall back to nouns from the full prompt.
    if not candidates:
        tagged = pos_tag(word_tokenize(prompt))
        candidates = [word for word, pos in tagged if pos.startswith("NN")]

    # Step 3: filter out media/medium words that are not scene objects.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot"
    }
    filtered = [word for word in candidates if word not in filter_words]

    # Step 4: de-duplicate, keeping order (dict preserves insertion order).
    return list(dict.fromkeys(filtered))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: output path (including filename).
        images_per_row: images per row (default: all images in one row).
        image_format: save format passed to PIL (e.g. "png").

    Returns:
        The save path, unchanged.
    """
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize every tile to the same square so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    row_height = max(heights)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUGFIX: the canvas needs exactly one row height per row. The previous
    # `sum(heights[:images_per_row]) * rows` over-allocated the canvas height
    # by a factor of `images_per_row`, leaving a large black band below the grid.
    total_height = row_height * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += row_height

    # dirname is empty when save_path is a bare filename; makedirs("") raises.
    parent = os.path.dirname(str(save_path))
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Automatically detects which modality images exist under ``root`` and
    interleaves a short per-modality explanation before each image, followed
    by the overall refinement instruction.

    Args:
        root: directory containing the modality images for one iteration.
        prompt: the original intent the refined caption must follow.
        feedback: corrective feedback from the previous evaluation round.
        coarse_caption: the caption to refine.

    Returns:
        A one-element ``messages`` list in the chat-template format.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Detect which modality files actually exist under root.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality names used in the instruction text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Overall task instruction (appended after all modality images).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a modality tag before each image so the model knows what it sees.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # Finally append the overall instruction.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-line description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for JODI inference without a Gradio UI."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str,
                        default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUGFIX: the help text previously duplicated --steps'.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run one unconditional text-to-image pass and save all modality outputs.

    Saves each post-processed modality as ``<name>.png`` plus a merged grid
    under ``output_dir/index_{index}/sample_{num}/iteration_{iter_num}``.

    Returns:
        Path: the directory the results were written to.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Apply the matching post-processor to each modality output.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # Save each modality and a merged overview image.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        img.save(save_dir / f"{modality_names[idx]}.png")

    merged_path = save_dir / f"merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image-text alignment with Qwen and extract corrective feedback.

    Asks the VLM to rate how well the image matches ``ori_prompt`` (focusing
    on its main objects) and to emit one constructive correction sentence.

    Returns:
        tuple[float, str]: (consistency score, feedback sentence). On any
        parse failure the score is 0.0 and the raw model text is returned
        as feedback.
    """
    main_objects = extract_main_objects(ori_prompt)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
      "Consistency": ,
      "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    print(f'eval messages:{messages}')

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the generated continuation is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} span; the model may wrap JSON in prose. Broad
    # except is deliberate: regex miss (AttributeError), bad JSON, or a
    # non-numeric score all fall back to (0.0, raw text).
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the caption with Qwen using all modality images under ``root``.

    Writes the refined caption to ``iteration_{iter_num}/caption.txt`` and
    returns it. NOTE(review): relies on the module-level ``args`` set in
    ``__main__`` for the output directory.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)

    print(f'refine template:{messages}')

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = Path(save_dir) / f"caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Rewrite ``raw_prompt`` into an enhanced, visually detailed prompt."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities from the refined ``prompt`` and save them.

    NOTE(review): relies on the module-level ``pipe``, ``args`` and
    ``post_processors`` created in ``__main__``.

    Returns:
        Path: the iteration directory the results were written to.
    """
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Apply the matching post-processor to each modality output.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # Save each modality and a merged overview image.
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        img.save(save_dir / f"{modality_names[idx]}.png")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per condition, in pipeline order.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    # This script shard handles GenEval prompts 200-249.
    with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp:
        metadatas = [json.loads(line) for line in fp][200:250]

    for index, metadata in enumerate(metadatas):
        index += 200  # global prompt index
        ori_caption = metadata['prompt']

        for num in range(4):
            best_score = 0
            best_dir = None
            best_caption = None

            # Fresh random seed per sample; re-seeded before every refinement
            # pass so image_refine reuses the same noise trajectory.
            sample_seed = torch.randint(0, 100000, (1,)).item()
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

            if score >= best_score:
                best_score = score
                best_dir = save_dir
                best_caption = prompt

            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length)
                max_length += 100  # allow longer captions as refinement deepens
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

                if score >= best_score:
                    best_score = score
                    best_dir = save_dir
                    best_caption = prompt

            # Persist the best iteration's image, caption and score.
            best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
+ + + + diff --git a/test_t2i_geneval5.py b/test_t2i_geneval5.py new file mode 100644 index 0000000000000000000000000000000000000000..6f82d0c273bf7e979c98893112d089d08b349b77 --- /dev/null +++ b/test_t2i_geneval5.py @@ -0,0 +1,622 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if 
pos.startswith("NN")] + + # Step 2️⃣: 如果未匹配,则通用名词提取 + if not candidates: + tokens = word_tokenize(prompt) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # Step 3️⃣: 过滤掉常见媒介词 + filter_words = { + "photo", "picture", "image", "scene", "view", + "shot", "painting", "drawing", "sketch", + "illustration", "render", "frame", "snapshot" + } + filtered = [w for w in candidates if w not in filter_words] + + # Step 4️⃣: 去重但保持顺序 + main_objects = list(dict.fromkeys(filtered)) + + return main_objects + + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i + images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. 
+ Automatically detects available modalities under root. + """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append((name, str(path))) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + + present_modalities = [readable_map[n] for n, _ in available] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Use all available modalities jointly to reason about the same scene rather than describing them separately. " + f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. " + f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. " + f"Do not include any additional commentary or evaluations. " + f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. " + f"Focus on describing the visual properties, including: " + f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, " + f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. " + f"Exclude any stylistic, environmental, emotional, or narrative information. 
" + f"Consider the following feedback when refining your description: '{feedback}'. " + f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. " + f"Coarse caption: '{coarse_caption}' " + ) + + # --- 构建消息内容:在每个图像前加模态标识 --- + content = [] + for name, path in available: + readable = readable_map.get(name, "visual input") + content.append({ + "type": "text", + "text": f"This is the {readable}, which provides {get_modality_description(name)}." + }) + content.append({"type": "image", "image": path}) + + # 最后附上总任务说明 + content.append({"type": "text", "text": text_prompt}) + + messages = [{"role": "user", "content": content}] + return messages + +def get_modality_description(name: str) -> str: + """为每个模态生成一句说明,用于提示模型理解模态功能""" + desc_map = { + "image": "the main visual appearance of the scene, including color, texture, and lighting", + "annotation_lineart": "structural outlines, object contours, and fine geometry", + "annotation_edge": "strong boundaries and contrast edges between objects", + "annotation_depth": "distance and perspective information for spatial understanding", + "annotation_normal": "surface orientation and geometric curvature cues", + "annotation_albedo": "pure surface color without lighting or shading effects", + "annotation_seg_12colors": "semantic regions and object categories", + "annotation_openpose": "human body keypoints, joints, and orientation", + } + return desc_map.get(name, "complementary visual evidence") + + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, 
default='hf://VIPL-GENUN/Jodi/Jodi.pth', + help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', + help="Path to model checkpoint.") + parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--height", type=int, default=1024) + parser.add_argument("--width", type=int, default=1024) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ +@torch.inference_mode() +def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num): + # -------------------------- + # Inference + # -------------------------- + + print(f"🚀 Generating with prompt: {prompt}") + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + 
os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +@torch.inference_mode() +def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256): + + main_objects = extract_main_objects(ori_prompt) + print(main_objects) + number = len(main_objects) + main_str = ", ".join(main_objects) if main_objects else "the main described objects" + # --- 构造 Qwen 输入 --- + #eval_prompt = f""" + #You are an image–text consistency evaluator. + #Given one RGB image and a textual description, evaluate how well the description matches + #the visual evidence in the image across the following semantic dimensions: + #{number} Main described objects (core subjects): {main_str}. + #1. **Entity (E)** – Are all mentioned object categories correct and clearly visible in the image? + #2. **Attribute (A)** – Are described colors, shapes, sizes, textures, and materials accurate? + #3. **Relation (R)** – Are spatial or logical relationships (e.g., left of, above, next to) correct? + #4. **Count/State (C)** – Are the numbers of objects and their states (open/closed, sitting/standing) consistent? + #5. **Global (G)** – Does the overall scene composition and meaning match the description? + #6. **Completeness (V)** – Are the *main described objects* ({main_str}) fully and clearly visible (not cropped, truncated, or hidden)? + #7. 
**Salience (S)** – Are the *main described objects* visually dominant and central, rather than small, distant, or partially obscured? + #If any of the main objects are only partially visible, occluded, or treated as background, + #reduce the score for Completeness and Salience. + #Score each aspect from 0.0 to 1.0 (0=wrong, 1=perfect). + #Then provide one short feedback sentence describing which aspects could be improved. + #Return JSON strictly: + #{{ + # "Entity": , + # "Attribute": , + # "Relation": , + # "CountState": , + # "Global": , + # "Completeness": , + # "Salience": , + # "Feedback": "" + #}} + #Description: "{prompt}" + # + #""" + eval_prompt = f""" + You are an image–text alignment evaluator and visual correction advisor. + Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown. + Focus only on the main described objects: "{main_str}". + Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible. + If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6), + Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}". + Your feedback must be **constructive**, not punitive: + Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".) + Write one short imperative sentence (e.g., "Replace the lake with two hair dryers."). + Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.). + Only describe what the image should be changed to. + Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set). 
+ For example: + - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color." + - If a car appears blue but should be red, say: "The car is not blue; it should be red." + - If one of three objects is missing, say: "Only two objects are visible; add one more to make three." + + Return JSON only: + {{ + "Consistency": , + "Feedback": "" + }} + Description: "{ori_prompt}" + + """ + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image_path}, + {"type": "text", "text": eval_prompt}, + ], + } + ] + + # --- 推理 --- + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ).to(model.device) + + out_ids = model.generate(**inputs, max_new_tokens=max_length) + out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)] + text = processor.batch_decode(out_trim, skip_special_tokens=True)[0] + + # --- 解析输出 --- + try: + data = json.loads(re.search(r"\{.*\}", text, re.S).group(0)) + score = float(data.get("Consistency", 0)) + feedback = data.get("Feedback", "") + + # 👇 手动计算 Overall + #score = e + a + r + c + g + v + + except Exception: + score, feedback = 0.0, text.strip() + + print( + #f"🧮 [E={e:.2f} | A={a:.2f} | R={r:.2f} | C={c:.2f} | G={g:.2f} | V={v:.2f}]" + f" → Overall={score:.3f}" + ) + print(f"💡 Feedback: {feedback}") + return score, feedback + + +def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300): + messages = build_multimodal_message(root, caption, feedback, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, 
out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024): + chi_prompt = f""" + You are a visual scene enhancement expert. + Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation. + Guidelines: + - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations. + - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects. + - Do not change the original scene or invent unrelated content. + Examples: + - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers. + - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt. 
+ Now rewrite the following prompt faithfully, adding only realistic visual detail: + User Prompt: "{raw_prompt}" + """ + + messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return output_text[0] + + + +def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num): + #control_images = [] + #for name in modality_names: + #control_images.append(Image.open(os.path.join(root, name + '.png')).convert("RGB")) + + print(f"🚀 Generating with prompt: {caption}") + + outputs = pipe( + images=[None] * (1 + pipe.num_conditions), + role=[0] * (1 + pipe.num_conditions), + prompt=prompt, + negative_prompt=args.negative_prompt, + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name 
= modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
json + + with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp: + metadatas = [json.loads(line) for line in fp][250:300] + + for index, metadata in enumerate(metadatas): + index += 250 + ori_caption = metadata['prompt'] + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + 
# --- test_t2i_geneval6.py (new file added by this diff) ----------------------
# Iterative text-to-image refinement for GenEval prompts 300–349:
# enhance the prompt with Qwen3-VL, generate all modalities with Jodi,
# score image/text consistency, refine the caption from feedback, and
# keep the best-scoring iteration.
import os
import sys
import argparse
import json
import re
from pathlib import Path
from shutil import copy
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,       # alternative checkpoints, kept importable
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration,
)
from transformers import AutoProcessor

import nltk
# Fix: download NLTK data only when it is actually missing, instead of
# hitting the network unconditionally on every run.
try:
    nltk.data.find("taggers/averaged_perceptron_tagger_eng")
except LookupError:
    nltk.download("averaged_perceptron_tagger_eng")
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")

from nltk import word_tokenize, pos_tag


def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following 'of', 'with', 'showing',
         'featuring' or 'containing' (e.g. "photo of a bottle and a
         refrigerator" -> bottle, refrigerator).
      2. Otherwise POS-tag the whole prompt and keep all nouns.
      3. Drop media words (photo, picture, ...) and de-duplicate while
         preserving first-seen order.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: noun phrase after a linking preposition.
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        tokens = word_tokenize(match.group(1))
        candidates = [w for w, pos in pos_tag(tokens) if pos.startswith("NN")]

    # Step 2: fall back to generic noun extraction over the whole prompt.
    if not candidates:
        tokens = word_tokenize(prompt)
        candidates = [w for w, pos in pos_tag(tokens) if pos.startswith("NN")]

    # Step 3: filter common media words.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: de-duplicate, keeping order.
    return list(dict.fromkeys(filtered))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Tile several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination file; parent directories are created.
        images_per_row: images per row (default: all in a single row).
        image_format: output format passed to PIL.

    Returns:
        save_path.
    """
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize everything to a common square so every row has the same height.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # Fix: was `sum(heights[:images_per_row]) * rows`, i.e. row-height ×
    # images_per_row × rows — the canvas came out images_per_row× too tall,
    # leaving a large black band. All tiles share heights[0] after the
    # resize above, so the canvas height is simply row-height × rows.
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build the Qwen3-VL chat message for multimodal caption refinement.

    Detects which modality images exist under `root`, tags each image with a
    short explanation of what it contributes, and appends the refinement
    instruction (original intent `prompt`, evaluator `feedback`, current
    caption `coarse_caption`).
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modality files that actually exist under root.
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable name per modality, used inside the prompt text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Message content: precede every image with a text tag naming its modality.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # The overall task instruction goes last.
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """One-line explanation of what each modality contributes, for prompting."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """CLI arguments for the Jodi + Qwen3-VL refinement pipeline."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Qwen3-VL checkpoint used for caption refinement and evaluation.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str,
                        default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Checkpoint the AutoProcessor is loaded from.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # Fix: help text previously duplicated "Number of inference steps."
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations per sample.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Initial unconditional generation of all modalities (iteration 0).

    Runs Jodi with no condition images (all roles = 0), post-processes each
    modality, and saves every image plus a merged contact sheet under
    <output_dir>/index_<index>/sample_<num>/iteration_<iter_num>/.
    Returns that directory as a Path.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # One post-processor per modality; stack, then unbind back into PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    # Fix: the f-string had no placeholder ("merged_iteration.png");
    # include the iteration number like image_refine() does.
    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/text alignment with the VLM and collect corrective feedback.

    Args:
        image_path: generated RGB image to judge.
        model, processor: Qwen3-VL evaluator.
        prompt: refined caption used for generation (kept for interface
            stability; the active evaluation prompt only uses ori_prompt).
        ori_prompt: original benchmark prompt the image must satisfy.
        max_length: max new tokens for the evaluator reply.

    Returns:
        (score, feedback): consistency score plus one feedback sentence;
        falls back to (0.0, raw_reply) when the reply contains no valid JSON.
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # NOTE(review): the worked examples below contradict the "no negative
    # words" instruction just above them; kept verbatim to preserve behavior.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
      "Consistency": ,
      "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated reply is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Best-effort parse: any malformed reply scores 0.0 and the raw text
    # becomes the feedback, so the refinement loop keeps running.
    try:
        match = re.search(r"\{.*\}", text, re.S)
        if match is None:
            raise ValueError("no JSON object in evaluator reply")
        data = json.loads(match.group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the caption using all generated modalities plus evaluator feedback.

    Writes the refined caption to
    <output_dir>/index_<index>/sample_<num>/iteration_<iter_num>/caption.txt
    (relies on the module-global `args`) and returns it.
    """
    # Argument mapping: build_multimodal_message(root, prompt, feedback,
    # coarse_caption) — the strict original intent is `caption`, the
    # caption being refined is `prompt`.
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """One-shot text-only enhancement of the user prompt before generation."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities from the refined `prompt` and save them.

    Relies on the module-globals `pipe`, `args` and `post_processors`.
    `root` is currently unused (the condition-image branch is disabled) and
    `caption` is only echoed in the log; both are kept so call sites stay
    stable.
    """
    # NOTE(review): logs `caption`, but the generation below uses `prompt`.
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    # Output modality order; must match the post-processor order below.
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # One post-processor per generated modality; index 0 is the RGB image.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            # Unknown condition: warn and fall back to a plain image decode.
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    # This script processes GenEval prompts 300–349 (shard of the benchmark).
    with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp:
        metadatas = [json.loads(line) for line in fp][300:350]

    for index, metadata in enumerate(metadatas):
        index += 300  # absolute benchmark index for this shard
        ori_caption = metadata['prompt']

        for num in range(4):  # 4 samples per prompt (GenEval protocol)
            best_score = 0
            best_dir = None
            best_caption = None

            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            # Iteration 0: enhance the benchmark prompt, then generate.
            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)
            if score >= best_score:
                best_score = score
                best_dir = save_dir
                best_caption = prompt

            # Refinement loop: rewrite the caption from feedback, regenerate
            # with the SAME seed (so only the prompt changes), re-evaluate.
            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length)
                max_length += 100  # allow longer captions as refinement proceeds
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)
                if score >= best_score:
                    best_score = score
                    best_dir = save_dir
                    best_caption = prompt

            # Keep the best-scoring iteration's image/caption/score.
            best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
"""Iterative text-to-image refinement on GenEval prompts (indices 350-400).

Per prompt sample the loop is:
  1. Enrich the raw GenEval prompt with Qwen3-VL (``refine_prompt_with_qwen``).
  2. Generate an RGB image plus auxiliary modalities with Jodi (``init_t2i``).
  3. Score image-text consistency with Qwen3-VL (``evaluate_consistency``).
  4. Iterate: refine the prompt from all modalities + feedback (``text_refine``),
     regenerate (``image_refine``), rescore; keep the best-scoring iteration.
"""

import argparse
import json
import os
import re
import sys
from pathlib import Path
from shutil import copy
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image

# Make the repository root importable and keep Gradio temp files local.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration

import nltk

# Tokenizer / POS-tagger models required by extract_main_objects().
nltk.download('averaged_perceptron_tagger_eng')
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")

from nltk import word_tokenize, pos_tag


def extract_main_objects(prompt: str):
    """Extract the main object nouns from a prompt.

    Strategy:
      1. Prefer the noun phrase after 'of'/'with'/'showing'/'featuring'/'containing'
         (e.g. "photo of a bottle and a refrigerator" -> "bottle", "refrigerator").
      2. Fall back to generic noun extraction over the whole prompt.
      3. Filter medium words (photo, picture, image, scene, view, shot, ...).
      4. De-duplicate while preserving order.

    Returns a (possibly empty) list of lowercase nouns.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: prefer the core noun phrase following the linking preposition.
    pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)"
    match = re.search(pattern, prompt)
    candidates = []
    if match:
        segment = match.group(1)
        tokens = word_tokenize(segment)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 2: no preposition match -> tag every noun in the prompt.
    if not candidates:
        tokens = word_tokenize(prompt)
        tagged = pos_tag(tokens)
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 3: drop words describing the medium rather than the content.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot"
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: de-duplicate, keeping first-seen order.
    main_objects = list(dict.fromkeys(filtered))

    return main_objects


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination path (including filename).
        images_per_row: images per grid row (default: all in one row).
        image_format: output format passed to PIL (e.g. "png").

    Returns:
        The ``save_path`` that was written.
    """
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize everything to a common square size (capped at 1024 px).
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    total_height = sum(heights[:images_per_row]) * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    # os.makedirs("") raises FileNotFoundError, so only create a directory
    # when save_path actually has a parent component.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Scans ``root`` for the known modality files, prefixes each found image
    with a short textual explanation of what that modality provides, and
    appends the refinement instruction that references ``prompt`` (the
    intent to follow), ``feedback`` (the evaluator's correction), and
    ``coarse_caption`` (the caption to improve).

    NOTE(review): at the call site in ``text_refine`` the enriched caption is
    passed as ``prompt`` and the current prompt as ``coarse_caption`` — the
    parameter names read swapped; preserved as-is since the composed prompt
    text depends on this ordering.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Detect which modality files exist under root (first matching extension wins).
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable names used inside the instruction text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # The overall refinement instruction (kept verbatim — it is model input).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a one-line modality identifier before each image.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # The task instruction goes last.
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-line description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for this refinement script."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial text-to-image pass and save all modality outputs.

    Returns the directory (``Path``) the iteration's images were saved to.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator
    )

    # Post-process every modality, then convert to PIL images for saving.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / "merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score how well the image at ``image_path`` matches ``ori_prompt``.

    Asks Qwen3-VL to return JSON with a ``Consistency`` score and a
    constructive ``Feedback`` sentence. On any parse failure the score is
    0.0 and the raw model text is returned as feedback.

    Returns:
        (score, feedback): float in [0, 1] and a feedback string.
    """
    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # Evaluation instruction (kept verbatim — it is model input).
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
        "Consistency": ,
        "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the generated continuation is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first {...} JSON object out of the model text; fall back to
    # score 0 with the raw text as feedback if anything goes wrong.
    try:
        json_match = re.search(r"\{.*\}", text, re.S)
        if json_match is None:
            raise ValueError("no JSON object found in model output")
        data = json.loads(json_match.group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(
        f" → Overall={score:.3f}"
    )
    print(f"💡 Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the generation prompt using all modalities plus feedback.

    Saves the refined caption to ``<output>/index_.../iteration_<n>/caption.txt``
    and returns it.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


@torch.inference_mode()
def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Rewrite ``raw_prompt`` into an enriched, detail-heavy generation prompt."""
    # Enhancement instruction (kept verbatim — it is model input).
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


@torch.inference_mode()
def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate the image (and all modalities) from the refined prompt.

    Uses the module-level ``pipe`` / ``args`` / ``post_processors`` globals
    set up in the ``__main__`` block. Returns the iteration's save directory.
    """
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process every modality, then convert to PIL images for saving.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=(torch.bfloat16),
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    # Output ordering matches the pipeline: RGB first, then each condition.
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per output channel, in pipeline order.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    # This shard of the run handles GenEval prompts 350-399.
    with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp:
        metadatas = [json.loads(line) for line in fp][350:400]

    for index, metadata in enumerate(metadatas, start=350):
        ori_caption = metadata['prompt']

        for num in range(4):

            best_score = 0
            best_dir = None
            best_caption = None

            # Fresh random seed per sample; reused for every refinement pass
            # so that only the prompt (not the noise) changes between passes.
            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)

            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

            if score >= best_score:
                best_score = score
                best_dir = save_dir
                best_caption = prompt

            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length)
                max_length += 100
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

                if score >= best_score:
                    best_score = score
                    best_dir = save_dir
                    best_caption = prompt

            # Persist the best-scoring iteration for this sample.
            best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
diff --git a/test_t2i_geneval8.py b/test_t2i_geneval8.py new file mode 100644 index 0000000000000000000000000000000000000000..75bb66961077d3fa7c2cd244299258112f546771 --- /dev/null +++ b/test_t2i_geneval8.py @@ -0,0 +1,622 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T +import re +from shutil import copy + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path +import itertools + +import nltk +nltk.download('averaged_perceptron_tagger_eng') +try: + nltk.data.find("tokenizers/punkt_tab") +except LookupError: + nltk.download("punkt_tab") + nltk.download("punkt") + + +from nltk import word_tokenize, pos_tag + +def extract_main_objects(prompt: str): + """ + 提取主要对象名词: + - 优先匹配 'of', 'with', 'showing', 'featuring', 'containing' 后面的名词短语 + - 过滤媒介词 (photo, picture, image, scene, view, shot, painting, drawing) + - 回退到通用名词提取 + """ + if not isinstance(prompt, str): + return [] + + prompt = prompt.strip().lower() + + # Step 1️⃣: 优先匹配介词后的核心名词短语 + # 例如 "photo of a bottle and a refrigerator" → "bottle", "refrigerator" + pattern = r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)" + match = re.search(pattern, prompt) + candidates = [] + if match: + segment = match.group(1) + tokens = word_tokenize(segment) + tagged = pos_tag(tokens) + candidates = [w for w, pos in tagged if pos.startswith("NN")] + + # 
# NOTE(review): the trailing half of an earlier `extract_main_objects`
# definition ended here; it is superseded by this complete, self-contained
# redefinition (identical logic), so the partial definition above is dead.
def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following 'of'/'with'/'showing'/'featuring'/
         'containing' (e.g. "photo of a bottle and a refrigerator" ->
         ["bottle", "refrigerator"]).
      2. Fall back to generic noun extraction over the whole prompt.
      3. Filter common "media" words (photo, picture, image, ...).
      4. De-duplicate while preserving order.

    Returns a (possibly empty) list of lowercase nouns.
    """
    # BUGFIX: this module never imports `re` or nltk helpers at top level;
    # import locally so the function is self-contained.
    import re
    from nltk import word_tokenize, pos_tag

    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: core noun phrase after a linking preposition.
    match = re.search(r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)", prompt)
    candidates = []
    if match:
        tagged = pos_tag(word_tokenize(match.group(1)))
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 2: generic noun extraction fallback.
    if not candidates:
        tagged = pos_tag(word_tokenize(prompt))
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 3: drop words that describe the picture rather than the scene.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: order-preserving de-duplication.
    return list(dict.fromkeys(filtered))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination path (including file name).
        images_per_row: images per row (default: all in one row).
        image_format: output format name passed to PIL.

    Returns the save path.
    """
    from PIL import Image

    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize everything to a common square size so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUGFIX: canvas height is one row-height per row. The previous
    # `sum(heights[:images_per_row]) * rows` over-allocated the canvas by a
    # factor of `images_per_row`, leaving a large black band below the grid.
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Automatically detects which modality images exist under `root`, and
    interleaves a short per-modality explanation with each image, followed by
    the overall refinement instruction.

    NOTE(review): callers pass (root, caption, feedback, prompt) — i.e. the
    current caption arrives as `prompt` and the original prompt as
    `coarse_caption`. Confirm this argument order is intentional.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modalities that actually exist on disk.
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality names used in the instruction text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Overall refinement instruction (kept verbatim — it is model-facing text).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a per-modality explanation before each image.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # The task instruction goes last.
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for JODI inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str,
                        default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUGFIX: help text was a copy-paste of --steps; this controls the number
    # of refinement iterations in the outer loop.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial text-to-image pass over all modalities and save results.

    Returns the directory the per-modality images were written to.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality, then split into per-modality PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / "merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/text alignment with the VLM and return (score, feedback).

    The model is asked to return strict JSON with a `Consistency` score and a
    corrective `Feedback` sentence; on malformed output, falls back to
    (0.0, raw model text).
    """
    # BUGFIX: `json` and `re` are not imported at module top in this file;
    # import locally so the function cannot NameError.
    import json
    import re

    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # NOTE(review): this prompt forbids negative wording but then shows
    # negative-worded examples ("The elephant is not gray...") — consider
    # rewriting the examples; left verbatim here since it is model-facing text.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
      "Consistency": ,
      "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the generated answer is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        # Malformed / missing JSON: score 0, raw text as feedback.
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the prompt with Qwen using all saved modalities plus feedback.

    Saves the refined caption to `<output_dir>/index_<i>/sample_<n>/
    iteration_<k>/caption.txt` and returns it.

    NOTE(review): relies on the module-global `args` for output_dir.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Rewrite a raw user prompt into a visually detailed 'Enhanced prompt'."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    """Regenerate all modalities with the refined prompt and save them.

    Returns the directory the images were written to.

    NOTE(review): relies on the module-globals `pipe`, `args`, and
    `post_processors` set up in the __main__ block.
    """
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality, then split into per-modality PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


# ------------------------------
# Entry Point
# ------------------------------
if __name__ == "__main__":
    import json
    # BUGFIX: `copy` was used below but never imported in this file.
    from shutil import copy

    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(
        args.model_name_or_path,
    )

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build one post-processor per modality, mirroring the pipeline config.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp:
        metadatas = [json.loads(line) for line in fp][400:450]

    for index, metadata in enumerate(metadatas):
        index += 400  # this slice starts at global prompt index 400
        ori_caption = metadata['prompt']

        for num in range(4):
            best_score = 0
            best_dir = None
            best_caption = None

            sample_seed = torch.randint(0, 100000, (1,)).item()
            print(sample_seed)
            torch.manual_seed(sample_seed)
            generator = torch.Generator(device=device).manual_seed(sample_seed)

            # Initial enhanced prompt + first generation (iteration 0).
            caption = refine_prompt_with_qwen(model, processor, ori_caption)
            init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num)

            save_dir = init_dir
            prompt = caption
            max_length = 1024
            image_path = str(init_dir / "image.png")
            score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

            if score >= best_score:
                best_score = score
                best_dir = save_dir
                best_caption = prompt

            # Iterative refine-generate-evaluate loop.
            for step in range(1, args.iters):
                prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length)
                max_length += 100  # allow longer captions as refinement deepens
                generator = torch.Generator(device=device).manual_seed(sample_seed)
                save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num)
                image_path = str(save_dir / "image.png")
                score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption)

                if score >= best_score:
                    best_score = score
                    best_dir = save_dir
                    best_caption = prompt

            # Persist the best-scoring iteration.
            best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / "iteration_best"
            best_save_dir.mkdir(parents=True, exist_ok=True)
            copy(os.path.join(best_dir, 'image.png'), best_save_dir / 'image.png')
            with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f:
                f.write(best_caption.strip())
            with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f:
                f.write(str(best_score))
# === test_t2i_geneval9.py ====================================================
import os
import sys
import argparse
from pathlib import Path
from PIL import Image
from typing import Any
import torch
import torchvision.transforms as T
import re
from shutil import copy

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
from transformers import (
    Qwen2VLForConditionalGeneration,
    Qwen2_5_VLForConditionalGeneration,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration
)
from transformers import AutoProcessor, Trainer
import itertools

import nltk
# IMPROVEMENT: only download NLTK data when it is actually absent; the
# previous unconditional download triggered a network round-trip on every run.
try:
    nltk.data.find("taggers/averaged_perceptron_tagger_eng")
except LookupError:
    nltk.download("averaged_perceptron_tagger_eng")
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")
    nltk.download("punkt")

from nltk import word_tokenize, pos_tag


def extract_main_objects(prompt: str):
    """Extract the main object nouns from a caption.

    Strategy:
      1. Prefer the noun phrase following 'of'/'with'/'showing'/'featuring'/
         'containing' (e.g. "photo of a bottle and a refrigerator" ->
         ["bottle", "refrigerator"]).
      2. Fall back to generic noun extraction over the whole prompt.
      3. Filter common "media" words (photo, picture, image, ...).
      4. De-duplicate while preserving order.

    Returns a (possibly empty) list of lowercase nouns.
    """
    if not isinstance(prompt, str):
        return []

    prompt = prompt.strip().lower()

    # Step 1: core noun phrase after a linking preposition.
    match = re.search(r"(?:of|with|showing|featuring|containing)\s+([a-z\s,]+)", prompt)
    candidates = []
    if match:
        tagged = pos_tag(word_tokenize(match.group(1)))
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 2: generic noun extraction fallback.
    if not candidates:
        tagged = pos_tag(word_tokenize(prompt))
        candidates = [w for w, pos in tagged if pos.startswith("NN")]

    # Step 3: drop words that describe the picture rather than the scene.
    filter_words = {
        "photo", "picture", "image", "scene", "view",
        "shot", "painting", "drawing", "sketch",
        "illustration", "render", "frame", "snapshot",
    }
    filtered = [w for w in candidates if w not in filter_words]

    # Step 4: order-preserving de-duplication.
    return list(dict.fromkeys(filtered))


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate several images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: destination path (including file name).
        images_per_row: images per row (default: all in one row).
        image_format: output format name passed to PIL.

    Returns the save path.
    """
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize everything to a common square size so the grid is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    # BUGFIX: canvas height is one row-height per row. The previous
    # `sum(heights[:images_per_row]) * rows` over-allocated the canvas by a
    # factor of `images_per_row`, leaving a large black band below the grid.
    total_height = heights[0] * rows

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        x_offset = 0
        for img in images[i:i + images_per_row]:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_multimodal_message(root, prompt, feedback, coarse_caption="a generic scene"):
    """Build a Qwen3-VL chat message for multi-modal caption refinement.

    Automatically detects which modality images exist under `root`, and
    interleaves a short per-modality explanation with each image, followed by
    the overall refinement instruction.

    NOTE(review): callers pass (root, caption, feedback, prompt) — i.e. the
    current caption arrives as `prompt` and the original prompt as
    `coarse_caption`. Confirm this argument order is intentional.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modalities that actually exist on disk.
    available = []
    for name in modality_names:
        for ext in (".png", ".jpg", ".jpeg"):
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality names used in the instruction text.
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    # Overall refinement instruction (kept verbatim — it is model-facing text).
    text_prompt = (
        f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
        f"Generate an enhanced prompt that provides detailed and precise visual descriptions suitable for image generation. "
        f"Your task is based on all visual modalities to improve the description for the coarse caption while strictly following its original intent: '{prompt}'. "
        f"Do not include any additional commentary or evaluations. "
        f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
        f"Focus on describing the visual properties, including: "
        f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
        f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
        f"Exclude any stylistic, environmental, emotional, or narrative information. "
        f"Consider the following feedback when refining your description: '{feedback}'. "
        f"Preserve the same object category as in the coarse caption and describe its fine details in a realistic, objective tone. "
        f"Coarse caption: '{coarse_caption}' "
    )

    # Interleave a per-modality explanation before each image.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    # The task instruction goes last.
    content.append({"type": "text", "text": text_prompt})

    return [{"role": "user", "content": content}]


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of what a modality contributes."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    """Build the CLI argument parser for JODI inference."""
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str,
                        default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path to model checkpoint.")
    parser.add_argument("--prompt", type=str, default="cat.", help="Prompt text for generation.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    # BUGFIX: help text was a copy-paste of --steps; this controls the number
    # of refinement iterations in the outer loop.
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./geneval_outputs", help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Function
# ------------------------------
@torch.inference_mode()
def init_t2i(args, prompt, pipe, iter_num, post_processors, modality_names, generator, index, num):
    """Run the initial text-to-image pass over all modalities and save results.

    Returns the directory the per-modality images were written to.
    """
    print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality, then split into per-modality PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / "merged_iteration.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)

    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, prompt, ori_prompt, max_length=256):
    """Score image/text alignment with the VLM and return (score, feedback).

    The model is asked to return strict JSON with a `Consistency` score and a
    corrective `Feedback` sentence; on malformed output, falls back to
    (0.0, raw model text).
    """
    # BUGFIX: `json` is not imported at module top in this file; import
    # locally so the function cannot NameError.
    import json

    main_objects = extract_main_objects(ori_prompt)
    print(main_objects)
    main_str = ", ".join(main_objects) if main_objects else "the main described objects"

    # NOTE(review): this prompt forbids negative wording but then shows
    # negative-worded examples ("The elephant is not gray...") — consider
    # rewriting the examples; left verbatim here since it is model-facing text.
    eval_prompt = f"""
    You are an image–text alignment evaluator and visual correction advisor.
    Given one RGB image evaluate how well the description "{ori_prompt}" matches what is visually shown.
    Focus only on the main described objects: "{main_str}".
    Each main object must appear clearly and completely in the image — not cropped, cut off, hidden, or only partially visible.
    If any main object is incomplete, visual missing, has an incorrect attribute (such as color, size, or position) or only partly visible, reduce the score sharply (<0.6),
    Then, give **a corrective feedback sentence that explicitly states what the object should be** according to the intended description "{ori_prompt}".
    Your feedback must be **constructive**, not punitive:
    Do NOT mention the current image, errors, or missing items. (No phrases like "the image does not...", "it is missing...", "there is no...", "wrong/incorrect/fail".)
    Write one short imperative sentence (e.g., "Replace the lake with two hair dryers.").
    Do NOT use any negative words (no 'not', 'no', 'missing', 'wrong', 'fail', etc.).
    Only describe what the image should be changed to.
    Start with a verb (e.g., Add/Replace/Change/Move/Center/Enlarge/Crop/Rotate/Make/Align/Set).
    For example:
    - If the elephant appears gray but should be purple, say: "The elephant is not gray; it should be purple, so adjust it to purple color."
    - If a car appears blue but should be red, say: "The car is not blue; it should be red."
    - If one of three objects is missing, say: "Only two objects are visible; add one more to make three."

    Return JSON only:
    {{
      "Consistency": ,
      "Feedback": ""
    }}
    Description: "{ori_prompt}"

    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the generated answer is decoded.
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        # Malformed / missing JSON: score 0, raw text as feedback.
        score, feedback = 0.0, text.strip()

    print(f" → Overall={score:.3f}")
    print(f"💡 Feedback: {feedback}")
    return score, feedback


def text_refine(root, model, processor, caption, prompt, feedback, iter_num, index, num, max_length=300):
    """Refine the prompt with Qwen using all saved modalities plus feedback.

    Saves the refined caption to `<output_dir>/index_<i>/sample_<n>/
    iteration_<k>/caption.txt` and returns it.

    NOTE(review): relies on the module-global `args` for output_dir.
    """
    messages = build_multimodal_message(root, caption, feedback, prompt)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    with open(save_dir / "caption.txt", "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]


def refine_prompt_with_qwen(model, processor, raw_prompt, max_length=1024):
    """Rewrite a raw user prompt into a visually detailed 'Enhanced prompt'."""
    chi_prompt = f"""
    You are a visual scene enhancement expert.
    Given a user prompt, rewrite it into an "Enhanced prompt" that provides vivid and realistic visual details suitable for image generation.
    Guidelines:
    - If the prompt is simple, enrich it with concrete details about color, lighting, materials, textures, shapes, and spatial relations.
    - If the prompt is already detailed, refine and slightly polish it without changing its meaning or adding new objects.
    - Do not change the original scene or invent unrelated content.
    Examples:
    - Input: A cat sleeping → Enhanced: A small, fluffy white cat curled up on a sunny windowsill, surrounded by potted red flowers.
    - Input: A busy city street → Enhanced: A bustling city street at dusk with glowing streetlights, crowds in colorful coats, and neon reflections on wet asphalt.
    Now rewrite the following prompt faithfully, adding only realistic visual detail:
    User Prompt: "{raw_prompt}"
    """

    messages = [{"role": "user", "content": [{"type": "text", "text": chi_prompt}]}]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]


def image_refine(caption, prompt, root, iter_num, modality_names, generator, index, num):
    # NOTE(review): this definition continues beyond the visible chunk; only
    # the visible portion is reproduced here, ending mid-statement so the
    # remainder of the original file completes it.
    print(f"🚀 Generating with prompt: {caption}")

    outputs = pipe(
        images=[None] * (1 + pipe.num_conditions),
        role=[0] * (1 + pipe.num_conditions),
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=args.height,
        width=args.width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    )

    # Post-process each modality, then split into per-modality PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    os.makedirs(args.output_dir, exist_ok=True)

    save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)

    for idx, img in enumerate(results):
        name
= modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + import 
json + + with open('/home/efs/mjw/mjw/code/geneval/prompts/evaluation_metadata.jsonl') as fp: + metadatas = [json.loads(line) for line in fp][450:] + + for index, metadata in enumerate(metadatas): + index += 450 + ori_caption = metadata['prompt'] + + for num in range(4): + + best_score = 0 + best_dir = None + best_caption = None + + sample_seed = torch.randint(0, 100000, (1,)).item() + print(sample_seed) + + torch.manual_seed(sample_seed) + generator = torch.Generator(device=device).manual_seed(sample_seed) + + caption = refine_prompt_with_qwen(model, processor, ori_caption) + #caption = ori_caption + init_dir = init_t2i(args, caption, pipe, 0, post_processors, modality_names, generator, index, num) + + save_dir = init_dir + prompt = caption + max_length = 1024 + image_path = str(init_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + for step in range(1, args.iters): + prompt = text_refine(save_dir, model, processor, caption, prompt, feedback, step, index, num, max_length) + max_length += 100 + generator = torch.Generator(device=device).manual_seed(sample_seed) + save_dir = image_refine(caption, prompt, save_dir, step, modality_names, generator, index, num) + image_path = str(save_dir / "image.png") + score, feedback = evaluate_consistency(image_path, model, processor, prompt, ori_caption) + + if score >= best_score: + best_score = score + best_dir = save_dir + best_caption = prompt + + best_save_dir = Path(args.output_dir) / f"index_{index}" / f"sample_{num}" / f"iteration_best" + best_save_dir.mkdir(parents=True, exist_ok=True) + copy(os.path.join(best_dir,'image.png'), best_save_dir / 'image.png') + with open(best_save_dir / "caption.txt", "w", encoding="utf-8") as f: + f.write(best_caption.strip()) + with open(best_save_dir / "score.txt", "w", encoding="utf-8") as f: + f.write(str(best_score)) + + + + 
diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..1788e4e5be961f9d22e17a6b241d2eb818dde6ab --- /dev/null +++ b/train.py @@ -0,0 +1,709 @@ +# This file is modified from https://github.com/NVlabs/Sana + +# Copyright 2024 NVIDIA CORPORATION & AFFILIATES +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +sys.path.append(os.path.abspath('.')) + +import datetime +import hashlib +import itertools +import os.path as osp +import random +import time +import warnings +from pathlib import Path + +import numpy as np +import pyrallis +import torch +from accelerate import Accelerator, InitProcessGroupKwargs +from PIL import Image +from termcolor import colored + +warnings.filterwarnings("ignore") # ignore warning + + +from diffusion import DPMS, Scheduler +from data.builder import build_dataloader, build_dataset +from data.datasets.jodi_dataset import RandomConcatJodiDataset +from data.sampler import DistributedRangedSampler +from model.builder import build_model, get_tokenizer_and_text_encoder, get_vae, vae_decode, vae_encode +from diffusion.respace import compute_density_for_timestep_sampling +from utils.checkpoint import load_checkpoint, save_checkpoint +from utils.config import JodiConfig +from utils.data_sampler import AspectRatioBatchSampler +from utils.dist_utils import flush, get_world_size +from utils.logger import LogBuffer, get_root_logger +from 
utils.lr_scheduler import build_lr_scheduler +from utils.misc import DebugUnderflowOverflow, init_random_seed, set_random_seed +from utils.optimizer import auto_scale_lr, build_optimizer + +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +def detect_parameters(model): + print("\n========== Unused Parameter Debug Report ==========") + unused_modules = set() + total = 0 + unused = 0 + for name, param in model.named_parameters(): + total += 1 + if param.requires_grad: + if param.grad is None: + unused += 1 + # 找到该参数的上级模块(父模块名) + module_name = name.split('.')[0] + unused_modules.add(module_name) + print(f"❌ {name:80s} | grad=None") + + print(f"\nSummary: {unused}/{total} parameters had no grad.") + print("Modules possibly skipped in forward:") + for m in sorted(unused_modules): + print(f" - {m}") + print("===================================================\n") + +def log_validation(accelerator, config, model, logger, step, device): + torch.cuda.empty_cache() + model = accelerator.unwrap_model(model).eval() + + #chi_prompt = "\n".join(config.text_encoder.chi_prompt) + #num_chi_prompt_tokens = len(tokenizer.encode(chi_prompt)) + #max_length_all = (num_chi_prompt_tokens + config.text_encoder.model_max_length - 2) + + logger.info("Running validation... 
") + + # Run sampling + latents, logits = [], [] + for prompt in validation_prompts: + max_length=300 + z = torch.randn(1, 1 + num_conditions, config.vae.vae_latent_dim, latent_size, latent_size, device=device) + + #text = chi_prompt + prompt + + print(prompt) + + cond_tokens = tokenizer( + [prompt], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + #select_index = [0] + list(range(-config.text_encoder.model_max_length + 1, 0)) + + null_tokens = tokenizer( + [''], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + + prompt_tokens = tokenizer( + [prompt], + padding="longest", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + caption_embs = prompt_tokens.input_ids + emb_masks = prompt_tokens.attention_mask + + max_length = caption_embs.shape[-1] + negative_tokens = tokenizer( + [''], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + neg_attention_masks = negative_tokens.attention_mask + null_y = negative_tokens.input_ids + + prompt_c = text_encoder_fix(cond_tokens.input_ids, attention_mask=cond_tokens.attention_mask)[0] #[:, select_index] + null_c = text_encoder_fix(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0] + + role = torch.zeros((1, 1 + num_conditions), dtype=torch.long, device=device) + model_kwargs = dict(mask=cond_tokens.attention_mask, role=role) + + dpm_solver = DPMS( + model.forward, + cfg_scale=4.5, + condition=prompt_c, + uncondition=null_c, + model_type="flow", + model_kwargs=model_kwargs, + schedule="FLOW", + #prompt_len=caption_embs.shape[-1] + ) + + print(caption_embs.shape, null_y.shape) + + denoised = dpm_solver.sample( + z, + steps=100, + order=2, + skip_type="time_uniform_flow", + method="multistep", + flow_shift=config.scheduler.flow_shift, + ) + latents.append(denoised) + 
#logits.append(autoregressived) + + torch.cuda.empty_cache() + + # Decode latents + image_logs = [] + for prompt, latent in zip(validation_prompts, latents): + latent = latent.to(torch.float16) + latent = torch.unbind(latent, dim=1) + #caption = tokenizer.decode(logit[0].long().tolist(), skip_special_tokens=True) + #print(f'caption:{caption}') + images = [] + for lat in latent: + sample = vae_decode(config.vae.vae_type, vae, lat) + sample = torch.clamp(127.5 * sample + 128.0, 0, 255) + sample = sample.permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()[0] + images.append(Image.fromarray(sample)) + image_logs.append({"validation_prompt": prompt, "images": images}) + + # Save images + def concatenate_images(image_caption, images_per_row=5, image_format="webp"): + import io + images = list(itertools.chain.from_iterable([log["images"] for log in image_caption])) + if images[0].size[0] > 1024: + images = [image.resize((1024, 1024)) for image in images] + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + total_height = sum(heights[i : i + images_per_row][0] for i in range(0, len(images), images_per_row)) + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_images = images[i : i + images_per_row] + x_offset = 0 + for img in row_images: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[i] + webp_image_bytes = io.BytesIO() + new_im.save(webp_image_bytes, format=image_format) + webp_image_bytes.seek(0) + new_im = Image.open(webp_image_bytes) + return new_im + + file_format = "png" # "webp" + local_vis_save_path = osp.join(config.work_dir, "log_vis") + os.umask(0o000) + os.makedirs(local_vis_save_path, exist_ok=True) + concatenated_image = concatenate_images(image_logs, images_per_row=num_conditions+1, image_format=file_format) + save_path = osp.join(local_vis_save_path, f"vis_{step}.{file_format}") + 
concatenated_image.save(save_path) + + model.train() + flush() + return image_logs + + +def train(config, args, accelerator, model, optimizer, lr_scheduler, train_dataloader, train_diffusion, logger): + if getattr(config.train, "debug_nan", False): + DebugUnderflowOverflow(model) + logger.info("NaN debugger registered. Start to detect overflow during training.") + log_buffer = LogBuffer() + + global_step = start_step + 1 + skip_step = max(config.train.skip_step, global_step) % train_dataloader_len + skip_step = skip_step if skip_step < (train_dataloader_len - 20) else 0 + loss_nan_timer = 0 + + # Now you train the model + for epoch in range(start_epoch + 1, config.train.num_epochs + 1): + time_start, last_tic = time.time(), time.time() + sampler = train_dataloader.batch_sampler.sampler + sampler.set_epoch(epoch) + sampler.set_start(max((skip_step - 1) * config.train.train_batch_size, 0)) + if skip_step > 1 and accelerator.is_main_process: + logger.info(f"Skipped Steps: {skip_step}") + skip_step = 1 + data_time_start = time.time() + data_time_all = 0 + lm_time_all = 0 + vae_time_all = 0 + model_time_all = 0 + for step, batch in enumerate(train_dataloader): + # NOTE: train_dataloader is infinite since batch_sampler is infinite + # therefore, we actually stay in this for loop until the end of training + accelerator.wait_for_everyone() + data_time_all += time.time() - data_time_start + vae_time_start = time.time() + with torch.no_grad(): + with torch.amp.autocast( + "cuda", + enabled=(config.model.mixed_precision == "fp16" or config.model.mixed_precision == "bf16"), + ): + z = [] + imgs = torch.unbind(batch["img"], dim=1) + for img in imgs: + z.append(vae_encode( + config.vae.vae_type, vae, img, config.vae.sample_posterior, accelerator.device + )) + z = torch.stack(z, dim=1) + + accelerator.wait_for_everyone() + vae_time_all += time.time() - vae_time_start + + clean_images = z + + lm_time_start = time.time() + + y = batch["inputs"].to(accelerator.device) + labels = 
batch["targets"].to(accelerator.device) + y_mask = batch["masks"].to(accelerator.device) + p_len = batch["prompt_length"].to(accelerator.device) + c_len = batch["caption_length"].to(accelerator.device) + + # Sample a random timestep for each image + bs = clean_images.shape[0] + timesteps = torch.randint( + 0, config.scheduler.train_sampling_steps, (bs,), device=clean_images.device + ).long() + ratio = timesteps / config.scheduler.train_sampling_steps + if config.scheduler.weighting_scheme in ["logit_normal"]: + # adapting from diffusers.training_utils + u = compute_density_for_timestep_sampling( + weighting_scheme=config.scheduler.weighting_scheme, + batch_size=bs, + logit_mean=config.scheduler.logit_mean, + logit_std=config.scheduler.logit_std, + mode_scale=None, # not used + ) + timesteps = (u * config.scheduler.train_sampling_steps).long().to(clean_images.device) + + # Get the role + role = batch["role"] + assert role.shape == (bs, 1+num_conditions) + + with torch.no_grad(): + prompt_ids = batch["prompt_ids"].to(accelerator.device) + prompt_mask = batch["prompt_masks"].to(accelerator.device) + + c = text_encoder_fix(prompt_ids, attention_mask=prompt_mask)[0] + c_mask = prompt_mask + + #c_mask = txt_tokens.attention_maskgrad_norm = None + + accelerator.wait_for_everyone() + + global_step += 1 + + if (global_step + 1) % config.train.eval_sampling_steps == 0 or (step + 1) == 1: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + img_txt_logs = log_validation( + accelerator=accelerator, + config=config, + model=model, + logger=logger, + step=global_step, + device=accelerator.device, + ) + local_vis_save_path = osp.join(config.work_dir, "log_vis") + exit() + + + data_time_start = time.time() + + if epoch % config.train.save_model_epochs == 0 or epoch == config.train.num_epochs and not config.debug: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + os.umask(0o000) + save_checkpoint( + osp.join(config.work_dir, "checkpoints"), + 
epoch=epoch, + step=global_step, + model=accelerator.unwrap_model(model), + optimizer=optimizer, + lr_scheduler=lr_scheduler, + generator=generator, + add_symlink=True, + ) + + accelerator.wait_for_everyone() + + +@pyrallis.wrap() +def main(cfg: JodiConfig) -> None: + global train_dataloader_len, start_epoch, start_step, vae, generator, num_replicas, rank, training_start_time + global text_encoder, tokenizer, text_encoder_fix + global max_length, validation_prompts, latent_size, valid_prompt_embed_suffix, null_embed_path + global image_size, cache_file, total_steps + global num_conditions + + config = cfg + args = cfg + + training_start_time = time.time() + + if args.debug: + config.train.log_interval = 1 + config.train.train_batch_size = min(64, config.train.train_batch_size) + args.report_to = "tensorboard" + + os.umask(0o000) + os.makedirs(config.work_dir, exist_ok=True) + + # Initialize accelerator + init_handler = InitProcessGroupKwargs() + init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug + accelerator = Accelerator( + mixed_precision=config.model.mixed_precision, + gradient_accumulation_steps=config.train.gradient_accumulation_steps, + log_with=args.report_to, + project_dir=osp.join(config.work_dir, "logs"), + kwargs_handlers=[init_handler], + ) + + validation_prompts = config.train.validation_prompts + + # Initialize tensorboard logging + log_name = "train_log.log" + logger = get_root_logger(osp.join(config.work_dir, log_name)) + logger.info(accelerator.state) + + # Set random seed + config.train.seed = init_random_seed(getattr(config.train, "seed", None)) + set_random_seed(config.train.seed + int(os.environ["LOCAL_RANK"])) + generator = torch.Generator(device="cpu").manual_seed(config.train.seed) + + if accelerator.is_main_process: + pyrallis.dump(config, open(osp.join(config.work_dir, "config.yaml"), "w"), sort_keys=False, indent=4) + + # logger.info(f"Config: \n{config}") + logger.info(f"World_size: 
{get_world_size()}") + logger.info(f"seed: {config.train.seed}") + logger.info(f"Initializing: DDP for training") + + # Get pretrained VAE + vae = get_vae(config.vae.vae_type, config.vae.vae_pretrained, accelerator.device).to(torch.float16) + logger.info(f"vae type: {config.vae.vae_type}") + + # Get tokenizer and text encoder + tokenizer, text_encoder, text_encoder_fix = get_tokenizer_and_text_encoder(config.text_encoder.text_encoder_name, accelerator.device) + + import copy + text_encoder_fix = copy.deepcopy(text_encoder_fix).to(accelerator.device) + + from peft import LoraConfig, get_peft_model, TaskType + from peft.tuners.lora import LoraModel + + lora_config = LoraConfig( + r=16, # 低秩分解维度 + lora_alpha=32, # 缩放系数 + target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"], + lora_dropout=0.05, + bias="none", + task_type=TaskType.CAUSAL_LM, # 🔥 核心:Causal Language Model 类型 + ) + + text_encoder = get_peft_model(text_encoder, lora_config) + + for name, module in text_encoder_fix.named_modules(): + if "lora" in name.lower(): + print("⚠️ Found LoRA module in fixed_text_encoder:", name, "→", type(module)) + + #for i, layer in enumerate(text_encoder.model.layers): + # LoraModel.inject_adapter(layer, lora_config, adapter_name="default") + # print(f"✅ injected LoRA into layer {i}") + + logger.info(f"text encoder: {config.text_encoder.text_encoder_name}") + + # Compute and save null embedding and validation prompts embeddings + os.makedirs(config.train.null_embed_root, exist_ok=True) + text_embed_dim = text_encoder.config.hidden_size + chi_prompt = "\n".join(config.text_encoder.chi_prompt) + # logger.info(f"Complex Human Instruct: {chi_prompt}") + max_length = config.text_encoder.model_max_length + null_embed_path = osp.join( + config.train.null_embed_root, + f"null_embed_diffusers_{config.text_encoder.text_encoder_name}_{max_length}token_{text_embed_dim}.pth", + ) + if config.train.visualize and len(config.train.validation_prompts): + # 
Preparing embeddings for visualization. We put it here for saving GPU memory + null_tokens = tokenizer( + "", max_length=max_length, padding="max_length", truncation=True, return_tensors="pt" + ).to(accelerator.device) + if "T5" in config.text_encoder.text_encoder_name: + null_token_emb = text_encoder(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0] + elif "gemma" in config.text_encoder.text_encoder_name or "Qwen" in config.text_encoder.text_encoder_name: + null_token_emb = text_encoder(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0] + else: + raise ValueError(f"{config.text_encoder.text_encoder_name} is not supported!!") + torch.save( + {"uncond_prompt_embeds": null_token_emb, "uncond_prompt_embeds_mask": null_tokens.attention_mask}, + null_embed_path, + ) + del null_token_emb + del null_tokens + flush() + + os.environ["AUTOCAST_LINEAR_ATTN"] = "true" if config.model.autocast_linear_attn else "false" + + # Build scheduler + train_diffusion = Scheduler( + str(config.scheduler.train_sampling_steps), + noise_schedule=config.scheduler.noise_schedule, + predict_v=config.scheduler.predict_v, + snr=config.train.snr_loss, + flow_shift=config.scheduler.flow_shift, + ) + logger.info(f"v-prediction: {config.scheduler.predict_v}") + logger.info(f"noise schedule: {config.scheduler.noise_schedule}") + if "flow" in config.scheduler.noise_schedule: + logger.info(f"flow shift: {config.scheduler.flow_shift}") + if config.scheduler.weighting_scheme in ["logit_normal", "mode"]: + logger.info(f"flow weighting: {config.scheduler.weighting_scheme}") + logger.info(f"logit-mean: {config.scheduler.logit_mean}") + logger.info(f"logit-std: {config.scheduler.logit_std}") + + # Build models + image_size = config.model.image_size + latent_size = int(image_size) // config.vae.vae_downsample_rate + num_conditions = len(config.data.datasets[0]["conditions"]) + logger.info(f"Number of conditions: {num_conditions}") + model_kwargs = { + "config": config, + 
"in_channels": config.vae.vae_latent_dim, + "mlp_ratio": config.model.mlp_ratio, + "caption_channels": text_embed_dim, + "pe_interpolation": config.model.pe_interpolation, + "model_max_length": max_length, + "qk_norm": config.model.qk_norm, + "y_norm": config.text_encoder.y_norm, + "attn_type": config.model.attn_type, + "y_norm_scale_factor": config.text_encoder.y_norm_scale_factor, + "mlp_acts": list(config.model.mlp_acts), + "use_pe": config.model.use_pe, + "linear_head_dim": config.model.linear_head_dim, + "num_conditions": num_conditions, + } + model = build_model( + config.model.model, + config.train.grad_checkpointing, + getattr(config.model, "fp32_attention", False), + input_size=latent_size, + **model_kwargs, + ).train() + logger.info( + colored( + f"{model.__class__.__name__}:{config.model.model}, " + f"Model Parameters: {sum(p.numel() for p in model.parameters()) / 1e6:.2f}M", + "green", + attrs=["bold"], + ) + ) + + # Load weights + load_from = True + state_dict = None + if args.resume_from or config.model.resume_from: + load_from = False + config.model.resume_from = dict( + checkpoint=args.resume_from or config.model.resume_from, + load_ema=False, + resume_optimizer=True, + resume_lr_scheduler=True, + ) + if args.load_from is not None: + config.model.load_from = args.load_from # type: ignore + if config.model.load_from is not None and load_from: + state_dict, _, missing, unexpected, _ = load_checkpoint( + config.model.load_from, + model, + load_ema=config.model.resume_from.get("load_ema", False), + null_embed_path=null_embed_path, + ) + logger.warning(f"Missing keys: {missing}") + logger.warning(f"Unexpected keys: {unexpected}") + + # Build dataset + dataset = [build_dataset(d, tokenizer=tokenizer) for d in config.data.datasets] + dataset = RandomConcatJodiDataset(dataset) + logger.info(f"Dataset size: {len(dataset)}") + accelerator.wait_for_everyone() + + # Build dataloader + num_replicas = int(os.environ["WORLD_SIZE"]) + rank = int(os.environ["RANK"]) 
+ sampler = DistributedRangedSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=True) + batch_sampler = AspectRatioBatchSampler( + sampler=sampler, + dataset=dataset, + batch_size=config.train.train_batch_size, + aspect_ratios=dataset.aspect_ratio, + ratio_nums=dataset.ratio_nums, + drop_last=True, + config=config, + caching=args.caching, + ) + train_dataloader = build_dataloader( + dataset, + batch_sampler=batch_sampler, + num_workers=config.train.num_workers, + ) + train_dataloader_len = len(train_dataloader) + + # Build optimizer and lr scheduler + lr_scale_ratio = 1 + if getattr(config.train, "auto_lr", None): + lr_scale_ratio = auto_scale_lr( + config.train.train_batch_size * get_world_size() * config.train.gradient_accumulation_steps, + config.train.optimizer, + **config.train.auto_lr, + ) + optimizer = build_optimizer(model, config.train.optimizer) + if config.train.lr_schedule_args and config.train.lr_schedule_args.get("num_warmup_steps", None): + config.train.lr_schedule_args["num_warmup_steps"] = ( + config.train.lr_schedule_args["num_warmup_steps"] * num_replicas + ) + lr_scheduler = build_lr_scheduler(config.train, optimizer, train_dataloader, lr_scale_ratio) + logger.info(f"{colored(f'Basic Setting: ', 'green', attrs=['bold'])}") + logger.info(f"lr: {config.train.optimizer['lr']:.5f}") + logger.info(f"bs: {config.train.train_batch_size}") + logger.info(f"gc: {config.train.grad_checkpointing}") + logger.info(f"gc_accum_step: {config.train.gradient_accumulation_steps}") + logger.info(f"qk norm: {config.model.qk_norm}") + logger.info(f"fp32 attn: {config.model.fp32_attention}") + logger.info(f"attn type: {config.model.attn_type}") + logger.info(f"text encoder: {config.text_encoder.text_encoder_name}") + logger.info(f"precision: {config.model.mixed_precision}") + + timestamp = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + + if accelerator.is_main_process: + tracker_config = dict(vars(config)) + try: + 
accelerator.init_trackers(args.tracker_project_name, tracker_config) + except: + accelerator.init_trackers(f"tb_{timestamp}") + + start_epoch = 0 + start_step = 0 + total_steps = train_dataloader_len * config.train.num_epochs + + # Resume training + if config.model.resume_from is not None and config.model.resume_from["checkpoint"] is not None: + rng_state = None + ckpt_path = osp.join(config.work_dir, "checkpoints") + check_flag = osp.exists(ckpt_path) and len(os.listdir(ckpt_path)) != 0 + if config.model.resume_from["checkpoint"] == "latest": + if check_flag: + checkpoints = os.listdir(ckpt_path) + if "latest.pth" in checkpoints and osp.exists(osp.join(ckpt_path, "latest.pth")): + config.model.resume_from["checkpoint"] = osp.realpath(osp.join(ckpt_path, "latest.pth")) + else: + checkpoints = [i for i in checkpoints if i.startswith("epoch_")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.replace(".pth", "").split("_")[3])) + config.model.resume_from["checkpoint"] = osp.join(ckpt_path, checkpoints[-1]) + else: + config.model.resume_from["checkpoint"] = config.model.load_from + + if config.model.resume_from["checkpoint"] is not None: + ignore_keys = [] + if args.extend_to_new_domain: + ignore_keys = ["domain_embedding"] + state_dict, _, missing, unexpected, rng_state = load_checkpoint( + **config.model.resume_from, + model=model, + optimizer=optimizer if check_flag else None, + lr_scheduler=lr_scheduler if check_flag else None, + null_embed_path=null_embed_path, + ignore_keys=ignore_keys, + ) + logger.warning(f"Missing keys: {missing}") + logger.warning(f"Unexpected keys: {unexpected}") + + path = osp.basename(config.model.resume_from["checkpoint"]) + try: + start_epoch = int(path.replace(".pth", "").split("_")[1]) - 1 + start_step = int(path.replace(".pth", "").split("_")[3]) + except: + pass + + # resume randomise + if rng_state: + logger.info("resuming randomise") + torch.set_rng_state(rng_state["torch"]) + np.random.set_state(rng_state["numpy"]) + 
random.setstate(rng_state["python"]) + generator.set_state(rng_state["generator"]) # resume generator status + try: + torch.cuda.set_rng_state_all(rng_state["torch_cuda"]) + except: + logger.warning("Failed to resume torch_cuda rng state") + + if state_dict is not None: + # resume from sana: copy x_embedder and final_layer + if args.resume_from_sana: + for x_embedder in model.x_embedders: + x_embedder.proj.weight.data.copy_(state_dict["x_embedder.proj.weight"]) + x_embedder.proj.bias.data.copy_(state_dict["x_embedder.proj.bias"]) + model.final_layers[0].linear.weight.data.copy_(state_dict["final_layer.linear.weight"]) + model.final_layers[0].linear.bias.data.copy_(state_dict["final_layer.linear.bias"]) + model.final_layers[0].scale_shift_table.data.copy_(state_dict["final_layer.scale_shift_table"]) + logger.info("Copied x_embedder and final_layer weights from pretrained model.") + # extend to new domain: copy x_embedder and final_layer from specified domain, copy domain embedding + elif args.extend_to_new_domain: + num_domains = state_dict["domain_embedding"].shape[0] + model.domain_embedding[:num_domains].data.copy_(state_dict["domain_embedding"]) + idx = args.extend_to_new_domain_copy_id + if not isinstance(idx, list): + idx = [idx] + for i in range(len(idx)): + model.x_embedders[num_domains+i].proj.weight.data.copy_(state_dict[f"x_embedders.{idx[i]}.proj.weight"]) + model.x_embedders[num_domains+i].proj.bias.data.copy_(state_dict[f"x_embedders.{idx[i]}.proj.bias"]) + model.final_layers[num_domains+i].linear.weight.data.copy_(state_dict[f"final_layers.{idx[i]}.linear.weight"]) + model.final_layers[num_domains+i].linear.bias.data.copy_(state_dict[f"final_layers.{idx[i]}.linear.bias"]) + model.final_layers[num_domains+i].scale_shift_table.data.copy_(state_dict[f"final_layers.{idx[i]}.scale_shift_table"]) + logger.info("Copied x_embedder, final_layer and domain embedding weights from pretrained model.") + # resume from previous checkpoint: state dicts must match + 
else: + #assert len(missing) == 0, f"Missing keys: {missing}" # noqa + #assert len(unexpected) == 0, f"Unexpected keys: {unexpected}" # noqa + logger.info("Successfully loaded pretrained model.") + + # Prepare everything + # There is no specific order to remember, you just need to unpack the + # objects in the same order you gave them to the prepare method. + model = accelerator.prepare(model) + optimizer, lr_scheduler = accelerator.prepare(optimizer, lr_scheduler) + + # Start Training + train( + config=config, + args=args, + accelerator=accelerator, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + train_dataloader=train_dataloader, + train_diffusion=train_diffusion, + logger=logger, + ) + + +if __name__ == "__main__": + main() diff --git a/train2.py b/train2.py new file mode 100644 index 0000000000000000000000000000000000000000..1788e4e5be961f9d22e17a6b241d2eb818dde6ab --- /dev/null +++ b/train2.py @@ -0,0 +1,709 @@ +# This file is modified from https://github.com/NVlabs/Sana + +# Copyright 2024 NVIDIA CORPORATION & AFFILIATES +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +sys.path.append(os.path.abspath('.')) + +import datetime +import hashlib +import itertools +import os.path as osp +import random +import time +import warnings +from pathlib import Path + +import numpy as np +import pyrallis +import torch +from accelerate import Accelerator, InitProcessGroupKwargs +from PIL import Image +from termcolor import colored + +warnings.filterwarnings("ignore") # ignore warning + + +from diffusion import DPMS, Scheduler +from data.builder import build_dataloader, build_dataset +from data.datasets.jodi_dataset import RandomConcatJodiDataset +from data.sampler import DistributedRangedSampler +from model.builder import build_model, get_tokenizer_and_text_encoder, get_vae, vae_decode, vae_encode +from diffusion.respace import compute_density_for_timestep_sampling +from utils.checkpoint import load_checkpoint, save_checkpoint +from utils.config import JodiConfig +from utils.data_sampler import AspectRatioBatchSampler +from utils.dist_utils import flush, get_world_size +from utils.logger import LogBuffer, get_root_logger +from utils.lr_scheduler import build_lr_scheduler +from utils.misc import DebugUnderflowOverflow, init_random_seed, set_random_seed +from utils.optimizer import auto_scale_lr, build_optimizer + +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +def detect_parameters(model): + print("\n========== Unused Parameter Debug Report ==========") + unused_modules = set() + total = 0 + unused = 0 + for name, param in model.named_parameters(): + total += 1 + if param.requires_grad: + if param.grad is None: + unused += 1 + # 找到该参数的上级模块(父模块名) + module_name = name.split('.')[0] + unused_modules.add(module_name) + print(f"❌ {name:80s} | grad=None") + + print(f"\nSummary: {unused}/{total} parameters had no grad.") + print("Modules possibly skipped in forward:") + for m in sorted(unused_modules): + print(f" - {m}") + print("===================================================\n") 
+ +def log_validation(accelerator, config, model, logger, step, device): + torch.cuda.empty_cache() + model = accelerator.unwrap_model(model).eval() + + #chi_prompt = "\n".join(config.text_encoder.chi_prompt) + #num_chi_prompt_tokens = len(tokenizer.encode(chi_prompt)) + #max_length_all = (num_chi_prompt_tokens + config.text_encoder.model_max_length - 2) + + logger.info("Running validation... ") + + # Run sampling + latents, logits = [], [] + for prompt in validation_prompts: + max_length=300 + z = torch.randn(1, 1 + num_conditions, config.vae.vae_latent_dim, latent_size, latent_size, device=device) + + #text = chi_prompt + prompt + + print(prompt) + + cond_tokens = tokenizer( + [prompt], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + #select_index = [0] + list(range(-config.text_encoder.model_max_length + 1, 0)) + + null_tokens = tokenizer( + [''], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + + prompt_tokens = tokenizer( + [prompt], + padding="longest", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + caption_embs = prompt_tokens.input_ids + emb_masks = prompt_tokens.attention_mask + + max_length = caption_embs.shape[-1] + negative_tokens = tokenizer( + [''], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt").to(accelerator.device) + + neg_attention_masks = negative_tokens.attention_mask + null_y = negative_tokens.input_ids + + prompt_c = text_encoder_fix(cond_tokens.input_ids, attention_mask=cond_tokens.attention_mask)[0] #[:, select_index] + null_c = text_encoder_fix(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0] + + role = torch.zeros((1, 1 + num_conditions), dtype=torch.long, device=device) + model_kwargs = dict(mask=cond_tokens.attention_mask, role=role) + + dpm_solver = DPMS( + model.forward, + cfg_scale=4.5, + 
condition=prompt_c, + uncondition=null_c, + model_type="flow", + model_kwargs=model_kwargs, + schedule="FLOW", + #prompt_len=caption_embs.shape[-1] + ) + + print(caption_embs.shape, null_y.shape) + + denoised = dpm_solver.sample( + z, + steps=100, + order=2, + skip_type="time_uniform_flow", + method="multistep", + flow_shift=config.scheduler.flow_shift, + ) + latents.append(denoised) + #logits.append(autoregressived) + + torch.cuda.empty_cache() + + # Decode latents + image_logs = [] + for prompt, latent in zip(validation_prompts, latents): + latent = latent.to(torch.float16) + latent = torch.unbind(latent, dim=1) + #caption = tokenizer.decode(logit[0].long().tolist(), skip_special_tokens=True) + #print(f'caption:{caption}') + images = [] + for lat in latent: + sample = vae_decode(config.vae.vae_type, vae, lat) + sample = torch.clamp(127.5 * sample + 128.0, 0, 255) + sample = sample.permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()[0] + images.append(Image.fromarray(sample)) + image_logs.append({"validation_prompt": prompt, "images": images}) + + # Save images + def concatenate_images(image_caption, images_per_row=5, image_format="webp"): + import io + images = list(itertools.chain.from_iterable([log["images"] for log in image_caption])) + if images[0].size[0] > 1024: + images = [image.resize((1024, 1024)) for image in images] + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + total_height = sum(heights[i : i + images_per_row][0] for i in range(0, len(images), images_per_row)) + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_images = images[i : i + images_per_row] + x_offset = 0 + for img in row_images: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[i] + webp_image_bytes = io.BytesIO() + new_im.save(webp_image_bytes, format=image_format) + webp_image_bytes.seek(0) + new_im = 
Image.open(webp_image_bytes) + return new_im + + file_format = "png" # "webp" + local_vis_save_path = osp.join(config.work_dir, "log_vis") + os.umask(0o000) + os.makedirs(local_vis_save_path, exist_ok=True) + concatenated_image = concatenate_images(image_logs, images_per_row=num_conditions+1, image_format=file_format) + save_path = osp.join(local_vis_save_path, f"vis_{step}.{file_format}") + concatenated_image.save(save_path) + + model.train() + flush() + return image_logs + + +def train(config, args, accelerator, model, optimizer, lr_scheduler, train_dataloader, train_diffusion, logger): + if getattr(config.train, "debug_nan", False): + DebugUnderflowOverflow(model) + logger.info("NaN debugger registered. Start to detect overflow during training.") + log_buffer = LogBuffer() + + global_step = start_step + 1 + skip_step = max(config.train.skip_step, global_step) % train_dataloader_len + skip_step = skip_step if skip_step < (train_dataloader_len - 20) else 0 + loss_nan_timer = 0 + + # Now you train the model + for epoch in range(start_epoch + 1, config.train.num_epochs + 1): + time_start, last_tic = time.time(), time.time() + sampler = train_dataloader.batch_sampler.sampler + sampler.set_epoch(epoch) + sampler.set_start(max((skip_step - 1) * config.train.train_batch_size, 0)) + if skip_step > 1 and accelerator.is_main_process: + logger.info(f"Skipped Steps: {skip_step}") + skip_step = 1 + data_time_start = time.time() + data_time_all = 0 + lm_time_all = 0 + vae_time_all = 0 + model_time_all = 0 + for step, batch in enumerate(train_dataloader): + # NOTE: train_dataloader is infinite since batch_sampler is infinite + # therefore, we actually stay in this for loop until the end of training + accelerator.wait_for_everyone() + data_time_all += time.time() - data_time_start + vae_time_start = time.time() + with torch.no_grad(): + with torch.amp.autocast( + "cuda", + enabled=(config.model.mixed_precision == "fp16" or config.model.mixed_precision == "bf16"), + ): + z = [] + 
imgs = torch.unbind(batch["img"], dim=1) + for img in imgs: + z.append(vae_encode( + config.vae.vae_type, vae, img, config.vae.sample_posterior, accelerator.device + )) + z = torch.stack(z, dim=1) + + accelerator.wait_for_everyone() + vae_time_all += time.time() - vae_time_start + + clean_images = z + + lm_time_start = time.time() + + y = batch["inputs"].to(accelerator.device) + labels = batch["targets"].to(accelerator.device) + y_mask = batch["masks"].to(accelerator.device) + p_len = batch["prompt_length"].to(accelerator.device) + c_len = batch["caption_length"].to(accelerator.device) + + # Sample a random timestep for each image + bs = clean_images.shape[0] + timesteps = torch.randint( + 0, config.scheduler.train_sampling_steps, (bs,), device=clean_images.device + ).long() + ratio = timesteps / config.scheduler.train_sampling_steps + if config.scheduler.weighting_scheme in ["logit_normal"]: + # adapting from diffusers.training_utils + u = compute_density_for_timestep_sampling( + weighting_scheme=config.scheduler.weighting_scheme, + batch_size=bs, + logit_mean=config.scheduler.logit_mean, + logit_std=config.scheduler.logit_std, + mode_scale=None, # not used + ) + timesteps = (u * config.scheduler.train_sampling_steps).long().to(clean_images.device) + + # Get the role + role = batch["role"] + assert role.shape == (bs, 1+num_conditions) + + with torch.no_grad(): + prompt_ids = batch["prompt_ids"].to(accelerator.device) + prompt_mask = batch["prompt_masks"].to(accelerator.device) + + c = text_encoder_fix(prompt_ids, attention_mask=prompt_mask)[0] + c_mask = prompt_mask + + #c_mask = txt_tokens.attention_maskgrad_norm = None + + accelerator.wait_for_everyone() + + global_step += 1 + + if (global_step + 1) % config.train.eval_sampling_steps == 0 or (step + 1) == 1: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + img_txt_logs = log_validation( + accelerator=accelerator, + config=config, + model=model, + logger=logger, + step=global_step, + 
device=accelerator.device, + ) + local_vis_save_path = osp.join(config.work_dir, "log_vis") + exit() + + + data_time_start = time.time() + + if epoch % config.train.save_model_epochs == 0 or epoch == config.train.num_epochs and not config.debug: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + os.umask(0o000) + save_checkpoint( + osp.join(config.work_dir, "checkpoints"), + epoch=epoch, + step=global_step, + model=accelerator.unwrap_model(model), + optimizer=optimizer, + lr_scheduler=lr_scheduler, + generator=generator, + add_symlink=True, + ) + + accelerator.wait_for_everyone() + + +@pyrallis.wrap() +def main(cfg: JodiConfig) -> None: + global train_dataloader_len, start_epoch, start_step, vae, generator, num_replicas, rank, training_start_time + global text_encoder, tokenizer, text_encoder_fix + global max_length, validation_prompts, latent_size, valid_prompt_embed_suffix, null_embed_path + global image_size, cache_file, total_steps + global num_conditions + + config = cfg + args = cfg + + training_start_time = time.time() + + if args.debug: + config.train.log_interval = 1 + config.train.train_batch_size = min(64, config.train.train_batch_size) + args.report_to = "tensorboard" + + os.umask(0o000) + os.makedirs(config.work_dir, exist_ok=True) + + # Initialize accelerator + init_handler = InitProcessGroupKwargs() + init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug + accelerator = Accelerator( + mixed_precision=config.model.mixed_precision, + gradient_accumulation_steps=config.train.gradient_accumulation_steps, + log_with=args.report_to, + project_dir=osp.join(config.work_dir, "logs"), + kwargs_handlers=[init_handler], + ) + + validation_prompts = config.train.validation_prompts + + # Initialize tensorboard logging + log_name = "train_log.log" + logger = get_root_logger(osp.join(config.work_dir, log_name)) + logger.info(accelerator.state) + + # Set random seed + config.train.seed = 
init_random_seed(getattr(config.train, "seed", None)) + set_random_seed(config.train.seed + int(os.environ["LOCAL_RANK"])) + generator = torch.Generator(device="cpu").manual_seed(config.train.seed) + + if accelerator.is_main_process: + pyrallis.dump(config, open(osp.join(config.work_dir, "config.yaml"), "w"), sort_keys=False, indent=4) + + # logger.info(f"Config: \n{config}") + logger.info(f"World_size: {get_world_size()}") + logger.info(f"seed: {config.train.seed}") + logger.info(f"Initializing: DDP for training") + + # Get pretrained VAE + vae = get_vae(config.vae.vae_type, config.vae.vae_pretrained, accelerator.device).to(torch.float16) + logger.info(f"vae type: {config.vae.vae_type}") + + # Get tokenizer and text encoder + tokenizer, text_encoder, text_encoder_fix = get_tokenizer_and_text_encoder(config.text_encoder.text_encoder_name, accelerator.device) + + import copy + text_encoder_fix = copy.deepcopy(text_encoder_fix).to(accelerator.device) + + from peft import LoraConfig, get_peft_model, TaskType + from peft.tuners.lora import LoraModel + + lora_config = LoraConfig( + r=16, # 低秩分解维度 + lora_alpha=32, # 缩放系数 + target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"], + lora_dropout=0.05, + bias="none", + task_type=TaskType.CAUSAL_LM, # 🔥 核心:Causal Language Model 类型 + ) + + text_encoder = get_peft_model(text_encoder, lora_config) + + for name, module in text_encoder_fix.named_modules(): + if "lora" in name.lower(): + print("⚠️ Found LoRA module in fixed_text_encoder:", name, "→", type(module)) + + #for i, layer in enumerate(text_encoder.model.layers): + # LoraModel.inject_adapter(layer, lora_config, adapter_name="default") + # print(f"✅ injected LoRA into layer {i}") + + logger.info(f"text encoder: {config.text_encoder.text_encoder_name}") + + # Compute and save null embedding and validation prompts embeddings + os.makedirs(config.train.null_embed_root, exist_ok=True) + text_embed_dim = text_encoder.config.hidden_size + 
chi_prompt = "\n".join(config.text_encoder.chi_prompt) + # logger.info(f"Complex Human Instruct: {chi_prompt}") + max_length = config.text_encoder.model_max_length + null_embed_path = osp.join( + config.train.null_embed_root, + f"null_embed_diffusers_{config.text_encoder.text_encoder_name}_{max_length}token_{text_embed_dim}.pth", + ) + if config.train.visualize and len(config.train.validation_prompts): + # Preparing embeddings for visualization. We put it here for saving GPU memory + null_tokens = tokenizer( + "", max_length=max_length, padding="max_length", truncation=True, return_tensors="pt" + ).to(accelerator.device) + if "T5" in config.text_encoder.text_encoder_name: + null_token_emb = text_encoder(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0] + elif "gemma" in config.text_encoder.text_encoder_name or "Qwen" in config.text_encoder.text_encoder_name: + null_token_emb = text_encoder(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0] + else: + raise ValueError(f"{config.text_encoder.text_encoder_name} is not supported!!") + torch.save( + {"uncond_prompt_embeds": null_token_emb, "uncond_prompt_embeds_mask": null_tokens.attention_mask}, + null_embed_path, + ) + del null_token_emb + del null_tokens + flush() + + os.environ["AUTOCAST_LINEAR_ATTN"] = "true" if config.model.autocast_linear_attn else "false" + + # Build scheduler + train_diffusion = Scheduler( + str(config.scheduler.train_sampling_steps), + noise_schedule=config.scheduler.noise_schedule, + predict_v=config.scheduler.predict_v, + snr=config.train.snr_loss, + flow_shift=config.scheduler.flow_shift, + ) + logger.info(f"v-prediction: {config.scheduler.predict_v}") + logger.info(f"noise schedule: {config.scheduler.noise_schedule}") + if "flow" in config.scheduler.noise_schedule: + logger.info(f"flow shift: {config.scheduler.flow_shift}") + if config.scheduler.weighting_scheme in ["logit_normal", "mode"]: + logger.info(f"flow weighting: 
{config.scheduler.weighting_scheme}") + logger.info(f"logit-mean: {config.scheduler.logit_mean}") + logger.info(f"logit-std: {config.scheduler.logit_std}") + + # Build models + image_size = config.model.image_size + latent_size = int(image_size) // config.vae.vae_downsample_rate + num_conditions = len(config.data.datasets[0]["conditions"]) + logger.info(f"Number of conditions: {num_conditions}") + model_kwargs = { + "config": config, + "in_channels": config.vae.vae_latent_dim, + "mlp_ratio": config.model.mlp_ratio, + "caption_channels": text_embed_dim, + "pe_interpolation": config.model.pe_interpolation, + "model_max_length": max_length, + "qk_norm": config.model.qk_norm, + "y_norm": config.text_encoder.y_norm, + "attn_type": config.model.attn_type, + "y_norm_scale_factor": config.text_encoder.y_norm_scale_factor, + "mlp_acts": list(config.model.mlp_acts), + "use_pe": config.model.use_pe, + "linear_head_dim": config.model.linear_head_dim, + "num_conditions": num_conditions, + } + model = build_model( + config.model.model, + config.train.grad_checkpointing, + getattr(config.model, "fp32_attention", False), + input_size=latent_size, + **model_kwargs, + ).train() + logger.info( + colored( + f"{model.__class__.__name__}:{config.model.model}, " + f"Model Parameters: {sum(p.numel() for p in model.parameters()) / 1e6:.2f}M", + "green", + attrs=["bold"], + ) + ) + + # Load weights + load_from = True + state_dict = None + if args.resume_from or config.model.resume_from: + load_from = False + config.model.resume_from = dict( + checkpoint=args.resume_from or config.model.resume_from, + load_ema=False, + resume_optimizer=True, + resume_lr_scheduler=True, + ) + if args.load_from is not None: + config.model.load_from = args.load_from # type: ignore + if config.model.load_from is not None and load_from: + state_dict, _, missing, unexpected, _ = load_checkpoint( + config.model.load_from, + model, + load_ema=config.model.resume_from.get("load_ema", False), + 
null_embed_path=null_embed_path, + ) + logger.warning(f"Missing keys: {missing}") + logger.warning(f"Unexpected keys: {unexpected}") + + # Build dataset + dataset = [build_dataset(d, tokenizer=tokenizer) for d in config.data.datasets] + dataset = RandomConcatJodiDataset(dataset) + logger.info(f"Dataset size: {len(dataset)}") + accelerator.wait_for_everyone() + + # Build dataloader + num_replicas = int(os.environ["WORLD_SIZE"]) + rank = int(os.environ["RANK"]) + sampler = DistributedRangedSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=True) + batch_sampler = AspectRatioBatchSampler( + sampler=sampler, + dataset=dataset, + batch_size=config.train.train_batch_size, + aspect_ratios=dataset.aspect_ratio, + ratio_nums=dataset.ratio_nums, + drop_last=True, + config=config, + caching=args.caching, + ) + train_dataloader = build_dataloader( + dataset, + batch_sampler=batch_sampler, + num_workers=config.train.num_workers, + ) + train_dataloader_len = len(train_dataloader) + + # Build optimizer and lr scheduler + lr_scale_ratio = 1 + if getattr(config.train, "auto_lr", None): + lr_scale_ratio = auto_scale_lr( + config.train.train_batch_size * get_world_size() * config.train.gradient_accumulation_steps, + config.train.optimizer, + **config.train.auto_lr, + ) + optimizer = build_optimizer(model, config.train.optimizer) + if config.train.lr_schedule_args and config.train.lr_schedule_args.get("num_warmup_steps", None): + config.train.lr_schedule_args["num_warmup_steps"] = ( + config.train.lr_schedule_args["num_warmup_steps"] * num_replicas + ) + lr_scheduler = build_lr_scheduler(config.train, optimizer, train_dataloader, lr_scale_ratio) + logger.info(f"{colored(f'Basic Setting: ', 'green', attrs=['bold'])}") + logger.info(f"lr: {config.train.optimizer['lr']:.5f}") + logger.info(f"bs: {config.train.train_batch_size}") + logger.info(f"gc: {config.train.grad_checkpointing}") + logger.info(f"gc_accum_step: {config.train.gradient_accumulation_steps}") + 
logger.info(f"qk norm: {config.model.qk_norm}") + logger.info(f"fp32 attn: {config.model.fp32_attention}") + logger.info(f"attn type: {config.model.attn_type}") + logger.info(f"text encoder: {config.text_encoder.text_encoder_name}") + logger.info(f"precision: {config.model.mixed_precision}") + + timestamp = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + + if accelerator.is_main_process: + tracker_config = dict(vars(config)) + try: + accelerator.init_trackers(args.tracker_project_name, tracker_config) + except: + accelerator.init_trackers(f"tb_{timestamp}") + + start_epoch = 0 + start_step = 0 + total_steps = train_dataloader_len * config.train.num_epochs + + # Resume training + if config.model.resume_from is not None and config.model.resume_from["checkpoint"] is not None: + rng_state = None + ckpt_path = osp.join(config.work_dir, "checkpoints") + check_flag = osp.exists(ckpt_path) and len(os.listdir(ckpt_path)) != 0 + if config.model.resume_from["checkpoint"] == "latest": + if check_flag: + checkpoints = os.listdir(ckpt_path) + if "latest.pth" in checkpoints and osp.exists(osp.join(ckpt_path, "latest.pth")): + config.model.resume_from["checkpoint"] = osp.realpath(osp.join(ckpt_path, "latest.pth")) + else: + checkpoints = [i for i in checkpoints if i.startswith("epoch_")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.replace(".pth", "").split("_")[3])) + config.model.resume_from["checkpoint"] = osp.join(ckpt_path, checkpoints[-1]) + else: + config.model.resume_from["checkpoint"] = config.model.load_from + + if config.model.resume_from["checkpoint"] is not None: + ignore_keys = [] + if args.extend_to_new_domain: + ignore_keys = ["domain_embedding"] + state_dict, _, missing, unexpected, rng_state = load_checkpoint( + **config.model.resume_from, + model=model, + optimizer=optimizer if check_flag else None, + lr_scheduler=lr_scheduler if check_flag else None, + null_embed_path=null_embed_path, + ignore_keys=ignore_keys, + ) + logger.warning(f"Missing 
keys: {missing}") + logger.warning(f"Unexpected keys: {unexpected}") + + path = osp.basename(config.model.resume_from["checkpoint"]) + try: + start_epoch = int(path.replace(".pth", "").split("_")[1]) - 1 + start_step = int(path.replace(".pth", "").split("_")[3]) + except: + pass + + # resume randomise + if rng_state: + logger.info("resuming randomise") + torch.set_rng_state(rng_state["torch"]) + np.random.set_state(rng_state["numpy"]) + random.setstate(rng_state["python"]) + generator.set_state(rng_state["generator"]) # resume generator status + try: + torch.cuda.set_rng_state_all(rng_state["torch_cuda"]) + except: + logger.warning("Failed to resume torch_cuda rng state") + + if state_dict is not None: + # resume from sana: copy x_embedder and final_layer + if args.resume_from_sana: + for x_embedder in model.x_embedders: + x_embedder.proj.weight.data.copy_(state_dict["x_embedder.proj.weight"]) + x_embedder.proj.bias.data.copy_(state_dict["x_embedder.proj.bias"]) + model.final_layers[0].linear.weight.data.copy_(state_dict["final_layer.linear.weight"]) + model.final_layers[0].linear.bias.data.copy_(state_dict["final_layer.linear.bias"]) + model.final_layers[0].scale_shift_table.data.copy_(state_dict["final_layer.scale_shift_table"]) + logger.info("Copied x_embedder and final_layer weights from pretrained model.") + # extend to new domain: copy x_embedder and final_layer from specified domain, copy domain embedding + elif args.extend_to_new_domain: + num_domains = state_dict["domain_embedding"].shape[0] + model.domain_embedding[:num_domains].data.copy_(state_dict["domain_embedding"]) + idx = args.extend_to_new_domain_copy_id + if not isinstance(idx, list): + idx = [idx] + for i in range(len(idx)): + model.x_embedders[num_domains+i].proj.weight.data.copy_(state_dict[f"x_embedders.{idx[i]}.proj.weight"]) + model.x_embedders[num_domains+i].proj.bias.data.copy_(state_dict[f"x_embedders.{idx[i]}.proj.bias"]) + 
model.final_layers[num_domains+i].linear.weight.data.copy_(state_dict[f"final_layers.{idx[i]}.linear.weight"]) + model.final_layers[num_domains+i].linear.bias.data.copy_(state_dict[f"final_layers.{idx[i]}.linear.bias"]) + model.final_layers[num_domains+i].scale_shift_table.data.copy_(state_dict[f"final_layers.{idx[i]}.scale_shift_table"]) + logger.info("Copied x_embedder, final_layer and domain embedding weights from pretrained model.") + # resume from previous checkpoint: state dicts must match + else: + #assert len(missing) == 0, f"Missing keys: {missing}" # noqa + #assert len(unexpected) == 0, f"Unexpected keys: {unexpected}" # noqa + logger.info("Successfully loaded pretrained model.") + + # Prepare everything + # There is no specific order to remember, you just need to unpack the + # objects in the same order you gave them to the prepare method. + model = accelerator.prepare(model) + optimizer, lr_scheduler = accelerator.prepare(optimizer, lr_scheduler) + + # Start Training + train( + config=config, + args=args, + accelerator=accelerator, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + train_dataloader=train_dataloader, + train_diffusion=train_diffusion, + logger=logger, + ) + + +if __name__ == "__main__": + main() diff --git a/train_from_jodi.sh b/train_from_jodi.sh new file mode 100644 index 0000000000000000000000000000000000000000..728ffde633894d0c349220c1209b5f01ad606eb1 --- /dev/null +++ b/train_from_jodi.sh @@ -0,0 +1,14 @@ +export CUDA_LAUNCH_BLOCKING=1 +export CUDA_VISIBLE_DEVICES="4,5" +export NGPU=2 +export WORK_DIR=./output/train_from_jodi +export LOAD_FROM=hf://VIPL-GENUN/Jodi/Jodi.pth + +torchrun --nproc_per_node=$NGPU --master_port=21542 scripts/train.py \ + --config_path $1 \ + --model.load_from $LOAD_FROM \ + --work_dir $WORK_DIR \ + --resume_from latest \ + --resume_from_sana false \ + --train.train_batch_size 8 \ + --model.use_pe true diff --git a/train_from_sana.sh b/train_from_sana.sh new file mode 100644 index 
0000000000000000000000000000000000000000..f34f995e29778cf58e52129afc3fde69b7ea8933 --- /dev/null +++ b/train_from_sana.sh @@ -0,0 +1,13 @@ +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" +export NGPU=8 +export WORK_DIR=./output/train_from_sana +export LOAD_FROM=hf://Efficient-Large-Model/Sana_1600M_1024px_BF16/checkpoints/Sana_1600M_1024px_BF16.pth + +torchrun --nproc_per_node=$NGPU --master_port=21540 scripts/train.py \ + --config_path $1 \ + --model.load_from $LOAD_FROM \ + --work_dir $WORK_DIR \ + --resume_from latest \ + --resume_from_sana true \ + --train.train_batch_size 4 \ + --model.use_pe true diff --git a/train_o.py b/train_o.py new file mode 100644 index 0000000000000000000000000000000000000000..4736974585507673ae73cc735bb01de4bb380908 --- /dev/null +++ b/train_o.py @@ -0,0 +1,781 @@ +# This file is modified from https://github.com/NVlabs/Sana + +# Copyright 2024 NVIDIA CORPORATION & AFFILIATES +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +sys.path.append(os.path.abspath('.')) + +import datetime +import hashlib +import itertools +import os.path as osp +import random +import time +import warnings +from pathlib import Path + +import numpy as np +import pyrallis +import torch +from accelerate import Accelerator, InitProcessGroupKwargs +from PIL import Image +from termcolor import colored + +warnings.filterwarnings("ignore") # ignore warning + + +from diffusion import DPMS, Scheduler +from data.builder import build_dataloader, build_dataset +from data.datasets.jodi_dataset import RandomConcatJodiDataset +from data.sampler import DistributedRangedSampler +from model.builder import build_model, get_tokenizer_and_text_encoder, get_vae, vae_decode, vae_encode +from diffusion.respace import compute_density_for_timestep_sampling +from utils.checkpoint import load_checkpoint, save_checkpoint +from utils.config import JodiConfig +from utils.data_sampler import AspectRatioBatchSampler +from utils.dist_utils import flush, get_world_size +from utils.logger import LogBuffer, get_root_logger +from utils.lr_scheduler import build_lr_scheduler +from utils.misc import DebugUnderflowOverflow, init_random_seed, set_random_seed +from utils.optimizer import auto_scale_lr, build_optimizer + +os.environ["TOKENIZERS_PARALLELISM"] = "false" + + +@torch.inference_mode() +def log_validation(accelerator, config, model, logger, step, device): + torch.cuda.empty_cache() + model = accelerator.unwrap_model(model).eval() + null_y = torch.load(null_embed_path, map_location="cpu") + null_y = null_y["uncond_prompt_embeds"].to(device) + + logger.info("Running validation... 
") + + # Run sampling + latents = [] + for prompt in validation_prompts: + z = torch.randn(1, 1 + num_conditions, config.vae.vae_latent_dim, latent_size, latent_size, device=device) + embed = torch.load( + osp.join(config.train.valid_prompt_embed_root, f"{prompt[:50]}_{valid_prompt_embed_suffix}"), + map_location="cpu", + ) + caption_embs, emb_masks = embed["caption_embeds"].to(device), embed["emb_mask"].to(device) + role = torch.zeros((1, 1 + num_conditions), dtype=torch.long, device=device) + model_kwargs = dict(mask=emb_masks, role=role) + dpm_solver = DPMS( + model.forward, + condition=caption_embs, + uncondition=null_y, + cfg_scale=4.5, + model_type="flow", + model_kwargs=model_kwargs, + schedule="FLOW", + ) + denoised = dpm_solver.sample( + z, + steps=20, + order=2, + skip_type="time_uniform_flow", + method="multistep", + flow_shift=config.scheduler.flow_shift, + ) + latents.append(denoised) + torch.cuda.empty_cache() + + # Decode latents + image_logs = [] + for prompt, latent in zip(validation_prompts, latents): + latent = latent.to(torch.float16) + latent = torch.unbind(latent, dim=1) + images = [] + for lat in latent: + sample = vae_decode(config.vae.vae_type, vae, lat) + sample = torch.clamp(127.5 * sample + 128.0, 0, 255) + sample = sample.permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()[0] + images.append(Image.fromarray(sample)) + image_logs.append({"validation_prompt": prompt, "images": images}) + + # Save images + def concatenate_images(image_caption, images_per_row=5, image_format="webp"): + import io + images = list(itertools.chain.from_iterable([log["images"] for log in image_caption])) + if images[0].size[0] > 1024: + images = [image.resize((1024, 1024)) for image in images] + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + total_height = sum(heights[i : i + images_per_row][0] for i in range(0, len(images), images_per_row)) + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset 
= 0 + for i in range(0, len(images), images_per_row): + row_images = images[i : i + images_per_row] + x_offset = 0 + for img in row_images: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[i] + webp_image_bytes = io.BytesIO() + new_im.save(webp_image_bytes, format=image_format) + webp_image_bytes.seek(0) + new_im = Image.open(webp_image_bytes) + return new_im + + file_format = "png" # "webp" + local_vis_save_path = osp.join(config.work_dir, "log_vis") + os.umask(0o000) + os.makedirs(local_vis_save_path, exist_ok=True) + concatenated_image = concatenate_images(image_logs, images_per_row=num_conditions+1, image_format=file_format) + save_path = osp.join(local_vis_save_path, f"vis_{step}.{file_format}") + concatenated_image.save(save_path) + + model.train() + flush() + return image_logs + + +def train(config, args, accelerator, model, optimizer, lr_scheduler, train_dataloader, train_diffusion, logger): + if getattr(config.train, "debug_nan", False): + DebugUnderflowOverflow(model) + logger.info("NaN debugger registered. 
Start to detect overflow during training.") + log_buffer = LogBuffer() + + global_step = start_step + 1 + skip_step = max(config.train.skip_step, global_step) % train_dataloader_len + skip_step = skip_step if skip_step < (train_dataloader_len - 20) else 0 + loss_nan_timer = 0 + + # Now you train the model + for epoch in range(start_epoch + 1, config.train.num_epochs + 1): + time_start, last_tic = time.time(), time.time() + sampler = train_dataloader.batch_sampler.sampler + sampler.set_epoch(epoch) + sampler.set_start(max((skip_step - 1) * config.train.train_batch_size, 0)) + if skip_step > 1 and accelerator.is_main_process: + logger.info(f"Skipped Steps: {skip_step}") + skip_step = 1 + data_time_start = time.time() + data_time_all = 0 + lm_time_all = 0 + vae_time_all = 0 + model_time_all = 0 + for step, batch in enumerate(train_dataloader): + # NOTE: train_dataloader is infinite since batch_sampler is infinite + # therefore, we actually stay in this for loop until the end of training + accelerator.wait_for_everyone() + data_time_all += time.time() - data_time_start + vae_time_start = time.time() + with torch.no_grad(): + with torch.amp.autocast( + "cuda", + enabled=(config.model.mixed_precision == "fp16" or config.model.mixed_precision == "bf16"), + ): + z = [] + imgs = torch.unbind(batch["img"], dim=1) + for img in imgs: + z.append(vae_encode( + config.vae.vae_type, vae, img, config.vae.sample_posterior, accelerator.device + )) + z = torch.stack(z, dim=1) + + accelerator.wait_for_everyone() + vae_time_all += time.time() - vae_time_start + + clean_images = z + + lm_time_start = time.time() + if "T5" in config.text_encoder.text_encoder_name: + with torch.no_grad(): + txt_tokens = tokenizer( + batch["text"], max_length=max_length, padding="max_length", truncation=True, return_tensors="pt" + ).to(accelerator.device) + y = text_encoder(txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask)[0][:, None] + y_mask = txt_tokens.attention_mask[:, None, None] + elif 
( + "gemma" in config.text_encoder.text_encoder_name or "Qwen" in config.text_encoder.text_encoder_name + ): + with torch.no_grad(): + chi_prompt = "\n".join(config.text_encoder.chi_prompt) + prompt = [chi_prompt + i for i in batch["text"]] + num_chi_prompt_tokens = len(tokenizer.encode(chi_prompt)) + max_length_all = ( + num_chi_prompt_tokens + config.text_encoder.model_max_length - 2 + ) # magic number 2: [bos], [_] + txt_tokens = tokenizer( + prompt, + padding="max_length", + max_length=max_length_all, + truncation=True, + return_tensors="pt", + ).to(accelerator.device) + select_index = [0] + list( + range(-config.text_encoder.model_max_length + 1, 0) + ) # first bos and end N-1 + y = text_encoder(txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask)[0][:, None][ + :, :, select_index + ] + y_mask = txt_tokens.attention_mask[:, None, None][:, :, :, select_index] + else: + print("error") + exit() + + # Sample a random timestep for each image + bs = clean_images.shape[0] + timesteps = torch.randint( + 0, config.scheduler.train_sampling_steps, (bs,), device=clean_images.device + ).long() + if config.scheduler.weighting_scheme in ["logit_normal"]: + # adapting from diffusers.training_utils + u = compute_density_for_timestep_sampling( + weighting_scheme=config.scheduler.weighting_scheme, + batch_size=bs, + logit_mean=config.scheduler.logit_mean, + logit_std=config.scheduler.logit_std, + mode_scale=None, # not used + ) + timesteps = (u * config.scheduler.train_sampling_steps).long().to(clean_images.device) + + # Get the role + role = batch["role"] + assert role.shape == (bs, 1+num_conditions) + + grad_norm = None + accelerator.wait_for_everyone() + lm_time_all += time.time() - lm_time_start + model_time_start = time.time() + with accelerator.accumulate(model): + # Predict the noise residual + optimizer.zero_grad() + loss_term = train_diffusion.training_losses( + model, clean_images, timesteps, + model_kwargs=dict(y=y, role=role, mask=y_mask, 
clean_x=clean_images), + ) + loss = loss_term["loss"].mean() + accelerator.backward(loss) + if accelerator.sync_gradients: + grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.train.gradient_clip) + optimizer.step() + lr_scheduler.step() + accelerator.wait_for_everyone() + model_time_all += time.time() - model_time_start + + if torch.any(torch.isnan(loss)): + loss_nan_timer += 1 + lr = lr_scheduler.get_last_lr()[0] + logs = {args.loss_report_name: accelerator.gather(loss).mean().item()} + if grad_norm is not None: + logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) + log_buffer.update(logs) + if (step + 1) % config.train.log_interval == 0 or (step + 1) == 1: + accelerator.wait_for_everyone() + t = (time.time() - last_tic) / config.train.log_interval + t_d = data_time_all / config.train.log_interval + t_m = model_time_all / config.train.log_interval + t_lm = lm_time_all / config.train.log_interval + t_vae = vae_time_all / config.train.log_interval + avg_time = (time.time() - time_start) / (step + 1) + eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - global_step - 1)))) + eta_epoch = str( + datetime.timedelta( + seconds=int( + avg_time + * (train_dataloader_len - sampler.step_start // config.train.train_batch_size - step - 1) + ) + ) + ) + log_buffer.average() + + current_step = ( + global_step - sampler.step_start // config.train.train_batch_size + ) % train_dataloader_len + current_step = train_dataloader_len if current_step == 0 else current_step + info = ( + f"Epoch: {epoch} | Global Step: {global_step} | Local Step: {current_step} // {train_dataloader_len}, " + f"total_eta: {eta}, epoch_eta:{eta_epoch}, time: all:{t:.3f}, model:{t_m:.3f}, data:{t_d:.3f}, " + f"lm:{t_lm:.3f}, vae:{t_vae:.3f}, lr:{lr:.3e}, " + ) + info += ( + f"s:({model.module.h}, {model.module.w}), " + if hasattr(model, "module") + else f"s:({model.h}, {model.w}), " + ) + + info += ", ".join([f"{k}:{v:.4f}" for k, v in 
log_buffer.output.items()]) + last_tic = time.time() + log_buffer.clear() + data_time_all = 0 + model_time_all = 0 + lm_time_all = 0 + vae_time_all = 0 + if accelerator.is_main_process: + logger.info(info) + + logs.update(lr=lr) + if accelerator.is_main_process: + accelerator.log(logs, step=global_step) + + global_step += 1 + + if loss_nan_timer > 20: + raise ValueError("Loss is NaN too much times. Break here.") + if ( + global_step % config.train.save_model_steps == 0 + or (time.time() - training_start_time) / 3600 > config.train.training_hours + ): + accelerator.wait_for_everyone() + if accelerator.is_main_process: + os.umask(0o000) + save_checkpoint( + osp.join(config.work_dir, "checkpoints"), + epoch=epoch, + step=global_step, + model=accelerator.unwrap_model(model), + optimizer=optimizer, + lr_scheduler=lr_scheduler, + generator=generator, + add_symlink=True, + ) + + if (time.time() - training_start_time) / 3600 > config.train.training_hours: + logger.info(f"Stopping training at epoch {epoch}, step {global_step} due to time limit.") + return + if config.train.visualize and (global_step % config.train.eval_sampling_steps == 0 or (step + 1) == 1): + accelerator.wait_for_everyone() + if accelerator.is_main_process: + log_validation( + accelerator=accelerator, + config=config, + model=model, + logger=logger, + step=global_step, + device=accelerator.device, + ) + + data_time_start = time.time() + + if epoch % config.train.save_model_epochs == 0 or epoch == config.train.num_epochs and not config.debug: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + os.umask(0o000) + save_checkpoint( + osp.join(config.work_dir, "checkpoints"), + epoch=epoch, + step=global_step, + model=accelerator.unwrap_model(model), + optimizer=optimizer, + lr_scheduler=lr_scheduler, + generator=generator, + add_symlink=True, + ) + + accelerator.wait_for_everyone() + + +@pyrallis.wrap() +def main(cfg: JodiConfig) -> None: + global train_dataloader_len, start_epoch, 
    start_step, vae, generator, num_replicas, rank, training_start_time
    global text_encoder, tokenizer
    global max_length, validation_prompts, latent_size, valid_prompt_embed_suffix, null_embed_path
    global image_size, cache_file, total_steps
    global num_conditions

    config = cfg
    args = cfg

    training_start_time = time.time()

    if args.debug:
        # Debug mode: log every step, cap the batch size, force tensorboard.
        config.train.log_interval = 1
        config.train.train_batch_size = min(64, config.train.train_batch_size)
        args.report_to = "tensorboard"

    os.umask(0o000)
    os.makedirs(config.work_dir, exist_ok=True)

    # Initialize accelerator
    init_handler = InitProcessGroupKwargs()
    init_handler.timeout = datetime.timedelta(seconds=5400)  # change timeout to avoid a strange NCCL bug
    accelerator = Accelerator(
        mixed_precision=config.model.mixed_precision,
        gradient_accumulation_steps=config.train.gradient_accumulation_steps,
        log_with=args.report_to,
        project_dir=osp.join(config.work_dir, "logs"),
        kwargs_handlers=[init_handler],
    )

    # Initialize tensorboard logging
    log_name = "train_log.log"
    logger = get_root_logger(osp.join(config.work_dir, log_name))
    logger.info(accelerator.state)

    # Set random seed (per-rank offset so each process draws different batches)
    config.train.seed = init_random_seed(getattr(config.train, "seed", None))
    set_random_seed(config.train.seed + int(os.environ["LOCAL_RANK"]))
    generator = torch.Generator(device="cpu").manual_seed(config.train.seed)

    if accelerator.is_main_process:
        pyrallis.dump(config, open(osp.join(config.work_dir, "config.yaml"), "w"), sort_keys=False, indent=4)
        if args.report_to == "wandb":
            import wandb
            wandb.init(project=args.tracker_project_name, name=args.name, resume="allow", id=args.name)

    # logger.info(f"Config: \n{config}")
    logger.info(f"World_size: {get_world_size()}")
    logger.info(f"seed: {config.train.seed}")
    logger.info(f"Initializing: DDP for training")

    # Get pretrained VAE (kept frozen in fp16; only used for encode/decode)
    vae = get_vae(config.vae.vae_type, config.vae.vae_pretrained, accelerator.device).to(torch.float16)
    logger.info(f"vae type: {config.vae.vae_type}")

    # Get tokenizer and text encoder
    tokenizer, text_encoder = get_tokenizer_and_text_encoder(config.text_encoder.text_encoder_name, accelerator.device)
    logger.info(f"text encoder: {config.text_encoder.text_encoder_name}")

    # Compute and save null embedding and validation prompts embeddings
    os.makedirs(config.train.null_embed_root, exist_ok=True)
    text_embed_dim = text_encoder.config.hidden_size
    chi_prompt = "\n".join(config.text_encoder.chi_prompt)
    # logger.info(f"Complex Human Instruct: {chi_prompt}")
    max_length = config.text_encoder.model_max_length
    null_embed_path = osp.join(
        config.train.null_embed_root,
        f"null_embed_diffusers_{config.text_encoder.text_encoder_name}_{max_length}token_{text_embed_dim}.pth",
    )
    if config.train.visualize and len(config.train.validation_prompts):
        # Preparing embeddings for visualization. We put it here for saving GPU memory
        valid_prompt_embed_suffix = f"{max_length}token_{config.text_encoder.text_encoder_name}_{text_embed_dim}.pth"
        validation_prompts = config.train.validation_prompts
        skip = True
        # Cache directory is keyed by a hash of the instruction prompt so a
        # changed chi_prompt invalidates previously cached embeddings.
        uuid_chi_prompt = hashlib.sha256(chi_prompt.encode()).hexdigest()
        config.train.valid_prompt_embed_root = osp.join(config.train.valid_prompt_embed_root, uuid_chi_prompt)
        Path(config.train.valid_prompt_embed_root).mkdir(parents=True, exist_ok=True)

        # Save complex human instruct to a file
        chi_prompt_file = osp.join(config.train.valid_prompt_embed_root, "chi_prompt.txt")
        with open(chi_prompt_file, "w", encoding="utf-8") as f:
            f.write(chi_prompt)

        for prompt in validation_prompts:
            prompt_embed_path = osp.join(
                config.train.valid_prompt_embed_root, f"{prompt[:50]}_{valid_prompt_embed_suffix}"
            )
            if not (osp.exists(prompt_embed_path) and osp.exists(null_embed_path)):
                skip = False
                logger.info("Preparing Visualization prompt embeddings...")
                break
        if accelerator.is_main_process and not skip:
            for prompt in validation_prompts:
                prompt_embed_path = osp.join(
                    config.train.valid_prompt_embed_root, f"{prompt[:50]}_{valid_prompt_embed_suffix}"
                )
                if "T5" in config.text_encoder.text_encoder_name:
                    txt_tokens = tokenizer(
                        prompt, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
                    ).to(accelerator.device)
                    caption_emb = text_encoder(txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask)[0]
                    caption_emb_mask = txt_tokens.attention_mask
                elif (
                    "gemma" in config.text_encoder.text_encoder_name or "Qwen" in config.text_encoder.text_encoder_name
                ):
                    # Same chi-prompt + token-selection scheme as in train().
                    chi_prompt = "\n".join(config.text_encoder.chi_prompt)
                    prompt = chi_prompt + prompt
                    num_chi_prompt_tokens = len(tokenizer.encode(chi_prompt))
                    max_length_all = (
                        num_chi_prompt_tokens + config.text_encoder.model_max_length - 2
                    )  # magic number 2: [bos], [_]

                    txt_tokens = tokenizer(
                        prompt,
                        max_length=max_length_all,
                        padding="max_length",
                        truncation=True,
                        return_tensors="pt",
                    ).to(accelerator.device)
                    select_index = [0] + list(range(-config.text_encoder.model_max_length + 1, 0))
                    caption_emb = text_encoder(txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask)[0][
                        :, select_index
                    ]
                    caption_emb_mask = txt_tokens.attention_mask[:, select_index]
                else:
                    raise ValueError(f"{config.text_encoder.text_encoder_name} is not supported!!")

                torch.save({"caption_embeds": caption_emb, "emb_mask": caption_emb_mask}, prompt_embed_path)

            # Unconditional ("") embedding used as the CFG null condition.
            null_tokens = tokenizer(
                "", max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
            ).to(accelerator.device)
            if "T5" in config.text_encoder.text_encoder_name:
                null_token_emb = text_encoder(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0]
            elif "gemma" in config.text_encoder.text_encoder_name or "Qwen" in config.text_encoder.text_encoder_name:
                null_token_emb = text_encoder(null_tokens.input_ids, attention_mask=null_tokens.attention_mask)[0]
            else:
                raise ValueError(f"{config.text_encoder.text_encoder_name} is not supported!!")
            torch.save(
                {"uncond_prompt_embeds": null_token_emb, "uncond_prompt_embeds_mask": null_tokens.attention_mask},
                null_embed_path,
            )
            del null_token_emb
            del null_tokens
            flush()

    os.environ["AUTOCAST_LINEAR_ATTN"] = "true" if config.model.autocast_linear_attn else "false"

    # Build scheduler
    train_diffusion = Scheduler(
        str(config.scheduler.train_sampling_steps),
        noise_schedule=config.scheduler.noise_schedule,
        predict_v=config.scheduler.predict_v,
        snr=config.train.snr_loss,
        flow_shift=config.scheduler.flow_shift,
    )
    logger.info(f"v-prediction: {config.scheduler.predict_v}")
    logger.info(f"noise schedule: {config.scheduler.noise_schedule}")
    if "flow" in config.scheduler.noise_schedule:
        logger.info(f"flow shift: {config.scheduler.flow_shift}")
    if config.scheduler.weighting_scheme in ["logit_normal", "mode"]:
        logger.info(f"flow weighting: {config.scheduler.weighting_scheme}")
        logger.info(f"logit-mean: {config.scheduler.logit_mean}")
        logger.info(f"logit-std: {config.scheduler.logit_std}")

    # Build models
    image_size = config.model.image_size
    latent_size = int(image_size) // config.vae.vae_downsample_rate
    # Conditions are read from the FIRST dataset config; all datasets are
    # assumed to share the same condition list.
    num_conditions = len(config.data.datasets[0]["conditions"])
    logger.info(f"Number of conditions: {num_conditions}")
    model_kwargs = {
        "config": config,
        "in_channels": config.vae.vae_latent_dim,
        "mlp_ratio": config.model.mlp_ratio,
        "caption_channels": text_embed_dim,
        "pe_interpolation": config.model.pe_interpolation,
        "model_max_length": max_length,
        "qk_norm": config.model.qk_norm,
        "y_norm": config.text_encoder.y_norm,
        "attn_type": config.model.attn_type,
        "y_norm_scale_factor": config.text_encoder.y_norm_scale_factor,
        "mlp_acts": list(config.model.mlp_acts),
        "use_pe": config.model.use_pe,
        "linear_head_dim": config.model.linear_head_dim,
        "num_conditions": num_conditions,
    }
    model = build_model(
        config.model.model,
        config.train.grad_checkpointing,
        getattr(config.model, "fp32_attention", False),
        input_size=latent_size,
        **model_kwargs,
    ).train()
    logger.info(
        colored(
            f"{model.__class__.__name__}:{config.model.model}, "
            f"Model Parameters: {sum(p.numel() for p in model.parameters()) / 1e6:.2f}M",
            "green",
            attrs=["bold"],
        )
    )

    # Load weights (load_from is a plain init; resume_from restores training state)
    load_from = True
    state_dict = None
    if args.resume_from or config.model.resume_from:
        load_from = False
        config.model.resume_from = dict(
            checkpoint=args.resume_from or config.model.resume_from,
            load_ema=False,
            resume_optimizer=True,
            resume_lr_scheduler=True,
        )
    if args.load_from is not None:
        config.model.load_from = args.load_from  # type: ignore
    if config.model.load_from is not None and load_from:
        state_dict, _, missing, unexpected, _ = load_checkpoint(
            config.model.load_from,
            model,
            load_ema=config.model.resume_from.get("load_ema", False),
            null_embed_path=null_embed_path,
        )
        logger.warning(f"Missing keys: {missing}")
        logger.warning(f"Unexpected keys: {unexpected}")

    # Build dataset
    dataset = [build_dataset(d) for d in config.data.datasets]
    dataset = RandomConcatJodiDataset(dataset)
    logger.info(f"Dataset size: {len(dataset)}")
    accelerator.wait_for_everyone()

    # Build dataloader
    num_replicas = int(os.environ["WORLD_SIZE"])
    rank = int(os.environ["RANK"])
    sampler = DistributedRangedSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=True)
    batch_sampler = AspectRatioBatchSampler(
        sampler=sampler,
        dataset=dataset,
        batch_size=config.train.train_batch_size,
        aspect_ratios=dataset.aspect_ratio,
        ratio_nums=dataset.ratio_nums,
        drop_last=True,
        config=config,
        caching=args.caching,
    )
    train_dataloader = build_dataloader(
        dataset,
        batch_sampler=batch_sampler,
        num_workers=config.train.num_workers,
    )
    train_dataloader_len = len(train_dataloader)

    # Build optimizer and lr scheduler
    lr_scale_ratio = 1
    if getattr(config.train, "auto_lr", None):
        lr_scale_ratio = auto_scale_lr(
            config.train.train_batch_size * get_world_size() * config.train.gradient_accumulation_steps,
            config.train.optimizer,
            **config.train.auto_lr,
        )
    optimizer = build_optimizer(model, config.train.optimizer)
    if config.train.lr_schedule_args and config.train.lr_schedule_args.get("num_warmup_steps", None):
        # Warmup is specified per-replica; scale to the global step count.
        config.train.lr_schedule_args["num_warmup_steps"] = (
            config.train.lr_schedule_args["num_warmup_steps"] * num_replicas
        )
    lr_scheduler = build_lr_scheduler(config.train, optimizer, train_dataloader, lr_scale_ratio)
    logger.info(f"{colored(f'Basic Setting: ', 'green', attrs=['bold'])}")
    logger.info(f"lr: {config.train.optimizer['lr']:.5f}")
    logger.info(f"bs: {config.train.train_batch_size}")
    logger.info(f"gc: {config.train.grad_checkpointing}")
    logger.info(f"gc_accum_step: {config.train.gradient_accumulation_steps}")
    logger.info(f"qk norm: {config.model.qk_norm}")
    logger.info(f"fp32 attn: {config.model.fp32_attention}")
    logger.info(f"attn type: {config.model.attn_type}")
    logger.info(f"text encoder: {config.text_encoder.text_encoder_name}")
    logger.info(f"precision: {config.model.mixed_precision}")

    timestamp = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())

    if accelerator.is_main_process:
        tracker_config = dict(vars(config))
        try:
            accelerator.init_trackers(args.tracker_project_name, tracker_config)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider `except Exception:`.
        except:
            accelerator.init_trackers(f"tb_{timestamp}")

    start_epoch = 0
    start_step = 0
    total_steps = train_dataloader_len * config.train.num_epochs

    # Resume training
    if config.model.resume_from is not None and config.model.resume_from["checkpoint"] is not None:
        rng_state = None
        ckpt_path = osp.join(config.work_dir, "checkpoints")
        check_flag = osp.exists(ckpt_path) and len(os.listdir(ckpt_path)) != 0
        if config.model.resume_from["checkpoint"] == "latest":
            if check_flag:
                checkpoints = os.listdir(ckpt_path)
                if "latest.pth" in checkpoints and osp.exists(osp.join(ckpt_path, "latest.pth")):
                    config.model.resume_from["checkpoint"] = osp.realpath(osp.join(ckpt_path, "latest.pth"))
                else:
                    # Fall back to the newest epoch_*.pth, sorted by its step
                    # field (filename layout: epoch_E_step_S.pth).
                    checkpoints = [i for i in checkpoints if i.startswith("epoch_")]
                    checkpoints = sorted(checkpoints, key=lambda x: int(x.replace(".pth", "").split("_")[3]))
                    config.model.resume_from["checkpoint"] = osp.join(ckpt_path, checkpoints[-1])
            else:
                config.model.resume_from["checkpoint"] = config.model.load_from

        if config.model.resume_from["checkpoint"] is not None:
            ignore_keys = []
            if args.extend_to_new_domain:
                ignore_keys = ["domain_embedding"]
            state_dict, _, missing, unexpected, rng_state = load_checkpoint(
                **config.model.resume_from,
                model=model,
                optimizer=optimizer if check_flag else None,
                lr_scheduler=lr_scheduler if check_flag else None,
                null_embed_path=null_embed_path,
                ignore_keys=ignore_keys,
            )
            logger.warning(f"Missing keys: {missing}")
            logger.warning(f"Unexpected keys: {unexpected}")

            # Recover epoch/step counters from the checkpoint filename.
            path = osp.basename(config.model.resume_from["checkpoint"])
            try:
                start_epoch = int(path.replace(".pth", "").split("_")[1]) - 1
                start_step = int(path.replace(".pth", "").split("_")[3])
            # NOTE(review): silently keeps (0, 0) when the filename does not
            # follow epoch_E_step_S.pth — deliberate best-effort.
            except:
                pass

        # resume randomise
        if rng_state:
            logger.info("resuming randomise")
            torch.set_rng_state(rng_state["torch"])
            np.random.set_state(rng_state["numpy"])
            random.setstate(rng_state["python"])
            generator.set_state(rng_state["generator"])  # resume generator status
            try:
                torch.cuda.set_rng_state_all(rng_state["torch_cuda"])
            except:
                logger.warning("Failed to resume torch_cuda rng state")

        if state_dict is not None:
            # resume from sana: copy x_embedder and final_layer
            if args.resume_from_sana:
                for x_embedder in model.x_embedders:
                    x_embedder.proj.weight.data.copy_(state_dict["x_embedder.proj.weight"])
                    x_embedder.proj.bias.data.copy_(state_dict["x_embedder.proj.bias"])
                model.final_layers[0].linear.weight.data.copy_(state_dict["final_layer.linear.weight"])
                model.final_layers[0].linear.bias.data.copy_(state_dict["final_layer.linear.bias"])
                model.final_layers[0].scale_shift_table.data.copy_(state_dict["final_layer.scale_shift_table"])
                logger.info("Copied x_embedder and final_layer weights from pretrained model.")
            # extend to new domain: copy x_embedder and final_layer from specified domain, copy domain embedding
            elif args.extend_to_new_domain:
                num_domains = state_dict["domain_embedding"].shape[0]
                model.domain_embedding[:num_domains].data.copy_(state_dict["domain_embedding"])
                idx = args.extend_to_new_domain_copy_id
                if not isinstance(idx, list):
                    idx = [idx]
                for i in range(len(idx)):
                    model.x_embedders[num_domains+i].proj.weight.data.copy_(state_dict[f"x_embedders.{idx[i]}.proj.weight"])
                    model.x_embedders[num_domains+i].proj.bias.data.copy_(state_dict[f"x_embedders.{idx[i]}.proj.bias"])
                    model.final_layers[num_domains+i].linear.weight.data.copy_(state_dict[f"final_layers.{idx[i]}.linear.weight"])
                    model.final_layers[num_domains+i].linear.bias.data.copy_(state_dict[f"final_layers.{idx[i]}.linear.bias"])
                    model.final_layers[num_domains+i].scale_shift_table.data.copy_(state_dict[f"final_layers.{idx[i]}.scale_shift_table"])
                logger.info("Copied x_embedder, final_layer and domain embedding weights from pretrained model.")
            # resume from previous checkpoint: state dicts must match
            else:
                assert len(missing) == 0, f"Missing keys: {missing}"  # noqa
                assert len(unexpected) == 0, f"Unexpected keys: {unexpected}"  # noqa
                logger.info("Successfully loaded pretrained model.")

    # Prepare everything
    # There is no specific order to remember, you just need to unpack the
    # objects in the same order you gave them to the prepare method.
+ model = accelerator.prepare(model) + optimizer, lr_scheduler = accelerator.prepare(optimizer, lr_scheduler) + + # Start Training + train( + config=config, + args=args, + accelerator=accelerator, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + train_dataloader=train_dataloader, + train_diffusion=train_diffusion, + logger=logger, + ) + + +if __name__ == "__main__": + main() diff --git a/train_ori_jodi.sh b/train_ori_jodi.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac7448a0e0ff34676a52e08b02c5c0b92460e181 --- /dev/null +++ b/train_ori_jodi.sh @@ -0,0 +1,13 @@ +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" +export NGPU=8 +export WORK_DIR=./output/train_from_jodi +export LOAD_FROM=hf://VIPL-GENUN/Jodi/Jodi.pth + +torchrun --nproc_per_node=$NGPU --master_port=21540 scripts/train.py \ + --config_path $1 \ + --model.load_from $LOAD_FROM \ + --work_dir $WORK_DIR \ + --resume_from latest \ + --resume_from_sana false \ + --train.train_batch_size 4 \ + --model.use_pe true diff --git a/vqa.py b/vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..7ec3233adcf1db5f4f9e4d43157594066c3bc7dd --- /dev/null +++ b/vqa.py @@ -0,0 +1,461 @@ +import os +import sys +import argparse +from pathlib import Path +from PIL import Image +from typing import Any +import torch +import torchvision.transforms as T + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +os.environ["GRADIO_TEMP_DIR"] = "./tmp" + +from jodi_pipeline import JodiPipeline +from model.postprocess import ( + ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, + NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, +) +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + Qwen3VLForConditionalGeneration, + Qwen3VLMoeForConditionalGeneration +) +from transformers import AutoProcessor, Trainer +from pathlib import Path 
+import itertools + +def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): + """ + 将多个图像拼接成一张大图并保存。 + Args: + image_paths: List[str] 图像路径列表 + save_path: 保存路径(包括文件名) + images_per_row: 每行图像数量(默认为全部在一行) + image_format: 保存格式 + """ + from PIL import Image + import io + + # 读取图像 + images = [Image.open(p).convert("RGB") for p in image_paths] + + if images_per_row is None: + images_per_row = len(images) + + # 调整尺寸(可选) + target_size = min(1024, images[0].size[0]) + images = [img.resize((target_size, target_size)) for img in images] + + # 拼接 + widths, heights = zip(*(img.size for img in images)) + max_width = max(widths) + rows = (len(images) + images_per_row - 1) // images_per_row + total_height = sum(heights[:images_per_row]) * rows + + new_im = Image.new("RGB", (max_width * images_per_row, total_height)) + y_offset = 0 + for i in range(0, len(images), images_per_row): + row_imgs = images[i:i+images_per_row] + x_offset = 0 + for img in row_imgs: + new_im.paste(img, (x_offset, y_offset)) + x_offset += max_width + y_offset += heights[0] + + os.makedirs(os.path.dirname(save_path), exist_ok=True) + new_im.save(save_path, format=image_format.upper()) + print(f"🧩 Saved merged image → {save_path}") + return save_path + + +def build_vqa_message(root, prompt, question): + """ + Build Qwen3-VL message for multi-modal caption refinement. Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides complementary information about the same visual content: " + f"- The RGB image conveys color, texture, lighting, and the overall visual appearance. " + f"- The line drawing highlights object outlines, shapes, and fine structures. " + f"- The edge map emphasizes boundaries and contours. " + f"- The depth map reveals spatial distances, perspective, and 3D relationships. " + f"- The normal map shows surface orientation and geometric curvature. " + f"- The albedo map presents true surface color without illumination or shadows. " + f"- The segmentation map divides the scene into semantic regions and object categories. " + f"- The human pose map indicates body orientation, structure, and articulation. " + f"Together, these modalities offer a unified, rich understanding of the scene, covering its appearance, structure, and spatial layout. 
" + f"Scene description: \"{prompt}\" " + f"Now, based on both the multimodal visual information and the given scene description, " + f"analyze the scene carefully to answer a question. " + f"Your analysis should proceed in two stages:\n\n" + f"**Stage 1 — Modality-wise Observation:**\n" + f"For each provided modality image, analyze what specific visual information it contributes " + f"based on the above definitions. Describe what can be directly observed from each modality, " + f"such as color, shape, structure, spatial depth, or object positions. " + f"Then use visual reasoning grounded in the image evidence and contextual understanding from the description answer the follow question: " + f"Question: \"{question}\" " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + + +def build_multimodal_message(root, coarse_caption="a generic scene"): + """ + Build Qwen3-VL message for multi-modal caption refinement. + Automatically detects available modalities under root. 
+ """ + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # --- 检查存在的模态 --- + available = [] + for name in modality_names: + # 优先匹配 .png 或 .jpg + for ext in [".png", ".jpg", ".jpeg"]: + path = Path(root) / f"{name}{ext}" + if path.exists(): + available.append(str(path)) + break + + # --- 构建模态说明 --- + readable_map = { + "image": "RGB image", + "annotation_lineart": "line drawing", + "annotation_edge": "edge map", + "annotation_depth": "depth map", + "annotation_normal": "normal map", + "annotation_albedo": "albedo map", + "annotation_seg_12colors": "segmentation map", + "annotation_openpose": "human pose map", + } + present_modalities = [readable_map[m] for m in modality_names if any(str(Path(root)/f"{m}{ext}") in available for ext in [".png",".jpg",".jpeg"])] + + # --- 构造文本指令 --- + text_prompt = ( + f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " + f"Each modality provides distinct types of visual information that together describe the same subject: " + f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " + f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " + f"- The edge map highlights object boundaries and contours. " + f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " + f"- The normal map captures fine surface orientation, curvature, and geometric details. " + f"- The albedo map shows true surface colors without lighting or shadow effects. " + f"- The segmentation map provides semantic regions and object boundaries for scene composition. " + f"- The human pose map shows body structure, orientation, and posture of subjects. 
" + f"For each provided modality image, analyze it according to the above definitions and describe " + f"the specific visual information it contributes in this particular case. " + f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " + f"Do NOT describe each modality separately or mention modality names. " + f"Focus on merging their information into a single coherent image description. " + #f"the subject’s appearance, lighting, form, and spatial depth. " + f"Refine the coarse caption into a more detailed and accurate image description. " + f"Coarse caption: '{coarse_caption}' " + + " ".join([""] * len(available)) + ) + + # --- 构建 Qwen3-VL 消息格式 --- + messages = [ + { + "role": "user", + "content": [{"type": "image", "image": path} for path in available] + + [{"type": "text", "text": text_prompt}], + } + ] + return messages + +# ------------------------------ +# Argument Parser +# ------------------------------ +def get_parser(): + parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") + parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") + parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") + parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") + parser.add_argument("--image_path", type=str, default="./assets/test_images/pexels-pixabay-280221.jpg", help="Prompt text for generation.") + parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") + parser.add_argument("--question", type=str, default="how many cars in this image?", help="Optional negative prompt.") + parser.add_argument("--steps", type=int, default=20, 
help="Number of inference steps.") + parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") + parser.add_argument("--guidance_scale", type=float, default=4.5) + parser.add_argument("--seed", type=int, default=1234) + parser.add_argument("--output_dir", type=str, default="./demo_vqa_outputs", help="Directory to save results.") + return parser + + +# ------------------------------ +# Main Inference Function +# ------------------------------ + +@torch.inference_mode() +def init_i2t(model, processor, image_path, iter_num, max_length=300): + messages = [ + { + "role": "user", + "content": [ + { + "type": "image", + "image": image_path, + }, + {"type": "text", "text": "Describe this image."}, + ], + } + ] + + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def text_refine(root, model, processor, prompt, iter_num, max_length=300): + messages = build_multimodal_message(root, prompt) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + + # Inference: Generation of the 
output + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + + return output_text[0] + +@torch.inference_mode() +def vqa(root, model, processor, prompt, question, iter_num, max_length=300): + messages = build_vqa_message(root, prompt, question) + inputs = processor.apply_chat_template( + messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt" + ) + inputs = inputs.to(model.device) + generated_ids = model.generate(**inputs, max_new_tokens=max_length) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)] + output_text = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + print(output_text) + os.makedirs(args.output_dir, exist_ok=True) + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + caption_path = Path(save_dir) / f"caption.txt" + with open(caption_path, "w", encoding="utf-8") as f: + f.write(output_text[0].strip()) + return output_text[0] + +@torch.inference_mode() +def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width): + + print(f"🚀 Generating with prompt: {prompt}") + #prompt = args.prompt + ' ' + prompt + outputs = pipe( + images=images, + role=role, + prompt=prompt, + negative_prompt=args.negative_prompt, + height=height, + 
width=width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + num_images_per_prompt=1, + generator=generator, + task='t2i' + ) + + # Apply post-processing for each modality + results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)] + results = torch.stack(results, dim=1).reshape(-1, 3, height, width) + results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)] + + # -------------------------- + # Save results + # -------------------------- + os.makedirs(args.output_dir, exist_ok=True) + + save_dir = Path(args.output_dir) / f"iteration_{iter_num}" + save_dir.mkdir(parents=True, exist_ok=True) + + for idx, img in enumerate(results): + name = modality_names[idx] + save_path = save_dir / f"{name}.png" + img.save(save_path) + print(f"💾 Saved {name} → {save_path}") + + merged_path = save_dir / f"merged_iteration_{iter_num}.png" + concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) + + print(f"\n✅ All results saved in: {save_dir}\n") + return save_dir + + +# ------------------------------ +# Entry Point +# ------------------------------ +if __name__ == "__main__": + args = get_parser().parse_args() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"✅ Using device: {device}") + + processor = AutoProcessor.from_pretrained( + args.model_name_or_path, + ) + + model = Qwen3VLForConditionalGeneration.from_pretrained( + args.text_model_path, + attn_implementation="flash_attention_2", + dtype=(torch.bfloat16), + ).to(device) + + pipe = JodiPipeline(args.config) + pipe.from_pretrained(args.model_path) + + modality_names = [ + "image", + "annotation_lineart", + "annotation_edge", + "annotation_depth", + "annotation_normal", + "annotation_albedo", + "annotation_seg_12colors", + "annotation_openpose", + ] + + # Build post-processors + post_processors: list[Any] = [ImagePostProcessor()] + for condition in pipe.config.conditions: # type: ignore + if 
condition == "lineart": + post_processors.append(LineartPostProcessor()) + elif condition == "edge": + post_processors.append(EdgePostProcessor()) + elif condition == "depth": + post_processors.append(DepthPostProcessor()) + elif condition == "normal": + post_processors.append(NormalPostProcessor()) + elif condition == "albedo": + post_processors.append(AlbedoPostProcessor()) + elif condition == "segmentation": + post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) + elif condition == "openpose": + post_processors.append(OpenposePostProcessor()) + else: + print(f"⚠️ Warning: Unknown condition: {condition}") + post_processors.append(ImagePostProcessor()) + + torch.manual_seed(args.seed) + generator = torch.Generator(device=device).manual_seed(args.seed) + import glob + image_path = args.image_path + question = args.question + + control_images = [Image.open(image_path).convert("RGB")] + [None] * pipe.num_conditions + + role=[1] + [0] * pipe.num_conditions + print(role) + + max_length = 1024 + + input_img = Image.open(image_path).convert("RGB") + width, height = input_img.size + print(f'ori width:{width}', f'ori height:{height}') + + prompt = init_i2t(model, processor, image_path, 0, max_length) + + for step in range(1, args.iters): + save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width) + max_length += 100 + prompt = text_refine(save_dir, model, processor, prompt, step, max_length) + result = vqa(save_dir, model, processor, prompt, question, args.iters, max_length) + print(f'result:{result}') +