---
license: apache-2.0
language:
- en
base_model:
- Qwen/Qwen2.5-VL-3B-Instruct
pipeline_tag: image-text-to-text
---
# 🩺 ImageDoctor: Diagnosing Text-to-Image Generation via Grounded Image Reasoning
**ImageDoctor** is a unified **evaluation model** for **text-to-image (T2I) generation**, capable of producing both **multi-aspect scalar scores** and **spatially grounded heatmaps**.
It follows a **“look–think–predict”** reasoning paradigm that mimics human visual diagnosis — first localizing flaws, then reasoning about them, and finally producing an interpretable judgment.
## 🔍 Key Features
- **🧠 Multi-Aspect Scoring**
Predicts four fine-grained quality dimensions:
- *Plausibility*
- *Text–Image Alignment*
- *Aesthetics*
- *Overall Quality*
- **📍 Spatially Grounded Feedback**
Generates **heatmaps** highlighting potential artifacts and misalignments for more interpretable feedback.
- **🗣️ Grounded Image Reasoning**
Provides **step-by-step reasoning** that explains how the model arrives at its evaluation.
ImageDoctor not only describes *why* an image succeeds or fails but also **zooms in** on flawed regions—offering spatially grounded visual evidence that highlights artifacts or misalignments.
This localized reasoning enhances interpretability, transparency, and trustworthiness.
- **⚙️ Reinforcement-Finetuned with GRPO**
Trained with **Group Relative Policy Optimization (GRPO)** for richer, more stable preference alignment.
- **💡 Versatile Applications**
- As a **metric** for dense image–text evaluation
  - As a **verifier** in test-time scaling setups (see the sketch after this list)
- As a **reward model** in reinforcement learning (e.g., DenseFlow-GRPO)
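
For the verifier use case, a minimal best-of-N sketch: generate several candidates for the same prompt, score each with ImageDoctor, and keep the highest-rated image. The helper `imagedoctor_overall_score` is hypothetical; it stands in for the Quick Start pipeline below with the *Overall Impression* score parsed out of the answer.

```python
# Best-of-N selection with ImageDoctor as the verifier (sketch).
# `imagedoctor_overall_score(img, prompt)` is a hypothetical helper built
# from the Quick Start code below; it returns a float in [0, 1].
from PIL import Image

def pick_best(candidate_paths: list[str], prompt: str) -> str:
    best_path, best_score = None, float("-inf")
    for path in candidate_paths:
        img = Image.open(path).convert("RGB")
        score = imagedoctor_overall_score(img, prompt)  # hypothetical helper
        if score > best_score:
            best_path, best_score = path, score
    return best_path
```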
## 🖼️ Quick Start
```python
from transformers import AutoProcessor, AutoModelForCausalLM
from PIL import Image
import os
import math
from qwen_vl_utils import process_vision_info
import torch
import numpy as np
def build_messages(image, task_prompt):
return [{
"role": "user",
"content": [
{"type": "image", "image": image},
{
"type": "text",
"text": f"Given a caption and an image generated based on this caption, please analyze the provided image in detail. Evaluate it on various dimensions including Semantic Alignment (How well the image content corresponds to the caption), Aesthetics (composition, color usage, and overall artistic quality), Plausibility (realism and attention to detail), and Overall Impression (General subjective assessment of the image's quality). For each evaluation dimension, provide a score between 0-1 and provide a concise rationale for the score. Use a chain-of-thought process to detail your reasoning steps, and enclose all potential important areas and detailed reasoning within <think> and </think> tags. The important areas are represented in following format: \” I need to focus on the bounding box area. Proposed regions (xyxy): ..., which is an enumerated list in the exact format:1.[x1,y1,x2,y2];\n2.[x1,y1,x2,y2];\n3.[x1,y1,x2,y2]… Here, x1,y1 is the top-left corner, and x2,y2 is the bottom-right corner. Then, within the <answer> and </answer> tags, summarize your assessment in the following format: \"Semantic Alignment score: ... \nMisalignment Locations: ...\nAesthetic score: ...\nPlausibility score: ... nArtifact Locations: ...\nOverall Impression score: ...\". No additional text is allowed in the answer section.\n\n Your actual evaluation should be based on the quality of the provided image.**\n\nYour task is provided as follows:\nText Caption: [{task_prompt}]"
}
]
}]
checkpoint = "GYX97/ImageDoctor"
image_path = "path_to_image"
prompt = 'prompt_for_image_generation'
img = Image.open(image_path).convert("RGB")
r = math.sqrt(512*512 / (img.width * img.height))
new_size = (max(1, int(img.width * r)), max(1, int(img.height * r)))
img = img.resize(new_size, resample=Image.BICUBIC)
processor = AutoProcessor.from_pretrained(checkpoint, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", trust_remote_code=True)
messages = build_messages(img, prompt)
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
output_dir = 'path_to_save_heatmaps'
inputs = processor(
text=[text],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt"
).to(model.device)
# Generate the model's reasoning and answer; hidden states are kept for the heatmap head
gen_kwargs = dict(
    max_new_tokens=20000,
    use_cache=True,
    return_dict_in_generate=True,
    output_hidden_states=True,
)
outputs = model.generate(**inputs, **gen_kwargs)
# Decode assistant output (strip prompt tokens)
generated_ids = outputs.sequences
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
decoded = processor.batch_decode(trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(decoded.strip())
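# --- Optional: parse the scalar scores out of the <answer> block. ---
# This is a parsing sketch that assumes the answer follows the format
# requested in the prompt above; adjust the patterns if the output differs.
import re

answer_match = re.search(r"<answer>(.*?)</answer>", decoded, re.DOTALL)
if answer_match:
    answer = answer_match.group(1)
    scores = {}
    for name in ["Semantic Alignment", "Aesthetic", "Plausibility", "Overall Impression"]:
        m = re.search(rf"{name} score:\s*([01](?:\.\d+)?)", answer)
        if m:
            scores[name] = float(m.group(1))
    print(scores)  # e.g. {'Semantic Alignment': 0.9, ...}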
# The heatmap head is optional: it requires the special token ids and the
# grounding modules shipped with this checkpoint's custom code.
has_tokens = all(
    hasattr(model.config, a) for a in ["image_token_id", "misalignment_token_id", "artifact_token_id"]
)
has_heads = all(
    hasattr(model, a) for a in ["text_hidden_fcs", "image_hidden_fcs", "prompt_encoder", "heatmap", "sigmoid"]
)
if has_tokens and has_heads and outputs.hidden_states is not None:
true_generated = generated_ids[:, inputs.input_ids.shape[1]:]
# Find special tokens
misalignment_mask = (true_generated[:, 1:] == model.config.misalignment_token_id)
artifact_mask = (true_generated[:, 1:] == model.config.artifact_token_id)
if misalignment_mask.any() or artifact_mask.any():
# Gather final-layer hidden states across decoding steps
step_states = []
        for step in outputs.hidden_states[1:]:  # skip the prompt (prefill) states at index 0
step_states.append(step[-1]) # last layer [B, 1, H]
all_gen_h = torch.cat(step_states, dim=1) # [B, T, H]
# Map text hidden → special token embeddings
        last_hidden_state = model.text_hidden_fcs[0](all_gen_h)  # [B, T, H']
# Index by masks (flatten batch/time)
mis_tokens = last_hidden_state[misalignment_mask].unsqueeze(1) if misalignment_mask.any() else None
art_tokens = last_hidden_state[artifact_mask].unsqueeze(1) if artifact_mask.any() else None
# Visual embeddings (grid features)
image_embeds = model.visual(
inputs["pixel_values"].to(model.device),
grid_thw=inputs["image_grid_thw"].to(model.device)
)
img_hidden = model.image_hidden_fcs[0](image_embeds.unsqueeze(0)) # [1, L, C]
        # Reshape to the low-res feature map expected by the heatmap head
        # (18x18 here; adjust if your image resolution or head differs)
        img_hidden = img_hidden.transpose(1, 2).view(1, -1, 18, 18)
def run_heatmap(text_tokens):
sparse_embeddings, dense_embeddings = model.prompt_encoder(
points=None, boxes=None, masks=None, text_embeds=text_tokens
)
low_res = model.heatmap(
image_embeddings=img_hidden,
image_pe=model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings.to(img_hidden.dtype),
dense_prompt_embeddings=dense_embeddings,
multimask_output=False
)
return model.sigmoid(low_res) # [N, 1, H, W]
        # Save the predicted heatmaps as .npy files
        artifact_np_path = None
        misalign_np_path = None
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)
        if mis_tokens is not None and art_tokens is not None:
            # Batch both token types through the heatmap head in one call
            fused = torch.cat([mis_tokens, art_tokens], dim=0)
            pred = run_heatmap(fused)
            mis_np = pred[0, 0].detach().cpu().float().numpy()
            art_np = pred[1, 0].detach().cpu().float().numpy()
            if output_dir:
                misalign_np_path = os.path.join(output_dir, "misalignment.npy")
                artifact_np_path = os.path.join(output_dir, "artifact.npy")
                np.save(misalign_np_path, mis_np)
                np.save(artifact_np_path, art_np)
        elif art_tokens is not None:
            pred = run_heatmap(art_tokens[:1])
            art_np = pred[0, 0].detach().cpu().float().numpy()
            if output_dir:
                artifact_np_path = os.path.join(output_dir, "artifact.npy")
                np.save(artifact_np_path, art_np)
        elif mis_tokens is not None:
            pred = run_heatmap(mis_tokens[:1])
            mis_np = pred[0, 0].detach().cpu().float().numpy()
            if output_dir:
                misalign_np_path = os.path.join(output_dir, "misalignment.npy")
                np.save(misalign_np_path, mis_np)
```
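
The saved heatmaps are low-resolution probability maps with values in `[0, 1]`. A minimal visualization sketch, assuming `matplotlib` is installed and reusing the placeholder paths from the Quick Start:

```python
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

img = Image.open("path_to_image").convert("RGB")
heat = np.load("path_to_save_heatmaps/artifact.npy")  # [H, W], values in [0, 1]

# Upsample the low-res heatmap to the image size and overlay it
heat_img = Image.fromarray((heat * 255).astype("uint8")).resize(img.size, Image.BILINEAR)
plt.imshow(img)
plt.imshow(np.asarray(heat_img), cmap="jet", alpha=0.5)
plt.axis("off")
plt.savefig("artifact_overlay.png", bbox_inches="tight")
```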
## 📚 Citation
If you use **ImageDoctor**, please cite:
```bibtex
@misc{guo2025imagedoctordiagnosingtexttoimagegeneration,
  author        = {Yuxiang Guo and Jiang Liu and Ze Wang and Hao Chen and Ximeng Sun and Yang Zhao and Jialian Wu and Xiaodong Yu and Zicheng Liu and Emad Barsoum},
  title         = {ImageDoctor: Diagnosing Text-to-Image Generation via Grounded Image Reasoning},
  eprint        = {2510.01010},
  archivePrefix = {arXiv},
  year          = {2025},
  url           = {https://arxiv.org/abs/2510.01010},
}
```