Add MinerU batch fbf0d5cc-8e0b-4ce4-8611-62e6fd38f6c1
This view is limited to 50 files because it contains too many changes.
- .gitattributes +32 -0
- data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_content_list.json +0 -0
- data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_model.json +0 -0
- data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19075/full.md +298 -0
- data/2025/2503_19xxx/2503.19075/images.zip +3 -0
- data/2025/2503_19xxx/2503.19075/layout.json +0 -0
- data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_content_list.json +1556 -0
- data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_model.json +0 -0
- data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19092/full.md +333 -0
- data/2025/2503_19xxx/2503.19092/images.zip +3 -0
- data/2025/2503_19xxx/2503.19092/layout.json +0 -0
- data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_content_list.json +0 -0
- data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_model.json +0 -0
- data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19108/full.md +452 -0
- data/2025/2503_19xxx/2503.19108/images.zip +3 -0
- data/2025/2503_19xxx/2503.19108/layout.json +0 -0
- data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_content_list.json +1462 -0
- data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_model.json +0 -0
- data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19199/full.md +339 -0
- data/2025/2503_19xxx/2503.19199/images.zip +3 -0
- data/2025/2503_19xxx/2503.19199/layout.json +0 -0
- data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_content_list.json +0 -0
- data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_model.json +0 -0
- data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19206/full.md +0 -0
- data/2025/2503_19xxx/2503.19206/images.zip +3 -0
- data/2025/2503_19xxx/2503.19206/layout.json +0 -0
- data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_content_list.json +0 -0
- data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_model.json +0 -0
- data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19213/full.md +448 -0
- data/2025/2503_19xxx/2503.19213/images.zip +3 -0
- data/2025/2503_19xxx/2503.19213/layout.json +0 -0
- data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_content_list.json +0 -0
- data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_model.json +0 -0
- data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19296/full.md +370 -0
- data/2025/2503_19xxx/2503.19296/images.zip +3 -0
- data/2025/2503_19xxx/2503.19296/layout.json +0 -0
- data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_content_list.json +1867 -0
- data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_model.json +0 -0
- data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_origin.pdf +3 -0
- data/2025/2503_19xxx/2503.19312/full.md +362 -0
- data/2025/2503_19xxx/2503.19312/images.zip +3 -0
- data/2025/2503_19xxx/2503.19312/layout.json +0 -0
- data/2025/2503_19xxx/2503.19325/4614b9a9-a2b0-4e28-a1af-5ad0467d40ad_content_list.json +1760 -0
.gitattributes
CHANGED
@@ -1931,3 +1931,35 @@ data/2025/2504_01xxx/2504.01995/fe557db9-352a-4123-8c28-0451fd3cb7d7_origin.pdf
 data/2025/2504_03xxx/2504.03724/d1216b63-e233-4ad6-8bd7-5e960fe3afba_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2504_03xxx/2504.03733/11778de7-f8b2-427a-bf08-74f3062546ad_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2504_13xxx/2504.13186/b1e85054-a09b-4d6f-a9ca-4490b66b2dd8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19325/4614b9a9-a2b0-4e28-a1af-5ad0467d40ad_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19334/bdb1d99d-b854-4c34-a5e7-0f5c7b4f7a1f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19380/758c647a-8649-4f36-ba8d-1220bac2c808_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19383/4ec5649b-a9da-4913-b8a4-c1bdd0c86212_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19385/3ebf32c4-23d4-409d-a939-cdfb84dacecc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19470/d62bb0a8-7af5-4e74-acb4-0eb938db271e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19506/843396ec-825c-4abd-bc1f-d7b2162ec06b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19551/8a3f2c94-015a-4805-be18-7765adcd1b1d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19595/40ff4bee-2e86-4e69-be72-50ae453cf274_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19602/5a641e7d-3c68-40dd-a4e0-1377b4b6f2e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19611/21b9b317-5f86-4e37-b424-d1b6ef1eebbf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19622/9af07c85-a8ff-4624-8fa5-7e1f8e8becdd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19633/f5c168e4-35d6-4edf-aa72-823f79684d4f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19755/7fa1b5cf-eff9-452f-aad7-f7891b8c0806_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19757/ceb14df2-de89-44d9-b593-3212c367568c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19786/d9576e91-a434-44ee-8998-17d64b3a5d2e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19800/277a4aa1-51b3-4daa-8227-bffa2403ab4a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19839/b6b6c209-bbc7-40f2-b564-ec101e404ef6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19855/3eb2da38-512b-4a5d-81d0-6c7929f2546c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19877/fb7bf038-e94d-48f8-ba81-a10c8c2f1a20_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19903/0906a2ad-9088-4fc5-8324-00e6a98a949f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_19xxx/2503.19907/6d3e71a9-f367-43e8-9154-5a3f1c98eccc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_20xxx/2503.20807/9709de1a-2664-4358-b8ae-7fae53399612_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_20xxx/2503.20823/460bf0cd-81d7-4046-966b-33a5a80b3f9b_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_content_list.json
ADDED
The diff for this file is too large to render.
data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_model.json
ADDED
The diff for this file is too large to render.
data/2025/2503_19xxx/2503.19075/b6914274-fd53-4c53-8362-52c6adb6830c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4ef5f4b629ea36d3b4ef5affc6a6870faf735966cdac0f50d0638e191ea51d8
+size 269238
data/2025/2503_19xxx/2503.19075/full.md
ADDED
@@ -0,0 +1,298 @@
# The Case for 'Thick Evaluations' of Cultural Representation in AI

RIDA QADRI, MARK DIAZ, DING WANG, and MICHAEL MADAIO, Google Research, USA

Generative AI image models have been increasingly evaluated for their (in)ability to represent non-Western cultures. We argue that these evaluations operate through reductive ideals of representation, abstracted from how people define their own representation and neglecting the inherently interpretive and contextual nature of cultural representation. In contrast to these 'thin' evaluations, we introduce the idea of 'thick evaluations': a more granular, situated, and discursive measurement framework for evaluating representations of social worlds in AI images, steeped in communities' own understandings of representation. We develop this evaluation framework through workshops in South Asia, by studying the 'thick' ways in which people interpret and assign meaning to images of their own cultures. We introduce practices for thicker evaluations of representation that expand the understanding of representation underpinning AI evaluations and, by co-constructing metrics with communities, bring measurement in line with the experiences of communities on the ground.

CCS Concepts: • Human-centered computing → HCI design and evaluation methods; Empirical studies in HCI; • Applied computing → Law, social and behavioral sciences.

Additional Key Words and Phrases: Generative AI, Global South, cultural representation, qualitative evaluations, text-to-image

# ACM Reference Format:

Rida Qadri, Mark Diaz, Ding Wang, and Michael Madaio. 2018. The Case for 'Thick Evaluations' of Cultural Representation in AI. In . ACM, New York, NY, USA, 19 pages. https://doi.org/XXXXXX.XXXXXXXXXX

# 1 INTRODUCTION

Generative AI (genAI) models, despite their popularity, have been shown to fail at inclusively representing different cultures in generated output, both images [52, 56, 66] and text [46, 60, 63, 76, 83], much like the representational failures of other types of AI systems [e.g., 18, 21, 43, 44, 74, 82]. To address these gaps, prior work has sought to evaluate the cultural representations within AI-generated output, but with few exceptions [30, 67], mostly through quantified, metricized approaches to representation such as statistical similarities and benchmark-style scoring [49, 84]. However, the use of these methods presumes that representation is an objective construct with an empirical, definitive ground truth that outputs can be compared against [e.g., 42, 84] [for a critique of ground truth, see 59]. Given the limitations of these computational methods, evaluation of representation is reduced to basic recognition or factual generation of artifacts. Even when human feedback on representation is sought, it is solicited through narrow, constrained, quantitative scales from anonymized crowdworkers who often lack the lived experience needed to evaluate the nuances of cultural representation in other cultures.

However, this approach to measuring representation is in contravention to decades of scholarship in the social sciences that emphasizes the subjective nature of representation, where judgments about representation in visual media are constructed in conversation with the viewer's lived experiences and the broader context within which an image is viewed and published [33, 34]. The evaluation categories many AI researchers use are also abstracted from the experiences of communities, constructed by AI researchers without engaging with communities to understand what axes of representation might be salient in their contexts. For instance, while many representational evaluations focus on skin tone, skin-tone-based racial categories might not be salient categories of differentiation in many cultures. Thus, while existing approaches to evaluating representation in AI/ML might be useful for evaluating the accuracy of visual depictions of the physical world, they may not allow us to engage with communities' diverse desires for representations of their social worlds.

As generative image models are increasingly used to represent social worlds, what new evaluation approaches are necessary to meaningfully account for the diverse ways that people interpret and evaluate cultural representation in AI?

We argue that effectively evaluating cultural representation in AI images requires 'thick evaluations'—a more granular, situated, discursively constructed approach to measurement, steeped in communities' own understanding of appropriate cultural representation. Our analysis draws on the 'thick vs thin' dichotomy introduced by philosopher Gilbert Ryle [70], later taken up by Clifford Geertz [29] and others [38, 51, 61, 69] to characterize descriptions of social worlds. Ryle's classic example of a wink illustrates this distinction: a "thin description" merely describes the physical act of closing one eye by focusing on observable details, while a "thick description" unpacks the social meaning of the act and its significance as a signal to interlocutors. Seen through this framework, emerging methods for evaluating cultural representation in AI images involve 'thin evaluations'; i.e., they are suited to evaluating the observable aspects of the physical world contained in AI-generated images, but not necessarily the social signals embedded in those physical depictions.

To develop a 'thick evaluation' approach for cultural representation in AI images, we turned to the communities represented in the images we seek to evaluate, recognizing their expertise and stakes in visual representations of their cultures. We conducted workshops with 37 participants in 3 South Asian countries to study the culturally situated ways people assign meaning to, interpret, and evaluate the representation of their cultures in AI-generated images. We find that 1) people evaluate representation of social worlds not just through a singular category such as accuracy, but through multi-dimensional, fine-grained axes; 2) people's goals for evaluating cultural representation are situated in their social context, negotiated through dialogue with others and in response to broader societal discourse about their cultures; and 3) people deploy situated social knowledge and experiences with social worlds to evaluate varying social meanings of images.

These findings demonstrate that thin evaluations alone cannot measure thick constructs like cultural representation and raise critical questions about the adequacy of existing evaluation paradigms for measuring cultural representation in AI images. As evaluation standards are being created for AI evaluations, this is a crucial moment to call for a thicker AI evaluation practice that reflexively interrogates the epistemological underpinnings of AI evaluation practices, fundamentally rethinks whose expertise is included in the evaluation of AI systems, and opens up space for more interpretive qualitative methods. We provide pathways for such thickness by showing how AI researchers can interrogate the construct of cultural representation and the ways that this unobservable construct is operationalized in evaluation methods [cf. 39]. We provide empirical data on the varied categories of representation people might evaluate images for, which shows the need for co-constructing evaluation methods with members of communities whose culture is being represented, to bring measurement in line with their experiences of AI images. We thus help AI practice move towards an ecosystem of 'thick' and 'thin' evaluations, encouraging congruence between the construct being evaluated and the evaluation methods used. As AI technology enters the space of cultural production, our work suggests the need to develop new, thicker forms of evaluations of cultural representation, to better reflect how people consume and interpret images of their cultures.

# 2 RELATED WORK

# 2.1 Evaluating Cultural Representation in AI

In order to understand the nature of AI models' failures of representation [e.g., 52, 66], researchers have developed methods for evaluating representational failures (e.g., stereotyping) of AI systems [e.g., 18, 21, 36, 43, 44, 74, 82]. Primarily, these evaluations have focused on language models [e.g., 1, 11, 18, 37, 43, 82], but they have also begun to include image models. For instance, emerging approaches for evaluating representation in images include developing quantified benchmark scores for goodness of representation (e.g., diversity [42]), through methods like computing statistical similarity between generated images and reference images [84], calculating correlations in keywords [49], measuring how frequently a model generates stereotypical and offensive images of nationality groups [40], and calculating differences in frequency between the most and least common identities referenced in a set of model outputs [48]. Human feedback has been sought via, for instance, asking anonymized crowdworkers to score cultural biases in images [7]. For representation in images, these measures often focus on visible demographic characteristics—for instance, Cho et al. [19] use automated skin tone and gender presentation classifiers to evaluate the diversity of generated images.
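
To make the flavor of these 'thin' metrics concrete, the following is a minimal, hypothetical sketch (our illustration, not the implementation of any cited work) of a frequency-gap score in the spirit of [48], computed over identity labels assigned to a batch of generated images:

```python
from collections import Counter

def identity_frequency_gap(labels: list[str]) -> float:
    """Gap between the most and least frequent identity label,
    normalized by the number of outputs (0 = balanced, near 1 = one
    identity dominates). A purely distributional, 'thin' score."""
    counts = Counter(labels)
    if not counts:
        return 0.0
    return (max(counts.values()) - min(counts.values())) / len(labels)

# Hypothetical labels a classifier might assign to 8 generated images
labels = ["north_indian", "north_indian", "north_indian", "punjabi",
          "north_indian", "punjabi", "north_indian", "tamil"]
print(identity_frequency_gap(labels))  # 0.5
```

Note what such a score cannot see: whether the label set itself is salient to the community being depicted, or what the images connote, which is precisely the gap the following critique identifies.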

However, this line of work represents a technosolutionist, positivist [20] conception of representation, treating it as something objective, stable across time and contexts, and quantifiable. For instance, using skin tone to measure diversity presumes that skin tone is a useful proxy for race and that race is a universal category of differentiation, which may not be appropriate in all global cultures being represented. In contrast, researchers at the intersection of HCI and FAccT have conducted qualitative evaluations of representation in genAI image models, identifying failures of representation through more participatory and community-centered methods, with evaluators who are from the same culture as the images they are evaluating [31, 56, 67]. However, while these efforts have demonstrated the value of qualitative approaches for evaluating representation in generative image models, they do not examine what is meant by representation as a construct, nor do they critically interrogate what the interpretive meaning-making that participants in such studies undertake suggests for broader approaches to evaluations of cultural representation in AI.

Recent research has shed new light on the complex process of measuring unobservable social constructs such as fairness, as well as the potential harms of reductive measurement approaches. For instance, Selbst et al. [73] critique definitions of fairness that lead to misleading abstractions in its evaluation. Recognizing the contested nature of the construct of fairness, Smith et al. [77] argue for bringing in stakeholders' expertise for more meaningful and contextually grounded evaluations. Scholars like Jacobs and Wallach [39] advocate for utilizing approaches to measurement modeling from the social sciences to bridge the gap between unobservable constructs and their operationalization in evaluations of fairness in algorithmic systems—a framework we draw on in this paper through our discussion of the construct of cultural representation and how it is operationalized in evaluations.

Jacobs and Wallach [39] argue that measurement modeling begins with a clear and robust articulation of the construct being evaluated—in their paper, fairness, and in our paper, cultural representation. However, in prior work evaluating cultural representation in text-to-image models [e.g., 7, 40, 42, 48, 84], the construct of representation is often not interrogated or explicitly defined. Although often left implicit, prior work operationalizes representation in various ways: as lack of bias [81], as geo-cultural similarity [7], and as imbalance [79]. More broadly than for cultural representation in text-to-image models, Chasalow and Levy [17] argue that researchers often fail to make explicit how they conceptualize 'representative' or 'representation.' Such varied ways of operationalizing the same construct may lead to evaluations that optimize for disparate goals (e.g., cultural awareness, cultural diversity, avoiding "cultural bias" [49], etc.), making it difficult to compare the effectiveness of different approaches [39].

# 2.2 Situated Conceptualizations of Representation

Decades of scholarship from the humanities have argued that representation in media, particularly visual media, cannot be defined through positivist approaches [e.g., 6, 33, 34]. They argue that representation is not a static goal that can be objectively and computationally evaluated and achieved, but is instead an ongoing interpretive act whose meaning is contested and negotiated. In media studies, visual media, such as paintings, photography, or digital art, do not represent some objective reality but instead communicate multiple shades of meaning about the world, known as polysemy [34]. That is, images have both denotative meanings (i.e., the literal subject of the image) and connotative meanings (i.e., the symbolic meaning the image was designed to convey)—or the meaning it invokes in viewers, which may be different from the intended meaning of the image [6, 34].

As a result, for media studies scholars, representation in visual media is a process of meaning-making, not a one-to-one depiction of the world as it is—and thus representation is a site of struggle over meaning [25, 33, 34]. In this way, even a photograph of an event does not objectively convey a singular true meaning [34], but instead conveys a "positional truth" [2] (or multiple such truths, given the polysemic nature of images), shaped by the creators' and viewers' cultural and historical contexts. Thus, visual understanding itself relies on a slippery relationship between words, images, and the concepts signified by those images [6]. Indeed, as media studies scholars remind us, representation in public culture is a "zone of contestation" [25], characterized by contests over which peoples and perspectives are made visible and how they are portrayed (and by whom)—contests shaped by who has the power to control narratives of representation. Thus, in this paper, we explore how people evaluate representation in AI-generated images and what this representation means to them in their culture.

# 2.3 Thick vs Thin Modes of Understanding the Social

The 'thick/thin' dichotomy, originating in Gilbert Ryle's philosophical work [70], has become a cornerstone of anthropological and social scientific inquiry. He argued that understanding human behavior requires moving beyond mere observation of physical details to interpretation, recognizing the "many-layered sandwich" of meaning embedded within even seemingly simple actions [70]. This concept was further elaborated upon by Clifford Geertz, who emphasized the importance of thick description in ethnographic research [29]. Geertz argued that researchers must not only describe actions but also interpret them within their social or cultural context. A wink, for instance, could signify a conspiratorial gesture, a flirt, a parody, or a mere rehearsal, each carrying distinct meanings depending on the social context.

Sherry Ortner, in a chapter on "thick resistance," highlighted the limitations of thin approaches in capturing the complexities of social life, which require the "richness, texture, and detail" of thickness [61, 62]. She argued that thin descriptions are often "thin on the internal politics of dominated groups, thin on the cultural richness of those groups, thin on the subjectivity—the intentions, desires, fears, projects—of the actors engaged in these dramas" [61]. Ortner advocated for thick descriptions that delve into the "production, circulation, and consumption" of cultural practices, recognizing their dynamic interplay with social and political realities. While the value of thickness in capturing the nuances of social phenomena has been widely acknowledged, scholars have also recognized the importance of thinness in discerning patterns and broader social structures. Annelise Riles [69], in her work on multi-sited ethnography, explored the challenges of achieving thick description when studying global phenomena dispersed across diverse cultures, calling attention to "the limits of thickness as a disciplinary trope" and urging more attention to what the use of "thin composition" could yield. Anthropologist John Jackson similarly emphasized the value of thin description in providing a "baseline empiricism" and a starting point for social investigation [38]. Thus, while thick and thin both have their place in social analysis, thinness can only get you so far. In this paper, we draw on the thick/thin dichotomy to provide a framework for navigating the complexities of social inquiry when considering multiple approaches to evaluating cultural representation in AI images.

# 3 METHODS

In this research, we treat the evaluation process as an object of study in itself. We engaged with participants from three South Asian countries (Sri Lanka, Pakistan, and India) through a series of individual online evaluations and collective in-person workshops. These methods were chosen to allow us to move beyond simplistic evaluation metrics; we instead sought to capture the in-depth, qualitative reflections and discussions participants have as they interpret and evaluate AI-generated images. Given the discursive and collective nature of representation, we chose group workshops instead of individual interviews. To preserve participants' individual perspectives, we included an individual evaluation task prior to the workshops. Additionally, to ensure the annotation process reflected local cultural norms—including platform choice, framing of questions, and participant composition—we collaborated with local research partners in each region.

# 3.1 Participants and Sites

We chose our research sites for two main reasons. First, we aim to contribute to a growing body of scholarship that seeks to expand the evaluation of AI beyond Western-centric perspectives [41, 57, 66, 71]. Second, we intend to move beyond the common "USA vs. India" dichotomy often found in comparative analyses including the Global South (e.g., [45, 68]). To this end, we focused on three South Asian countries: Sri Lanka, Pakistan, and India. They were selected for their shared cultural histories, while also recognizing the vast diversity within South Asia. We recruited participants through collaborations with local research partners deeply embedded in their communities. Our partners in Sri Lanka and Pakistan were university researchers with extensive research experience with marginalized local communities, while our partner in India was a research manager experienced in conducting workshops and focus groups. Recruitment began with emails to targeted lists, including local institutions, previous research participants who had worked with those partners, and the partners' professional networks. Interested individuals completed a screening form detailing their demographics and background.

Rather than attempting to exhaustively represent any single culture, our aim was to capture nuanced diversity and similarities in how people from different cultures interpret and assign meaning to their cultural representation. Thus our selection criteria took a purposive sampling approach [10], aiming to identify a diverse set of individuals with various intersecting identities relevant to the local context. We were not prescriptive regarding the exact numbers and form of diversity in our sample; instead, we relied on our partners to guide us towards axes of marginalization and social differentiation that might influence socio-cultural experiences in each country. This led to a sample that was highly contextualized in its diversity but not necessarily exhaustive of all forms of diversity. Minority and diversity here are interpreted in the context of the respective country: for instance, Hinduism is not a minority religion in India, but it is in Sri Lanka and Pakistan, while Islam is not a minority religion in Pakistan, but it is in Sri Lanka. 15 participants per country were selected. Ultimately, 11 participants each from Sri Lanka and Pakistan and 15 from India (spread across two workshops) took part. Participants' backgrounds are detailed in the appendix.

# 3.2 Study Activities

Our study comprised three parts: a pre-workshop survey to elicit prompts for culturally relevant images, a pre-workshop evaluation task to evaluate those images, and a workshop to discuss cultural representation in AI images.

3.2.1 Pre-workshop surveys. We asked each participant to create prompts for AI image generation, drawing on their unique cultural experiences by mixing salient identity markers (e.g., "Punjabi woman") with a regional landmark or cultural event, resulting in prompts such as "a group of Punjabi women in front of Wazir Khan Mosque in Lahore." This ensured both culturally relevant and diverse prompts, and positioned participants as experts in evaluating the resulting images.

3.2.2 Individual evaluation task. We wanted to give each participant space to individually evaluate the images of their cultures generated from their prompts, before joining the workshop. Thus, after we gathered the prompts from the participants, one of the authors generated images based on each participant's prompts and compiled a personal collection of prompts and images for each person. Participants were invited to reflect on each image and leave commentary on whether the image was a good representation of their cultural experience. We purposefully did not define "good" or "representation," since those were the terms we wanted participants to define for us in the workshop.

3.2.3 Workshop. To facilitate collective reflection and discussion on cultural representation in AI images, we convened four-hour workshops across three sites: two in-person workshops, in Pakistan and Sri Lanka, and two online workshops in India, where logistical constraints necessitated a virtual format. Online workshops were held via Google Meet. Each workshop lasted roughly four hours and had three sections. First, participants engaged in a reflective discussion on their representational goals, drawing from the image-prompt pairs they had evaluated before the workshop. Next, they participated in prompting exercises, iteratively refining prompts through multi-turn interactions with a generative AI model capable of text-to-text, image-to-text, image-to-image, and text-to-image tasks. Finally, participants shared their experiences with image generation and representation in an evaluation discussion. The pre-workshop evaluation task complemented the workshop by providing participants with space for individual reflection, priming them for more informed group discussions. This combination allowed us to explore both individual interpretations and collective understandings of cultural representation, positioning participants as experts in their cultural contexts. Individual evaluations captured participants' diverse perspectives on AI-generated cultural representation, while the workshops encouraged dialogue to uncover collective interpretations and contestations in meaning-making.

# 3.3 Data Analysis

All workshops were recorded and transcribed and, where required, translated by authors or local partners. All four authors participated in data preparation (e.g., data cleaning and transcribing) and analysis. We took a reflexive thematic analysis approach to analyze the workshop data, following Braun and Clarke [12, 13]—this includes the transcripts as well as participants' responses to the individual pre-workshop evaluation task. We met regularly as a group throughout the analysis period to inductively generate themes that captured patterns of shared meaning across workshop sections and workshops [13]. We used the digital whiteboard Mural to iteratively cluster the codes into larger themes, discussing the relationships between codes and themes as we went and resolving any disagreements in synchronous group discussions. To attribute the quotes used in the following section, we use anonymized participant identifiers: a country code (IN for India, PK for Pakistan, and LK for Sri Lanka) concatenated with a number.

# 4 FINDINGS

In Section 4.1, we show the granular, multi-dimensional categories of cultural representation people draw on when evaluating images, complicating the singular constructs of representation in thin evaluations. In Section 4.2, we explore how participants' goals for appropriate cultural representation were deeply situated within their specific cultural contexts, dynamically changed over time, and were discursively constructed through collective dialogue. In Section 4.3, we examine the situated forms of social knowledge that underpinned these evaluations, emphasizing the importance of thick evaluations that draw on people's lived experiences and cultural expertise.

# 4.1 People evaluate cultural representation through multi-dimensional categories

When asked to evaluate the cultural representativeness of AI-generated images, our participants constructed multiple dimensions of representation to evaluate, demonstrating the need for more fine-grained categories of cultural representation than singular constructs like accuracy of representation [cf. 84]. We distill these into five dimensions, which point to the richer categories that thick evaluations could evaluate for: incorrectness (the accuracy of depicted physical objects—the closest to existing approaches [e.g., 84]), missingness (the absence of iconic and expected cultural elements), specificity (whether the subject of the image was specific to their particular sub-culture), coherence (whether all elements of an image were appropriate given cultural and social norms), and connotation (the symbolic meanings and interpretations associated with an image). These expanded categories of cultural representation demonstrate the need for constructs that go beyond the physical world to capture the social worlds of a culture.
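
As a concrete illustration only (our sketch, not an instrument from the study), these five dimensions could be recorded as a structured, multi-field evaluation rather than a single accuracy score; the free-text fields reflect that most dimensions resist binary scoring:

```python
from dataclasses import dataclass

@dataclass
class ThickImageEvaluation:
    """One participant's evaluation of one generated image, along the
    five dimensions distilled in Section 4.1. Field names and types
    are hypothetical."""
    image_id: str
    evaluator_id: str       # e.g., "LK-5": country code + number (Section 3.3)
    incorrect: bool | None  # closest to thin metrics: is a depicted landmark wrong?
    missing: list[str]      # expected cultural elements absent from the image
    specificity_notes: str  # is the depiction specific to the evaluator's (sub)culture?
    coherence_notes: str    # mismatched, misaligned, or anachronistic elements
    connotation_notes: str  # symbolic meanings the image evokes

# Illustrative record drawing on participant observations quoted below
ev = ThickImageEvaluation(
    image_id="img_017",
    evaluator_id="LK-10",
    incorrect=None,
    missing=["beaches", "railway stations", "Sri Lankan food"],
    specificity_notes="sari style reads as North Indian rather than Sri Lankan",
    coherence_notes="Tamil attire mixed with Chinese-looking lanterns; no Sinhalese New Year symbols",
    connotation_notes="scene reads as dated rather than contemporary",
)
```

Even so, the findings below suggest that what belongs in such fields, and what counts as a good value for them, is itself negotiated and situated, which is why we treat this only as a sketch.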

4.1.1 Incorrectness. Participants evaluated the (in)correct representation of specific artifacts that have a singular corresponding physical existence outside the image, such as a building, a landmark, or a local landscape. This category has the most congruence with existing thin evaluations, as it seeks to evaluate depictions of artifacts with respect to their existence, and can often be answered with a binary or quantifiable metric since the 'ground truth' is more easily determinable than for the following dimensions of representation. For instance, participants talked about how the depiction of the Lotus Tower in Sri Lanka can be incorrect or correct, since there is only one Lotus Tower with a very particular form. As such, when attempting to generate images, Sri Lankan participants reflected on how they were not able to produce correct images of the Lotus Tower, a famous landmark and the tallest structure in Sri Lanka. However, while incorrectness was relevant for evaluating objects with clear counterparts in the physical world, it was not used to assess other forms of cultural representation of the social world, indicating that evaluations of cultural representation encompass more than just factual correctness.

4.1.2 Missingness. Similarly, participants evaluated whether images lacked the expected cultural elements that they felt contributed to a place's identity—what we refer to as missingness [cf. "erasure:" 43, 65, 66, 74]. Participants felt that the absence of common features and structures in images meant to represent their culture meant that a given culture or location's essence or identity would not be communicated. Thus LK-5, a participant from Sri Lanka, noted scenes that are commonplace in Sri Lanka that were curiously missing across a number of generated images: "iconic places were never represented, like post offices and railway stations. Food items in Sri Lanka were never represented. Beaches were never reflected in any images, [but] we are an island." In a similar vein, LK-11 pointed out that even nationally significant elements of the Sri Lankan landscape—such as visually distinct species of trees—were absent, but should be represented: "so it should reflect more of our national items if we were to ask it to generate things about Sri Lanka, such as the Naa tree, Bo tree, national bird, animals." This sense of missingness was closely tied to participants' ability to distinguish one place from another and capture its unique cultural character—or its iconicity [cf. 5].

4.1.3 Specificity. Participants evaluated the specificity of cultural representation—i.e., whether the depiction of cultural elements was specific to their contexts—especially for elements indicative of intersectional identities or subcultures, or cases where cultural artifacts were shared across cultures (but manifested in slightly different ways). For example, when evaluating images of women wearing saris, participants identified the regional nuances of saris that make them specific to their culture: "I would say it's Marathi. A Marathi sari is a little different. So, the saris also have very different variations. Right. So these saris, particularly makes me feel like it's from the Northern region. Like that is more, I think, mainstream, India" (IN-3). Evaluating for cultural specificity meant that participants paid close attention to nuanced details that distinguished the cultural practices of their social worlds from others. For other participants, when evaluating cultural representation, it was important to point out that a style of dress was more common in another part of the world than theirs: "this has more Arab cultural influence, like wearing long dresses and wearing hijabs" (PK-8). Similar to missingness, evaluations of specificity were crucial for accurately conveying the unique aspects of a particular culture, bringing with them layers of meaning and symbolism from their social worlds.

4.1.4 Coherence. Participants also evaluated the coherence of the images of their culture—considering the extent to which the various elements in an image were in alignment about which culture they were representing and in what ways. They evaluated (in)coherence in multiple ways, such as evaluating images for unrealistic combinations of cultural elements, behaviors misaligned with social and cultural norms, and anachronistic elements of their culture. Participants identified images that contained a mishmash of cultural signifiers that did not typically occur together, such as merging cultural details from different (sub)regions, subcultures, religions, or nationalities into one image: "So the attire is of the people of Tamil. And the lanterns look Chinese, and no symbol of Sinhal[ese] new year" (LK-10). Other participants in this workshop elaborated on the incoherence, pointing out that: "the kids are wearing what normally Sri Lankan kids [are] wearing. But she is also having the [bindhi], and the elder person is kind of Sri Lankan, the mom is kind of European again. And the other kid is having totally different [clothing]—a sari on a male kid I think? So I kind of gave it a two out of five." (LK-1)

(In)coherence was also about (mis)alignments of perceived social behaviour in an image, where people were behaving or engaging in activities that did not align with participants' own experiences or perceptions of what was considered appropriate or logical to do in their cultures, given local social and cultural norms:

- "I've never seen women wearing jewelry in [the] Press Club. Women go there for protest, [but they are] looking like they are here for a picnic." (PK-8)
- "Even the second picture, that is also wrong. That is shown like people are boating in that place. It is never done in a religious place like a gurudwara. This is never done." (IN-5)
- "Most people go to such tombs for tourism, but it seems there's a religious ceremony or speech or proselytizing happening here in these images [like] 'tableegh,' which does not happen in these tombs..." (PK-9)

Another form of (in)coherence was temporal, when elements from different time periods were placed together, creating anachronisms. IN-5 noted that some visuals generated in images were correct once, but no longer so: "No one covers their head [now]. My mother used to cover her head. My granny used to cover her head, but I have not seen any woman who's living in a village or in a rural area [do that] nowadays. It was a part of Punjabi culture, but not anymore."

4.1.5 Connotations. Finally, participants evaluated the connotations evoked by the images, recognizing that representation in visual media is not just depiction, but also a communicative act. While certain elements of images may be accurate in the sense of potentially occurring in the physical world, participants discussed how those images evoked connotations of the social world that they felt were not appropriate representations of their culture. In the workshop in Pakistan, participants noted that depictions of beards in images of Pakistan evoked Western stereotypes about Pakistanis, and they raised concerns about promoting narrow ideas of Pakistani culture: "Everyone has beards in these photos. Many people in Pakistan don't have a beard. Even in this [workshop] group... this brother does not have a beard, even my beard is so small" (PK-11). Similarly, participants interpreted connotations of poverty from the representation of particular cultural artifacts, which were not inherent to the denotations of the visual images. For example, showing women in saris in India was associated with "a typical traditional thinking about the Indian women since long ancient times." In some evaluations, participants explicitly expressed frustration that these images signaled 'them' seeing 'us' in a particular light: "In none of the pictures we see women. I don't know what they think of us. Like we are from the 19th century and live 200 years ago" (PK-11).

# 4.2 Goals for cultural representation are situated, dynamic, and negotiated

4.2.1 Goals for representation are situated and dynamic. The previous section showed the more granular categories needed to evaluate the construct of cultural representation in AI images. In this section, we identify how the goals people hold when evaluating those constructs of cultural representation are developed through a dynamic conversation between participants' lived experiences and broader messaging about their culture. Participants emphasized that there was no 'objective' ideal for representation; instead, what constitutes meaningful cultural representation varies both across and within cultures and contexts. For instance, diversity of representation is one example of a representational goal generative models might aim for, but one participant noted how identities considered diverse in other contexts were, in fact, the dominant identities in her own: "what might be diversity in the First World might actually be monotony in my area... what is diversity in a First World context or in the rest of India is the dominant culture where I come from" (IN-15). For IN-15, when the model attempted to achieve a decontextualized form of diversity from a Western lens, it was inadvertently just depicting dominant cultures: "In the First World, there are attempts at sort of including Muslim representations and all of that... But for example when I give a prompt as a Kashmiri woman, Muslims are the majority. Here [in Kashmir] diversity would look like something else... So here I don't want just women with hijabs represented. I also want Muslims without hijabs and non-Muslim women represented" (IN-15). Thus, while representational diversity was important for participants, there was no one-size-fits-all approach to diversity, as it needed to be contextualized within the social worlds of the users to be meaningful.

Representational goals shifted dynamically not just across countries but also within a country. For instance, IN-10 from India described the difficulty of evaluating whether the model had successfully represented a holiday, because people celebrated the holiday so differently in different regions: "Even in the South, Ganesh chaturthi is celebrated, but the scale of it is very different. So if you look at Mumbai has very massive celebration during Ganesh chaturthi. I don't think [the celebration in] Bangalore is as big and I think some families would do a quiet celebration" (IN-10). Participants also highlighted how, even for the same person, visual representations of their own behavior would have to vary based on differences in context—such as urban and rural, public and private, and variance in their individual adherence to cultural norms. To exemplify this, participant IN-3 mentioned how their attire would change if they were in rural India vs. urban India, saying "I would be wearing this in my village, yes [but not in the city]" (IN-3). Similarly, looking at a generated image of a woman in traditional Pakistani clothes, PK-7 noted that what a woman in Pakistan might wear in the bazaar would be different from what she wore elsewhere: "We do see women around us wearing Western clothes, but in the bazaar you would see [women] dressed like this [in traditional clothes], so I don't expect women [in generated images] wearing a crop top, even though that is very me."

Social worlds themselves, and thus their ideal representations, also change dynamically over time, constantly evolving in ways that are not as easy to evaluate as changes to the purely physical world. Participants noted that some aspects of cultural representation that models had generated could have been considered representative at one time, but were no longer the case, as the social world being represented was evolving: "[This] seems like Pakistan from the 70s. Pakistan has evolved. This is a very old Pakistan. We have a Western touch now also" (PK-11). These issues with representation across time included attire that participants thought was outdated, architectural and building styles that felt like they were from another time, and modes of transport that did not exist anymore.

4.2.2 Goals for representation are constructed through discursive negotiation. Participants' goals for and understanding of cultural representation were actively negotiated in conversation with broader social narratives they had encountered in other visual media and through dialogue with other participants during the workshop. Even seemingly objective judgements of (in)correctness were contextualized by participants within perceived messaging they had encountered outside the specific image at hand. Some participants developed goals for representation in response to stereotypes about their cultures in the media—such as perceptions that South Asian cultures were undeveloped (e.g., PK-11), leading them to want generated images that showed more modernity. Or, when evaluating the failure of models to generate important landmarks from their cultures, participants contextualized these failures within general power differentials between the Global North and Global South that they had experienced.

Such representational goals were also reflected upon and negotiated in dialogue with other participants. One example of such negotiation was a debate that played out in one workshop on the tension between combating stereotypes with what one participant referred to as 'very clean' positive depictions and what another participant termed 'realism.' One participant asked their group about the desire for positive representations of their cityscapes: "What do we expect out of AI? Do we want [the images] to wash away our sins? Not have electricity poles? Do we want [the images] to be very clean?" (PK-4). This participant then went on to say they would not want a positive representation at all times because it would not be an accurate image. Another participant echoed this conclusion, noting that "there is a tension between creativity and artistic expression and realism. If you ask it to be too real, you are putting restrictions... on AI artistic capability" (PK-2).

This tension between goals for representations to combat stereotypes or achieve realism emerged concretely in images that were interpreted as depicting a less modern version of their culture. For instance, in generated images of Pakistani women, some participants noted that women wearing traditional clothes rather than more 'Western clothes' would convey the impression that Pakistan was not a modern place. However, other participants felt this image still reflected some women, even if it did not represent everyone, and thus was important to retain. For instance, PK-7 argued, "You have to represent the culture as opposed to a small minority of women." To which PK-11 responded, "But in all pictures [of Pakistani women] we are seeing the same culture. One of the four images could be of a woman from a modern society." Participants also acknowledged that there may not be consensus on what constituted desired cultural representation; instead, people's goals for representation may be based on the communicative intentions of the group being asked. For instance, PK-1 noted that "country officials or ambassadors... want the best images of Pakistan, they will want [images to] be whitewashed, but we want to have stray cats and [electricity] poles" (PK-1).

While not every dialogue reached consensus, the act of dialogue helped shape participants' respective goals for representation. Participants noted the importance of creating spaces for discursive engagement in the evaluation of cultural representation, allowing for the emergence of diverse perspectives and the co-construction of meaning. As one participant noted, "If you gave the same image to all five of us, we would be pointing out different elements of it... So, [in] a group discussion, you can do it more fruitfully than an individual even with a guideline. And when we move on to the next time, we will all have a more keen eye on it" (LK-11). This suggests that approaches to evaluating cultural representation should engage with the discursively constructed and negotiated goals for what appropriate representation looks like.
|
| 137 |
+
|
| 138 |
+
# 4.3 People draw on situated knowledge and experiences to evaluate representation
|
| 139 |
+
|
| 140 |
+
In this section, we examine the specific types of knowledge participants drew upon for different evaluative categories outlined in Section 4.1. As participants moved from evaluating more empirically demonstrable aspects of the physical world (e.g., is this the Lotus Tower?) to more culturally-situated judgments of the relationship between the physical and social worlds (e.g., is this scene a positive representative of my city?), participants relied on diverse forms of knowledge and deep experience of their social worlds. For instance, evaluations of incorrectness required less cultural and social knowledge, becoming almost a form of pattern recognition. However, other forms of evaluation such as coherence or connotation required more specific knowledge, since they sought to evaluate not just thin concepts of physicality but thicker concepts of sociality. Understanding where social realities were (in)coherently displayed needed an understanding of appropriate behaviors and cultural norms that are often unstated (e.g., what lakes you would boat in and which you would not, or which generation of women might not be covering their head anymore). Even when visual elements of the physical world were being evaluated, they were often linked to social cues relying on hyper-specific visual elements: from the specific type of jewelry you would wear at particular celebrations to recognizing indigenous trees, local hairstyles, how people sit in a particular space, or even the birds that fly above a particular mosque and the potholes that litter a street.
Participants themselves explicitly underscored the need for cultural knowledge gained through lived experience within particular social worlds in order to evaluate the representation of different elements of those social worlds. Participants noted that, given the rich and complex relationship between cultural artifacts and social worlds, it would be difficult for foreigners to understand the connotations of, or relationships between, specific types of jewelry, clothing, food, and cultural traditions like religious festivals. This knowledge was also often innate and implicit, and could not necessarily be made explicit in an evaluation rubric. When prompted to explain why a certain representation was wrong, problematic, or unsatisfactory, people struggled to articulate exactly why what they were seeing was inaccurate or contextually inappropriate, resorting to expressions like "there's an energy" (PK-7) or "there's a vibe" (IN-3, IN-14, LK-1). Participants also noted that tourists or other outsiders may hold preconceived notions of their countries that do not match local realities. They pointed out that relying on people to evaluate the cultural representation of images from cultures other than their own would lead to worse evaluations, as outsiders might fail to recognize harmful stereotypes embedded in images, instead mistaking them for accurate portrayals.
This recognition of outsiders' limitations also prompted participants to reflect on their own inability to judge the same axes of representation for other cultures. "If someone asks me to evaluate Africa, I already have a bias of Africa and I will evaluate accordingly" (PK-11). While participants emphasized the importance of lived experience for accurate cultural representation, they also recognized the need for this experience to be granular and specific to intra-country social worlds. Even when they came from the same country, participants admitted that their personal experiences with other subcultures within their own country were often limited. Many shared examples of cultural norms and knowledge that they had not been familiar with before the workshop. For instance, in the Pakistan workshop, participants discovered different ways of celebrating the holiday of Nauroz, surprised at their own discoveries in conversation with each other:
PK-10: In Skardu we celebrate it differently. We have contests with eggs. The one whose egg breaks loses. We have 10 day long celebrations.
PK-6: We celebrate it very differently. We have people gathering, people playing music, playing sports. We have different instruments. Like tambourine or rubab.
PK-7: I've never heard of Nauroz in Lahore or Karachi.
These findings highlight the deeply situated cultural and social knowledge needed for evaluating the desired or appropriate cultural representation of social worlds in generated images.
# 5 DISCUSSION
In this discussion section, we reflect on three implications of our findings for the enterprise of AI evaluations: (1) the need to reconsider the construct of representation underpinning AI evaluations and to create congruence between concept and measurement, (2) the importance of co-constructing methods for evaluating representation with communities, and (3) the tensions inherent in attempts to "thicken" evaluation practices for cultural representation in AI.
# 5.1 Thick Methods for Thick Constructs of Cultural Representation
Our findings highlight the urgent need for a critical re-evaluation of how we conceptualize and operationalize representation in AI evaluations. Rather than treating representation as a singular, fixed concept or objective truth, participants unpacked it, in situated and negotiated ways, into multiple granular categories. Yet, as Chasalow and Levy [17] argue, the conceptualization of representation is often taken for granted in AI evaluations, as it is a "suitcase word" encompassing a multitude of meanings and lineages, including political, statistical, and, here, cultural. Our participants empirically demonstrated that what gets bundled into this suitcase word are multiple unobservable constructs of cultural representation, each requiring different approaches to measurement and metrics. Our findings thus suggest that AI evaluations typically conflate two constructs of representation in one: one that is thinner, which focuses on empirical accuracy of physical worlds, and one that is thicker, such as "coherence," which reflects the internal social consistency and plausibility of a depicted scene.
While thin evaluations may be suitable for assessing the correspondence of "thinner" constructs like factual incorrectness with a base empirical reality or ground truth (e.g., landmarks), thicker constructs, such as those related to cultural norms, social relations, and power dynamics, necessitate thick evaluations that can capture the deeper layers of social meaning embedded within images. We saw in our findings that as the representational category became "thicker" (e.g., moving from "incorrectness" to "connotation"), the evaluation process became more subjective and reliant on shared cultural understanding. Yet the field of AI is increasingly using thin metrics to evaluate thick concepts, i.e., using metrics (such as accuracy) and benchmarks that are better suited to evaluations of the physical world to instead evaluate the representation of social worlds. We thus advocate for an evaluation ecosystem that creates congruence between the construct (i.e., cultural representation, or its dimensions) and the measurement method, complementing thin evaluations with thick evaluations. In the words of Jackson, "[T]hin description is the necessary starting point for social investigation but not nearly enough all by itself" [38], or, as Ryle argues, "thick description is a many layered sandwich, of which only the bottom slice is catered for by the thinnest description" [70].
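To make the congruence argument concrete, the sketch below contrasts a thin, benchmark-style score with a thicker summary that preserves the distribution of evaluator judgments. It is purely illustrative and not drawn from our study: the category names, labels, and data are hypothetical.

```python
# Illustrative sketch (hypothetical data, not from the study): a "thin"
# accuracy-style metric collapses judgments into one number, while a
# "thicker" summary preserves the distribution of evaluator judgments.
from collections import Counter

# Five hypothetical evaluators' judgments of one image, per category.
judgments = {
    "incorrectness": ["accurate", "accurate", "accurate", "accurate", "inaccurate"],
    "connotation": ["respectful", "stereotyping", "respectful", "stereotyping", "ambivalent"],
}

def thin_score(labels, positive="accurate"):
    """Collapse judgments into a single number, as benchmark metrics do."""
    return sum(label == positive for label in labels) / len(labels)

def thick_summary(labels):
    """Keep the full distribution of judgments instead of collapsing it."""
    return {label: count / len(labels) for label, count in Counter(labels).items()}

print(thin_score(judgments["incorrectness"]))   # 0.8: a plausible summary for a thin construct
print(thick_summary(judgments["connotation"]))  # here, disagreement is signal, not noise
```

The point of the sketch is that a single scalar is a reasonable summary for the thinner category, but it erases exactly the information that matters for the thicker one.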
# 5.2 Co-Constructing Evaluations of AI's Cultural Representation with Communities
Our study shows that there is no objective conceptualization of representation to optimize for in images. What should ideally appear when a model is prompted for "Indian," "Pakistani," or "American" is a culturally- and contextually-contingent choice. Thus, most representational evaluations require evaluators to make interpretations, involving culturally-situated judgment, rooted in social experience. Who gets to decide what representation is and how a culture should be depicted? Who has the power to shape meaning in the struggle over meaning-making that is representation in media [cf. 2, 25, 34]? Currently, the power of this choice lies with AI researchers and anonymous evaluators who often have limited experience with the social worlds subject to evaluation. We argue, as with AI fairness work, that we must "bring the people back in" to AI measurement of representation [24, 28], to make it more contextual and remove translation gaps between abstract metrics and the actual risks and harms they seek to measure [39].
Fairness literature has argued for the importance of including stakeholder perspectives in the design of fairness metrics [77]. While prior work in AI has focused on qualitative evaluations of representation, it has primarily sought human participation in evaluations to compare differences of judgment across groups and against gold standards, or to distill people's own definitions of representation. These approaches center on stakeholders' judgments, with less focus on differences in the underlying evaluative processes that inform those judgments. As an example, disagreement itself is a robust area of research in NLP and AI, with much focus on the sociocultural influences that shape human judgment [23, 26]. Studying the culturally-situated ways that people evaluate and understand representation allowed us to suggest metrics and measures of representation that are more in line with the ways people who actually experience and consume images will measure their own representation. Thus, our findings echo researchers like Smith et al. [77], who call for AI measurement to come in line with the actual experiences of people on the ground, not just the experiences of researchers, and to integrate their experiences, knowledge, and expertise into the measurement process. The aim is not simply to apply the same metrics but to co-construct metrics and measures with them. Integration of expertise for representation must go beyond the large-scale, globally-deployed surveys often used by AI researchers: such surveys offer only a very limited mechanism for integrating cultural knowledge from particular communities into evaluative frameworks, because the metrics themselves are developed by AI researchers. As prior work argues, annotation evaluation pipelines are structured to align resulting data with what data requesters define to be ground truth [16, 55, 64]. As a result, evaluators must constrain their assessments within the feedback categories set by researchers. Evaluations that fall outside these constraints are conflated with other concerns or dismissed altogether.
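As a minimal illustration of how rigid feedback categories structure what evaluators can say, consider the following sketch. The schema and field names are hypothetical and not drawn from any deployed annotation platform; the point is only that the closed record admits nothing outside the requester-defined categories, while the extended record gives evaluators a channel for knowledge the schema did not anticipate.

```python
# Illustrative sketch (hypothetical schema): a closed annotation record with
# requester-defined categories, extended with fields that let evaluators
# surface knowledge the original schema did not anticipate.
from dataclasses import dataclass, field

@dataclass
class ClosedAnnotation:
    image_id: str
    label: str  # must be one of the requester-defined categories

@dataclass
class OpenAnnotation(ClosedAnnotation):
    # Free-text rationale lets evaluators flag concerns outside the rubric.
    rationale: str = ""
    # Evaluator-proposed categories can feed back into co-constructing metrics.
    proposed_categories: list[str] = field(default_factory=list)

annotation = OpenAnnotation(
    image_id="img-042",
    label="stereotyping",
    rationale="the clothing reads as dated for this generation of women",
    proposed_categories=["temporal coherence"],
)
print(annotation)
```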
Building on work on subjectivity in annotation [15, 23] and work in HCI that contests the idea of a singular ground truth [32, 59], our study suggests opportunities for developing evaluation methods for representation that leverage discursivity and deliberation [cf. 75]. Bergman et al. [8] have highlighted the value of discursive deliberation in eliciting individuals' justifications for the views they espouse. To do this, evaluative methods might actively encourage collective conversation and disagreement among evaluators through interpretive methods like workshops, in addition to thin methods like benchmarks. In our study, discursive evaluation helped to fill knowledge gaps, such as whether people in different regions celebrated certain occasions in the same way. However, our discursive evaluations also revealed and helped to resolve disagreements that were ontological in nature, about the very nature of representation. Ontological disagreement here was not simply a matter of knowledge gaps, but rather of fundamental differences in understanding what a depiction represents and what it should represent. Discursive evaluation processes both shed light on ontological disagreements and shift understandings in real time. Thus, dialogue and discussion can change or reaffirm the evaluative judgments evaluators held beforehand, changing the nature of the feedback they might give in individual annotation tasks.
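One way such deliberation could be operationalized in an evaluation pipeline, sketched below under purely hypothetical assumptions (labels and threshold are illustrative), is to treat high disagreement as a routing signal: rather than resolving every item by majority vote, items whose judgments are widely dispersed are escalated to group discussion.

```python
# Illustrative sketch (hypothetical labels and threshold): route
# high-disagreement items to deliberation instead of a majority vote.
import math
from collections import Counter

def label_entropy(labels):
    """Shannon entropy (bits) of the evaluators' label distribution."""
    total = len(labels)
    return -sum((n / total) * math.log2(n / total) for n in Counter(labels).values())

def route(labels, threshold=0.9):
    """High-entropy items go to group deliberation; others resolve by majority."""
    if label_entropy(labels) >= threshold:
        return "deliberate"
    return Counter(labels).most_common(1)[0][0]

print(route(["accurate"] * 4 + ["inaccurate"]))  # low disagreement -> "accurate"
print(route(["respectful", "stereotyping", "ambivalent",
             "respectful", "stereotyping"]))     # high disagreement -> "deliberate"
```

A vote-only pipeline would silently discard exactly the items our findings suggest are most in need of discursive engagement.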
# 5.3 Making Space for Thickness in AI Practice
As we build the case for thick evaluations of cultural representation, we also reflect on the epistemological hurdles that would emerge if thick evaluations were to be adopted by AI developers. For one, thick evaluations are at odds with values like generalization and scale in computing [9, 35]. Thick evaluations demand deep engagement with specific cultural contexts, require time and investment, and acknowledge the diversity of interpretations and meanings associated with representation. They embrace subjectivity and multiple interpretations, acknowledging that there is no single "correct" way to represent a social world. All of this limits their scalability and generalizability; as a result, thick evaluations also empirically demonstrate the limits of scale and generalizability. As Tsing reminds us, the multiplicity of social worlds resists attempts at hegemonic structuring [80].
For representational evaluations to become 'thicker,' AI practitioners and researchers will also have to stop drawing on the epistemic cultures of positivism and techno-solutionism prevalent in ML and computing [22, 50, 58]. These epistemologies have been extensively critiqued in AI fairness work. For instance, annotation practices rarely account for social histories of race and gender [72], and, more broadly, algorithms fail to capture social nuances and lived realities [e.g., 14, 54]. The critiques of thinness our findings proffer are also in line with critiques of positivism in the social sciences. For instance, Babones argues against treating humans and society as a "knowable objective reality, represented reasonably well by the variables" [4], and Kitchin argues this work produces social analysis that "is reductionist, functionalist and ignores the effects of culture, politics, policy, governance and capital" [47]. In this paper, we demonstrate how current approaches to evaluating representation ignore extensive critiques of quantitative measurement in the social sciences, which caution against attempting to formalize fundamentally social processes, like representation, into quantifiable metrics or objective functions that assume one base empirical reality [78]. Our findings demonstrate empirically how limited the prevailing paradigms of objectivity and quantifiability are when faced with evaluating social worlds, and how much richer AI evaluations could be if we embraced the pluralism of thick evaluations, with their emphasis on situated context, subjectivity, and lived experience. Such a move would require de-centering objectivity and acknowledging the situatedness of evaluation and cultural representation.
Our findings also show the need for a fundamental shift in how we understand and value knowledge in AI practice. The practice of thickness requires communities' participation in co-constructing evaluations. That is why, in this paper, we draw on interpretive, dialogue-based methods to capture the discursive processes involved in negotiating the meaning of images. Our methods also recognized the expertise brought by local experts and stakeholders, who partnered with us to create a sample situated in the diverse social worlds of participants. However, adopting this practice would require AI researchers to recognize and embrace pluralistic ways of knowing and understanding the world. Integrating qualitative, situated knowledge into the predominantly quantitative realm of AI necessitates a shift in mental models and a confrontation with deeply ingrained assumptions about what counts as expertise [cf. 27, 53].
We take inspiration from calls like Agre's for a critical technical practice in computing [3] to argue for a practice of thickness not just in measurement but in computing writ large. This would require questioning the singular focus on scale and generalization, embracing qualitative methods and epistemologies, actively co-creating AI with communities, and reflexively interrogating the epistemological values embedded in our evaluation practices.
# 6 CONCLUSION
As AI technologies are proposed for roles in cultural production, and as research continues to highlight the failures of AI to adequately represent all cultures, our work suggests the need to develop new, thicker forms of evaluation of cultural representation that better reflect how people consume and interpret images of their cultures. Thus, in this paper, we introduce a framework for thick evaluations of cultural representation, developed in conversation with participants in South Asia, that can move the field beyond existing reductive ideals of representation. Showing that representation is a process of meaning-making arising from the interaction between image, viewer, and context, we argue that the measurement of representation in AI needs to encompass more than just factual correctness, creating practices that bridge the disconnect between constructing representation as a technical goal and understanding it as a social goal. Developing congruence between the thickness of representation as a concept and the thickness of the evaluation method will help AI practitioners avoid creating brittle, impoverished evaluations and mitigations for representation in the design of AI systems and, ideally, lead towards AI systems that are better able to represent the plurality of social worlds of diverse cultures.
# REFERENCES
[1] Mohsen Abbasi, Sorelle A Friedler, Carlos Scheidegger, and Suresh Venkatasubramanian. 2019. Fairness in representation: quantifying stereotyping as a representational harm. In Proceedings of the 2019 SIAM International Conference on Data Mining. SIAM, 801-809.
[2] Janet L Abu-Lughod. 1991. Before European hegemony: the world system AD 1250-1350. Oxford University Press, USA.
[3] Philip E Agre. 2014. Toward a critical technical practice: Lessons learned in trying to reform AI. In Social science, technical systems, and cooperative work. Psychology Press, 131-157.
[4] Salvatore Babones. 2016. Interpretive quantitative methods for the social sciences. Sociology 50, 3 (2016), 453-469.
[5] Moshe Barasch and Luci Serrano. 1992. Icon: Studies in the History of an Idea. NYU Press.
[6] Roland Barthes. 1999. Rhetoric of the Image. In Visual Culture: The Reader (1999), 33-40.
[7] Abipsa Basu, R Venkatesh Babu, and Danish Pruthi. 2023. Inspecting the geographical representativeness of images from text-to-image models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 5136-5147.
[8] Stevie Bergman, Nahema Marchal, John Mellor, Shakir Mohamed, Iason Gabriel, and William Isaac. 2024. STELA: a community-centred approach to norm elicitation for AI alignment. Scientific Reports 14, 1 (2024), 6616.
[9] Abeba Birhane, Pratyusha Kalluri, Dallas Card, William Agnew, Ravit Dotan, and Michelle Bao. 2022. The values encoded in machine learning research. In Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency. 173-184.
[10] Ann Blandford, Dominic Furniss, and Stephann Makri. 2016. Qualitative HCI Research: Going Behind the Scenes. Morgan & Claypool Publishers.
[11] Su Lin Blodgett, Gilsinia Lopez, Alexandra Olteanu, Robert Sim, and Hanna Wallach. 2021. Stereotyping Norwegian salmon: An inventory of pitfalls in fairness benchmark datasets. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). 1004-1015.
[12] Virginia Braun and Victoria Clarke. 2006. Using thematic analysis in psychology. Qualitative Research in Psychology 3, 2 (2006), 77-101.
[13] Virginia Braun and Victoria Clarke. 2021. One size fits all? What counts as quality practice in (reflexive) thematic analysis? Qualitative Research in Psychology 18, 3 (2021), 328-352.
[14] Meredith Broussard. 2018. Artificial Intelligence: How Computers Misunderstand the World. MIT Press.
[15] Federico Cabitza, Andrea Campagner, and Valerio Basile. 2023. Toward a perspectivist turn in ground truthing for predictive computing. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 6860-6868.
[16] Srravya Chandhiramowuli, Alex S Taylor, Sara Heitlinger, and Ding Wang. 2024. Making Data Work Count. Proceedings of the ACM on Human-Computer Interaction 8, CSCW1 (2024), 1-26.
[17] Kyla Chasalow and Karen Levy. 2021. Representativeness in statistics, politics, and machine learning. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency. 77-89.
[18] Jennifer Chien and David Danks. 2024. Beyond Behaviorist Representational Harms: A Plan for Measurement and Mitigation. In The 2024 ACM Conference on Fairness, Accountability, and Transparency. 933-946.
[19] Jaemin Cho, Abhay Zala, and Mohit Bansal. 2022. DALL-Eval: Probing the Reasoning Skills and Social Biases of Text-to-Image Generative Transformers. CoRR abs/2202.04053 (2022). arXiv:2202.04053 https://arxiv.org/abs/2202.04053
[20] Andy Crabtree. 2024. H is for Human and How (Not) To Evaluate Qualitative Research in HCI. arXiv preprint arXiv:2409.01302 (2024).
[21] Kate Crawford. 2017. The trouble with bias. In Conference on Neural Information Processing Systems, invited speaker.
[22] Jay Cunningham, Gabrielle Benabdallah, Daniela Rosner, and Alex Taylor. 2023. On the Grounds of Solutionism: Ontologies of Blackness and HCI. ACM Trans. Comput.-Hum. Interact. 30, 2, Article 20 (April 2023), 17 pages. https://doi.org/10.1145/3557890
[23] Aida Mostafazadeh Davani, Mark Diaz, and Vinodkumar Prabhakaran. 2022. Dealing with disagreements: Looking beyond the majority vote in subjective annotations. Transactions of the Association for Computational Linguistics 10 (2022), 92-110.
[24] Emily Denton, Alex Hanna, Razvan Amironesei, Andrew Smart, Hilary Nicole, and Morgan Klaus Scheuerman. 2020. Bringing the people back in: Contesting benchmark machine learning datasets. arXiv preprint arXiv:2007.07399 (2020).
[25] Dipti Desai. 2000. Imaging difference: The politics of representation in multicultural art education. Studies in Art Education 41, 2 (2000), 114-129.
[26] Mark Diaz, Ian Kivlichan, Rachel Rosen, Dylan Baker, Razvan Amironesei, Vinodkumar Prabhakaran, and Emily Denton. 2022. CrowdWorkSheets: Accounting for individual and collective identities underlying crowdsourced dataset annotation. In Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency. 2342-2351.
[27] Mark Diaz and Angela DR Smith. 2024. What Makes An Expert? Reviewing How ML Researchers Define "Expert". In Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society, Vol. 7. 358-370.
[28] Catherine D'Ignazio and Lauren F Klein. 2023. Data Feminism. MIT Press.
[29] Clifford Geertz. 2008. Thick description: Toward an interpretive theory of culture. In The Cultural Geography Reader. Routledge, 41-51.
[30] Sourojit Ghosh. 2024. Interpretations, Representations, and Stereotypes of Caste within Text-to-Image Generators. arXiv:2408.01590 [cs.CY] https://arxiv.org/abs/2408.01590
[31] Sourojit Ghosh, Nina Lutz, and Aylin Caliskan. 2024. "I don't see myself represented here at all": User Experiences of Stable Diffusion Outputs Containing Representational Harms across Gender Identities and Nationalities. arXiv preprint arXiv:2408.01594 (2024).
[32] Mitchell L Gordon, Michelle S Lam, Joon Sung Park, Kayur Patel, Jeff Hancock, Tatsunori Hashimoto, and Michael S Bernstein. 2022. Jury learning: Integrating dissenting voices into machine learning models. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems. 1-19.
[33] Stuart Hall. 1989. Cultural identity and cinematic representation. Framework: The Journal of Cinema and Media 36 (1989), 68-81.
[34] Stuart Hall. 1997. Representation: Cultural Representations and Signifying Practices. SAGE Publications, London.
[35] Alex Hanna, Emily Denton, Andrew Smart, and Jamila Smith-Loud. 2020. Towards a critical race methodology in algorithmic fairness. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency. 501-512.
[36] Emma Harvey, Emily Sheng, Su Lin Blodgett, Alexandra Chouldechova, Jean Garcia-Gathright, Alexandra Olteanu, and Hanna Wallach. 2024. Gaps Between Research and Practice When Measuring Representational Harms Caused by LLM-Based Systems. arXiv preprint arXiv:2411.15662 (2024).
[37] Saghar Hosseini, Hamid Palangi, and Ahmed Hassan Awadallah. 2023. An empirical study of metrics to measure representational harms in pretrained language models. arXiv preprint arXiv:2301.09211 (2023).
[38] John L Jackson. 2013. Thin Description: Ethnography and the African Hebrew Israelites of Jerusalem. Harvard University Press.
[39] Abigail Z Jacobs and Hanna Wallach. 2021. Measurement and fairness. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency. 375-385.
[40] Akshita Jha, Vinodkumar Prabhakaran, Remi Denton, Sarah Laszlo, Shachi Dave, Rida Qadri, Chandan Reddy, and Sunipa Dev. 2024. ViSAGe: A Global-Scale Analysis of Visual Stereotypes in Text-to-Image Generation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 12333-12347.
[41] Amba Kak. 2020. "The Global South is everywhere, but also always somewhere": National Policy Narratives and AI Justice. In Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society. 307-312.
[42] Nithish Kannen, Arif Ahmad, Marco Andreetto, Vinodkumar Prabhakaran, Utsav Prabhu, Adji Bousso Dieng, Pushpak Bhattacharyya, and Shachi Dave. 2024. Beyond Aesthetics: Cultural Competence in Text-to-Image Models. arXiv preprint arXiv:2407.06863 (2024).
[43] Jared Katzman, Angelina Wang, Morgan Scheuerman, Su Lin Blodgett, Kristen Laird, Hanna Wallach, and Solon Barocas. 2023. Taxonomizing and measuring representational harms: A look at image tagging. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 14277-14285.
[44] Matthew Kay, Cynthia Matuszek, and Sean A Munson. 2015. Unequal representation and gender stereotypes in image search results for occupations. In Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems. 3819-3828.
[45] Durriya HZ Khairullah and Zahid Y Khairullah. 2009. Cross-cultural analysis of gender roles: Indian and US advertisements. Asia Pacific Journal of Marketing and Logistics 21, 1 (2009), 58-75.
[46] Simran Khanuja, Diksha Bansal, Sarvesh Mehtani, Savya Khosla, Atreyee Dey, Balaji Gopalan, Dilip Kumar Margam, Pooja Aggarwal, Rajiv Teja Nagipogu, Shachi Dave, Shruti Gupta, Subhash Chandra Bose Gali, Vish Subramanian, and Partha Talukdar. 2021. MuRIL: Multilingual Representations for Indian Languages. arXiv:2103.10730 [cs.CL] https://arxiv.org/abs/2103.10730
[47] Rob Kitchin. 2014. Big Data, new epistemologies and paradigm shifts. Big Data & Society 1, 1 (Apr-Jun 2014), 1-12. https://doi.org/10.1177/2053951714528481
[48] Preethi Lahoti, Nicholas Blumm, Xiao Ma, Raghavendra Kotikalapudi, Sahitya Potluri, Qijun Tan, Hansa Srinivasan, Ben Packer, Ahmad Beirami, Alex Beutel, et al. 2023. Improving diversity of demographic representation in large language models via collective-critiques and self-voting. arXiv preprint arXiv:2310.16523 (2023).
[49] Huihan Li, Liwei Jiang, Nouha Dziri, Xiang Ren, and Yejin Choi. 2024. CULTURE-GEN: Revealing Global Cultural Perception in Language Models through Natural Language Prompting. arXiv preprint arXiv:2404.10199 (2024).
[50] Silvia Lindtner, Shaowen Bardzell, and Jeffrey Bardzell. 2016. Reconstituting the utopian vision of making: HCI after technosolutionism. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems. 1390-1402.
[51] Heather Love. 2013. Close reading and thin description. Public Culture 25, 3 (2013), 401-434.
[52] Kelly Avery Mack, Rida Qadri, Remi Denton, Shaun K Kane, and Cynthia L Bennett. 2024. "They only care to show us the wheelchair": disability representation in text-to-image AI models. In Proceedings of the CHI Conference on Human Factors in Computing Systems. 1-23.
[53] Michael Madaio, Shivani Kapania, Rida Qadri, Ding Wang, Andrew Zaldivar, Remi Denton, and Lauren Wilcox. 2024. Learning about Responsible AI On-The-Job: Learning Pathways, Orientations, and Aspirations. In The 2024 ACM Conference on Fairness, Accountability, and Transparency. 1544-1558.
[54] Shannon Mattern. 2021. A City Is Not a Computer: Other Urban Intelligences. Princeton University Press.
[55] Milagros Miceli, Martin Schuessler, and Tianling Yang. 2020. Between subjectivity and imposition: Power dynamics in data annotation for computer vision. Proceedings of the ACM on Human-Computer Interaction 4, CSCW2 (2020), 1-25.
[56] Nusrat Jahan Mim, Dipannita Nandi, Sadaf Sumyia Khan, Arundhuti Dey, and Syed Ishtiaque Ahmed. 2024. In-Between Visuals and Visible: The Impacts of Text-to-Image Generative AI Tools on Digital Image-making Practices in the Global South. In Proceedings of the CHI Conference on Human Factors in Computing Systems (Honolulu, HI, USA) (CHI '24). Association for Computing Machinery, New York, NY, USA, Article 474, 18 pages. https://doi.org/10.1145/3613904.3641951
[57] Shakir Mohamed, Marie-Therese Png, and William Isaac. 2020. Decolonial AI: Decolonial theory as sociotechnical foresight in artificial intelligence. Philosophy & Technology 33 (2020), 659-684.
[58] Evgeny Morozov. 2013. To Save Everything, Click Here: The Folly of Technological Solutionism. PublicAffairs.
[59] Michael Muller, Christine T Wolf, Josh Andres, Michael Desmond, Narendra Nath Joshi, Zahra Ashktorab, Aabhas Sharma, Kristina Brimijoin, Qian Pan, Evelyn Duesterwald, et al. 2021. Designing ground truth and the social life of labels. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-16.
[60] Junho Myung, Nayeon Lee, Yi Zhou, Jiho Jin, Rifki Afina Putri, Dimosthenis Antypas, Hsuvas Borkakoty, Eunsu Kim, Carla Perez-Almendros, Abinew Ali Ayele, Víctor Gutiérrez-Basulto, Yazmín Ibáñez-García, Hwaran Lee, Shamsuddeen Hassan Muhammad, Kiwoong Park, Anar Sabuhi Rzayev, Nina White, Seid Muhie Yimam, Mohammad Taher Pilehvar, Nedjma Ousidhoum, Jose Camacho-Collados, and Alice Oh. 2025. BLEnD: A Benchmark for LLMs on Everyday Knowledge in Diverse Cultures and Languages. arXiv:2406.09948 [cs.CL] https://arxiv.org/abs/2406.09948
[61] Sherry B Ortner. 1995. Resistance and the problem of ethnographic refusal. Comparative Studies in Society and History 37, 1 (1995), 173-193.
[62] Sherry B Ortner. 1997. Thick resistance: Death and the cultural construction of agency in Himalayan mountaineering. Representations 59 (1997).
[63] Siddhesh Pawar, Junyeong Park, Jiho Jin, Arnav Arora, Junho Myung, Srishti Yadav, Faiz Ghifari Haznitrama, Inhwa Song, Alice Oh, and Isabelle Augenstein. 2024. Survey of Cultural Awareness in Language Models: Text and Beyond. arXiv:2411.00860 [cs.CL] https://arxiv.org/abs/2411.00860
[64] Julian Posada. 2023. Platform Authority and Data Quality: Who Decides What Counts in Data Production for Artificial Intelligence. Technical Report. Berggruen Institute and Global Affairs Canada.
[65] Rida Qadri, Aida M Davani, Kevin Robinson, and Vinodkumar Prabhakaran. 2025. Risks of Cultural Erasure in Large Language Models. arXiv preprint arXiv:2501.01056 (2025).
[66] Rida Qadri, Renee Shelby, Cynthia L. Bennett, and Emily Denton. 2023. AI's Regimes of Representation: A Community-centered Study of Text-to-Image Models in South Asia. In Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency (Chicago, IL, USA) (FAccT '23). Association for Computing Machinery, New York, NY, USA, 506-517. https://doi.org/10.1145/3593013.3594016
[67] Rida Qadri, Renee Shelby, Cynthia L Bennett, and Emily Denton. 2023. AI's regimes of representation: A community-centered study of text-to-image models in South Asia. In Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency. 506-517.
[68] Priya Raman, Jake Harwood, Deborah Weis, Judith L Anderson, and Grace Miller. 2008. Portrayals of older adults in US and Indian magazine advertisements: A cross-cultural comparison. The Howard Journal of Communications 19, 3 (2008), 221-240.
[69] Annelise Riles. 2000. The Network Inside Out. University of Michigan Press.
[70] Gilbert Ryle. 1968. The thinking of thoughts. University Lectures, No. 18. University of Saskatchewan, Saskatoon.
[71] Nithya Sambasivan, Erin Arnesen, Ben Hutchinson, Tulsee Doshi, and Vinodkumar Prabhakaran. 2021. Re-imagining algorithmic fairness in India and beyond. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency. 315-328.
[72] Morgan Klaus Scheuerman, Kandrea Wade, Caitlin Lustig, and Jed R Brubaker. 2020. How we've taught algorithms to see identity: Constructing race and gender in image databases for facial analysis. Proceedings of the ACM on Human-Computer Interaction 4, CSCW1 (2020), 1-35.
[73] Andrew D Selbst, Danah Boyd, Sorelle A Friedler, Suresh Venkatasubramanian, and Janet Vertesi. 2019. Fairness and abstraction in sociotechnical systems. In Proceedings of the Conference on Fairness, Accountability, and Transparency. 59-68.
[74] Renee Shelby, Shaleh Rismani, Kathryn Henne, AJung Moon, Negar Rostamzadeh, Paul Nicholas, N'Mah Yilla-Akbari, Jess Gallegos, Andrew Smart, Emilio Garcia, et al. 2023. Sociotechnical harms of algorithmic systems: Scoping a taxonomy for harm reduction. In Proceedings of the 2023 AAAI/ACM Conference on AI, Ethics, and Society. 723-741.
[75] Hong Shen, Leijie Wang, Wesley H Deng, Ciell Brusse, Ronald Velgersdijk, and Haiyi Zhu. 2022. The model card authoring toolkit: Toward community-centered, deliberation-driven AI design. In Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency. 440-451.
[76] Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David I. Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, Enzo Ferrante, Marzieh Fadaee, Beyza Ermis, and Sara Hooker. 2024. Global MMLU: Understanding and Addressing Cultural and Linguistic Biases in Multilingual Evaluation. arXiv:2412.03304 [cs.CL] https://arxiv.org/abs/2412.03304
[77] Jessie J. Smith, Aishwarya Satwani, Robin Burke, and Casey Fiesler. 2024. Recommend Me? Designing Fairness Metrics with Providers. In Proceedings of the 2024 ACM Conference on Fairness, Accountability, and Transparency (Rio de Janeiro, Brazil) (FAccT '24). Association for Computing Machinery, New York, NY, USA, 2389-2399. https://doi.org/10.1145/3630106.3659044
[78] George Steinmetz. 2005. Introduction: Positivism and its others in the social sciences. (2005).
[79] Peng Su, Wenji Mao, Daniel Zeng, Xiaochen Li, and Fei-Yue Wang. 2009. Handling class imbalance problem in cultural modeling. In 2009 IEEE International Conference on Intelligence and Security Informatics. IEEE, 251-256.
[80] Anna Lowenhaupt Tsing. 2012. On nonscalability: The living world is not amenable to precision-nested scales. Common Knowledge 18, 3 (2012), 505-524.
[81] Yixin Wan, Arjun Subramonian, Anaelia Ovalle, Zongyu Lin, Ashima Suvarna, Christina Chance, Hritik Bansal, Rebecca Pattichis, and Kai-Wei Chang. 2024. Survey of Bias In Text-to-Image Generation: Definition, Evaluation, and Mitigation. arXiv preprint arXiv:2404.01030 (2024).
[82] Angelina Wang, Xuechunzi Bai, Solon Barocas, and Su Lin Blodgett. 2024. Measuring machine learning harms from stereotypes: requires understanding who is being harmed by which errors in what ways. arXiv preprint arXiv:2402.04420 (2024).
[83] Wenxuan Wang, Wenxiang Jiao, Jingyuan Huang, Ruyi Dai, Jen-tse Huang, Zhaopeng Tu, and Michael Lyu. 2024. Not All Countries Celebrate Thanksgiving: On the Cultural Dominance in Large Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Lun-Wei Ku, Andre Martins, and Vivek Srikumar (Eds.). Association for Computational Linguistics, Bangkok, Thailand, 6349-6384. https://doi.org/10.18653/v1/2024.acl-long.345
[84] Lili Zhang, Xi Liao, Zaijia Yang, Baihang Gao, Chunjie Wang, Qiuling Yang, and Deshun Li. 2024. Partiality and Misconception: Investigating Cultural Representativeness in Text-to-Image Models. In Proceedings of the CHI Conference on Human Factors in Computing Systems. 1-25.
# 7 APPENDIX
We include here detailed demographic information about our study participants, presented by country in the tables below:
<table><tr><td>Participants</td><td>Country</td><td>Age</td><td>Gender</td><td>Educational Level</td><td>Region</td><td>Language</td></tr><tr><td>LK-1</td><td>Sri Lanka</td><td>25–34</td><td>Male</td><td>Undergraduate</td><td>North Western Province</td><td>Sinhala</td></tr><tr><td>LK-2</td><td>Sri Lanka</td><td>18–24</td><td>Male</td><td>Undergraduate</td><td>North Western Province</td><td>Sinhala</td></tr><tr><td>LK-3</td><td>Sri Lanka</td><td>18–24</td><td>Female</td><td>Undergraduate</td><td>Western Province</td><td>Tamil, English</td></tr><tr><td>LK-4</td><td>Sri Lanka</td><td>18–24</td><td>Male</td><td>Secondary</td><td>Western Province</td><td>Sinhala</td></tr><tr><td>LK-5</td><td>Sri Lanka</td><td>18–24</td><td>Female</td><td>Undergraduate</td><td>Western Province</td><td>Sinhala</td></tr><tr><td>LK-6</td><td>Sri Lanka</td><td>18–24</td><td>Female</td><td>Undergraduate</td><td>Western Province/ Central Province</td><td>Sinhala</td></tr><tr><td>LK-7</td><td>Sri Lanka</td><td>18–24</td><td>Female</td><td>Undergraduate</td><td>Central Province/ Western Province</td><td>Sinhala, Tamil and English</td></tr><tr><td>LK-8</td><td>Sri Lanka</td><td>25–34</td><td>Male</td><td>Undergraduate</td><td>Western Province/ North Western Province</td><td>Tamil</td></tr><tr><td>LK-9</td><td>Sri Lanka</td><td>18–24</td><td>Male</td><td>Undergraduate</td><td>Western Province/ Southern Province/ Central Province</td><td>Sinhala</td></tr><tr><td>LK-10</td><td>Sri Lanka</td><td>35–44</td><td>Male</td><td>Undergraduate</td><td>Western Province</td><td>Sinhala</td></tr><tr><td>LK-11</td><td>Sri Lanka</td><td>45–54</td><td>Female</td><td>Secondary</td><td>Western Province</td><td>Sinhala</td></tr></table>
Table 1. Participant Information from Sri Lanka
<table><tr><td>Participants</td><td>Country</td><td>Age</td><td>Gender</td><td>Educational Level</td><td>Region</td><td>Language</td></tr><tr><td>PK-1</td><td>Pakistan</td><td>25–34</td><td>Female</td><td>Post-Graduate</td><td>Punjab</td><td>Urdu</td></tr><tr><td>PK-2</td><td>Pakistan</td><td>25–34</td><td>Male</td><td>Undergraduate</td><td>Gilgit-Baltistan</td><td>Balti</td></tr><tr><td>PK-3</td><td>Pakistan</td><td>25–34</td><td>Male</td><td>Post-Graduate</td><td>Rajanpur</td><td>Balochi, Siraiki, Urdu</td></tr><tr><td>PK-4</td><td>Pakistan</td><td>35–44</td><td>Transgender man</td><td>Undergraduate</td><td>Punjab</td><td>Urdu</td></tr><tr><td>PK-5</td><td>Pakistan</td><td>18–24</td><td>Transgender woman</td><td>Undergraduate</td><td>Punjab</td><td>Punjabi, Urdu</td></tr><tr><td>PK-6</td><td>Pakistan</td><td>18–24</td><td>Female</td><td>Undergraduate</td><td>Gilgit</td><td>Burushaski</td></tr><tr><td>PK-7</td><td>Pakistan</td><td>18–24</td><td>Female</td><td>Secondary</td><td>Punjab</td><td>Urdu, Punjabi</td></tr><tr><td>PK-8</td><td>Pakistan</td><td>18–24</td><td>Female</td><td>Undergraduate</td><td>Punjab</td><td>Urdu, Punjabi</td></tr><tr><td>PK-9</td><td>Pakistan</td><td>18–24</td><td>Male</td><td>Secondary</td><td>Gilgit-Baltistan</td><td>Balti</td></tr><tr><td>PK-10</td><td>Pakistan</td><td>18–24</td><td>Male</td><td>Secondary</td><td>Gilgit-Baltistan</td><td>Balti</td></tr><tr><td>PK-11</td><td>Pakistan</td><td>18–24</td><td>Male</td><td>Undergraduate</td><td>South Punjab</td><td>Saraiki</td></tr></table>
Table 2. Participant Information from Pakistan
<table><tr><td>Participants</td><td>Country</td><td>Age</td><td>Gender</td><td>Educational Level</td><td>Region</td><td>Language</td></tr><tr><td>IN-1</td><td>India</td><td>25–34</td><td>Female</td><td>Post-Graduate</td><td>Kerala</td><td>Malayalam</td></tr><tr><td>IN-2</td><td>India</td><td>55–64</td><td>Female</td><td>Undergraduate</td><td>Maharashtra/ Karnataka</td><td>English, French, Hindi</td></tr><tr><td>IN-3</td><td>India</td><td>25–34</td><td>Female</td><td>Undergraduate</td><td>Assam</td><td>Assamese and Hindi</td></tr><tr><td>IN-4</td><td>India</td><td>45–54</td><td>Female</td><td>Post-Graduate</td><td>Maharashtra</td><td>Marathi</td></tr><tr><td>IN-5</td><td>India</td><td>45–54</td><td>Female</td><td>Post-Graduate</td><td>Chandigarh/ Punjab</td><td>Punjabi, Hindi, English</td></tr><tr><td>IN-6</td><td>India</td><td>25–34</td><td>Male</td><td>Post-Graduate</td><td>Bihar</td><td>Hindi</td></tr><tr><td>IN-7</td><td>India</td><td>18–24</td><td>Male</td><td>Secondary</td><td>Gujarat</td><td>Gujarati</td></tr><tr><td>IN-8</td><td>India</td><td>25–34</td><td>Non-binary</td><td>Post-Graduate</td><td>Maharashtra</td><td>Hindi, Magadhi</td></tr><tr><td>IN-9</td><td>India</td><td>25–34</td><td>Genderqueer</td><td>Post-Graduate</td><td>West Bengal, Gujarat, Karnataka</td><td>Bengali and English</td></tr><tr><td>IN-10</td><td>India</td><td>25–34</td><td>Female</td><td>Undergraduate</td><td>Telangana/ Karnataka</td><td>Telugu, Hindi, Kannada</td></tr><tr><td>IN-11</td><td>India</td><td>18–24</td><td>Male</td><td>Undergraduate</td><td>Rajasthan</td><td>Rajasthani</td></tr><tr><td>IN-12</td><td>India</td><td>18–24</td><td>Male</td><td>Undergraduate</td><td>Chhattisgarh</td><td>Chhattisgarhi</td></tr><tr><td>IN-13</td><td>India</td><td>25–34</td><td>Male</td><td>Post-Graduate</td><td>Tripura</td><td>Sylheti</td></tr><tr><td>IN-14</td><td>India</td><td>25–34</td><td>Female</td><td>Post-Graduate</td><td>Assam</td><td>Assamese</td></tr><tr><td>IN-15</td><td>India</td><td>35–44</td><td>Female</td><td>Post-Graduate</td><td>Kashmir</td><td>Kashmiri, Urdu, Hindi, English</td></tr></table>
Table 3. Participant Information from India
data/2025/2503_19xxx/2503.19075/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c754a3aaeac8344597f6910fa021332765036afa7bd6e10a648aa8f15fdc6e90
+size 205552
data/2025/2503_19xxx/2503.19075/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_content_list.json
ADDED
@@ -0,0 +1,1556 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Rankers, Judges, and Assistants: Towards Understanding the Interplay of LLMs in Information Retrieval Evaluation",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
112,
|
| 8 |
+
101,
|
| 9 |
+
883,
|
| 10 |
+
150
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Krisztian Balog",
|
| 17 |
+
"bbox": [
|
| 18 |
+
168,
|
| 19 |
+
162,
|
| 20 |
+
292,
|
| 21 |
+
178
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Google DeepMind",
|
| 28 |
+
"bbox": [
|
| 29 |
+
168,
|
| 30 |
+
179,
|
| 31 |
+
292,
|
| 32 |
+
194
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Stavanger, Norway",
|
| 39 |
+
"bbox": [
|
| 40 |
+
166,
|
| 41 |
+
195,
|
| 42 |
+
295,
|
| 43 |
+
209
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "krisztianb@google.com",
|
| 50 |
+
"bbox": [
|
| 51 |
+
151,
|
| 52 |
+
210,
|
| 53 |
+
310,
|
| 54 |
+
224
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Donald Metzler",
|
| 61 |
+
"bbox": [
|
| 62 |
+
436,
|
| 63 |
+
162,
|
| 64 |
+
563,
|
| 65 |
+
176
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Google DeepMind",
|
| 72 |
+
"bbox": [
|
| 73 |
+
436,
|
| 74 |
+
179,
|
| 75 |
+
560,
|
| 76 |
+
193
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "Mountain View, USA",
|
| 83 |
+
"bbox": [
|
| 84 |
+
426,
|
| 85 |
+
194,
|
| 86 |
+
571,
|
| 87 |
+
208
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "metzler@google.com",
|
| 94 |
+
"bbox": [
|
| 95 |
+
426,
|
| 96 |
+
210,
|
| 97 |
+
571,
|
| 98 |
+
224
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "Zhen Qin",
|
| 105 |
+
"bbox": [
|
| 106 |
+
727,
|
| 107 |
+
162,
|
| 108 |
+
807,
|
| 109 |
+
178
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "Google DeepMind",
|
| 116 |
+
"bbox": [
|
| 117 |
+
705,
|
| 118 |
+
179,
|
| 119 |
+
830,
|
| 120 |
+
194
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "Mountain View, USA",
|
| 127 |
+
"bbox": [
|
| 128 |
+
694,
|
| 129 |
+
195,
|
| 130 |
+
839,
|
| 131 |
+
208
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "zhenqin@google.com",
|
| 138 |
+
"bbox": [
|
| 139 |
+
694,
|
| 140 |
+
210,
|
| 141 |
+
839,
|
| 142 |
+
224
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
+    {
+        "type": "text",
+        "text": "Abstract",
+        "text_level": 1,
+        "bbox": [83, 234, 156, 247],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Large language models (LLMs) are increasingly integral to information retrieval (IR), powering ranking, evaluation, and AI-assisted content creation. This widespread adoption necessitates a critical examination of potential biases arising from the interplay between these LLM-based components. This paper synthesizes existing research and presents novel experiment designs that explore how LLM-based rankers and assistants influence LLM-based judges. We provide the first empirical evidence of LLM judges exhibiting significant bias towards LLM-based rankers. Furthermore, we observe limitations in LLM judges' ability to discern subtle system performance differences. Contrary to some previous findings, our preliminary study does not find evidence of bias against AI-generated content. These results highlight the need for a more holistic view of the LLM-driven information ecosystem. To this end, we offer initial guidelines and a research agenda to ensure the reliable use of LLMs in IR evaluation.",
+        "bbox": [81, 252, 483, 474],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "CCS Concepts",
+        "text_level": 1,
+        "bbox": [83, 484, 200, 501],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "- Information systems $\\rightarrow$ Information retrieval.",
+        "bbox": [83, 503, 403, 517],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Keywords",
+        "text_level": 1,
+        "bbox": [83, 530, 169, 545],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Large language models, ranking, evaluation",
+        "bbox": [83, 547, 349, 561],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "ACM Reference Format:",
+        "text_level": 1,
+        "bbox": [83, 568, 230, 579],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Krisztian Balog, Donald Metzler, and Zhen Qin. 2025. Rankers, Judges, and Assistants: Towards Understanding the Interplay of LLMs in Information Retrieval Evaluation. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '25), July 13-18, 2025, Padua, Italy. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3726302.3730348",
+        "bbox": [81, 579, 482, 656],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "1 Introduction",
+        "text_level": 1,
+        "bbox": [83, 667, 218, 681],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Due to their remarkable capabilities, large language models (LLMs) are fundamentally reshaping the field of information retrieval (IR), becoming integral to core ranking algorithms and the automation of evaluation processes. Beyond their role in core IR processes, LLMs are also powering AI assistants that are rapidly changing how users generate content, from writing emails and articles to creating code and translating content between languages. As the reliance on LLMs is expected to deepen given their potential, it is increasingly crucial to maintain a balanced perspective by assessing and acknowledging",
+        "bbox": [81, 686, 482, 811],
+        "page_idx": 0
+    },
+    {
+        "type": "image",
+        "img_path": "images/c64184efd69abec7bbb0e521f68251cef312c30401498e459dbf11d55c8af056.jpg",
+        "image_caption": [],
+        "image_footnote": [],
+        "bbox": [84, 819, 156, 840],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "This work is licensed under a Creative Commons Attribution 4.0 International License.",
+        "bbox": [83, 842, 482, 852],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "SIGIR '25, Padua, Italy",
+        "bbox": [84, 853, 191, 863],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "© 2025 Copyright held by the owner/author(s).",
+        "bbox": [84, 864, 303, 875],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "ACM ISBN 979-8-4007-1592-1/2025/07",
+        "bbox": [84, 875, 264, 883],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "https://doi.org/10.1145/3726302.3730348",
+        "bbox": [84, 883, 272, 895],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "the potential risks alongside the undeniable benefits. Could this heavy reliance on LLMs across content creation, retrieval, ranking, evaluation, etc., inadvertently introduce or amplify biases within these systems?",
+        "bbox": [511, 234, 913, 290],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Recent research has begun to explore some of these emerging issues. For example, studies have shown that LLMs can exhibit biases in their output, favoring LLM-generated content over human-generated content [10], and perpetuating biases present in their training data [15, 21, 33]. Furthermore, LLM-based rating systems have been found to be susceptible to manipulation [2], may not accurately reflect human preferences [28], and demonstrate self-inconsistency [50]. Additionally, the phenomenon of \"model collapse\" has also been observed, where LLMs trained on synthetic data generated by other LLMs can lead to a degradation of quality and diversity in generated content [47].",
+        "bbox": [511, 290, 913, 443],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Within the IR research community, the use of LLMs for assessment is a subject of ongoing debate [14], with opinions ranging from complete rejection of LLMs for relevance assessment [48] to the assertion that they can fully replace human judgments [55]. Investigations have thus far focused on the agreement of LLM-generated ratings with human assessments [14, 54, 55] and the potential for LLMs to introduce biases in search results [10]. However, a comprehensive analysis of the implications of LLMs across the entire information ecosystem, from content creation with AI assistance to LLM-based reranking and LLM-based judges for evaluation, remains a critical gap in the current literature.",
+        "bbox": [511, 441, 913, 594],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "This paper aims to advance our understanding of these issues by synthesizing prior research and, crucially, providing novel empirical evidence. We specifically focus on the novel challenge of understanding the effect LLM-based rankers and AI-powered content creation have on an LLM-based judge's ability to accurately assess relevance. Prior work has separately noted the potential interaction between LLM-based rankers and judges [14, 30, 42, 54] (an interaction that has yet to be empirically investigated), while other initial work has explored the relationship between AI-powered content creation and rankers [10]. However, we argue that the complex interplay between each of these roles must be considered holistically to fully understand the potential implications of widespread adoption of LLM-based judges. We present initial results demonstrating the importance of this interconnected perspective, showcasing how the use of LLMs across the information lifecycle can influence the accuracy and potential biases of LLM judges.",
+        "bbox": [511, 595, 913, 816],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "We start by considering the case of LLMs being used as both rankers and judges and present the first empirical demonstration of a significant bias of LLM judges towards LLM-based rankers. Novel to our approach is the examination of LLM judge performance via the use of oracle rankers, allowing for a controlled assessment of",
+        "bbox": [511, 816, 913, 885],
+        "page_idx": 0
+    },
+    {
+        "type": "aside_text",
+        "text": "arXiv:2503.19092v2 [cs.IR] 9 Jul 2025",
+        "bbox": [22, 291, 57, 704],
+        "page_idx": 0
+    },
+    {
+        "type": "image",
+        "img_path": "images/8056ec1cec0233487d400399665af269122ad9e7e5a906be3cc5201872c6a16c.jpg",
+        "image_caption": [
+            "Figure 1: LLM usage in modern information access systems."
+        ],
+        "image_footnote": [],
+        "bbox": [94, 103, 472, 268],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "LLM judge behavior and discriminative ability. Using the TREC 2019 and 2020 Deep Learning track datasets, we conduct experiments that also compare different-sized LLM judges within the same model family. Our results reveal several key findings: (1) LLM judges are more lenient in their relevance assessments than human judges, confirming previous observations [55]; (2) LLM judges exhibit a significant bias towards LLM-based rankers, a phenomenon previously only hypothesized; and (3) LLM judges demonstrate limited ability to discern subtle, yet statistically significant, performance differences between systems. Additionally, we conduct a preliminary study into whether LLM judges demonstrate biases when they encounter AI-generated content. Contrary to some previously published findings [25, 27, 37], our experiments do not provide evidence of this bias, suggesting that deeper, more rigorous empirical evaluations are required to better understand this phenomenon.",
+        "bbox": [81, 324, 482, 532],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "What emerges from these targeted studies is a better picture of how different interactions between LLM-based components give rise to different behaviors within LLM-based judges. Taken together, our findings yield one of the most holistic views of this problem space, provide unique insights into best practices for leveraging LLMs as judges, and motivate a rich set of future research questions that will need to be answered to understand the complexities of these interactions even better.",
+        "bbox": [81, 532, 482, 642],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "In summary, the primary contributions of this paper are: (1) a review of how LLMs are currently used in IR, bringing attention to the interconnected roles they play, and synthesizing the current understanding of their interactions; (2) experiments that highlight how interactions between LLMs might result in inaccurate or biased assessments of retrieval effectiveness; (3) a preliminary set of guidelines for using LLMs in IR evaluation; and (4) a research agenda aimed at sparking further discussion and research along this emerging direction.",
+        "bbox": [81, 643, 482, 768],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "2 Background",
+        "text_level": 1,
+        "bbox": [83, 782, 212, 799],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "This section overviews the main uses of LLMs in information access, illustrated in Fig. 1, providing context for subsequent analysis of the interplay between some of these uses.",
+        "bbox": [81, 801, 482, 843],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "LLMs as Rankers. In modern large-scale IR systems, a multi-stage retrieval-then-rerank pipeline has become a prominent approach, wherein an initial retrieval stage, often based on lexical",
+        "bbox": [81, 854, 482, 896],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "matching or embedding-based methods, is followed by one or multiple reranking stages, utilizing more sophisticated models to refine the results. This reranking stage frequently employs LLMs, either fine-tuned for the task of ranking [34, 35, 39, 64] or via prompting in a pointwise [12, 20, 44], pairwise [40], or listwise [29, 51] fashion. Dai et al. [10] present results suggesting an inherent bias in neural retrieval models toward LLM-generated texts. This source bias may stem from shared Transformer-based architectures and pretraining approaches, and can lead to \"semantic shortcuts\" during matching. Neural IR models are also shown to be vulnerable to adversarial attacks, such as keyword stuffing and content injection [38, 52].",
+        "bbox": [511, 106, 913, 260],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "LLMs as Judges. Early LLMs, such as BERT, have been utilized for measuring the distributional similarity of texts [60, 61] and for evaluating specific tasks via fine-tuning, including machine translation [63], text summarization [26], and question answering [31]. The arrival of generative LLMs, such as ChatGPT, has enabled various data labeling and annotation tasks [17]. The use of LLMs as surrogates for humans for evaluation, often referred to as \"LLM-as-a-Judge\" [62], now extends across virtually all natural language processing tasks, including text summarization and dialog response generation [18]. However, recent research increasingly demonstrates their limitations, such as favoring longer responses (length bias) [13, 58] or content generated by similar models (self bias) [27, 59]. Our interest is specifically in the use of LLMs for relevance assessments in IR. MacAvaney and Soldaini [30] are among the first to employ LLMs for automatic relevance labeling. They specifically focus on a setting where a single known relevant document per query is available for evaluation and explore several one-shot approaches. Faggioli et al. [14] present a spectrum of human-machine collaboration for producing relevance assessments, from AI assistance to fully automated judgments. They conduct a preliminary assessment of LLMs' capabilities of relevance judgments on two TREC collections and report a fair agreement between human assessors and LLMs. Thomas et al. [54] experiment with various prompt templates to improve quality and observe better agreement with the official TREC labels than Faggioli et al. [14]. These improvements are attributed to both prompt design and the use of a more capable LLM. Thomas et al. [54] further share experiences on using LLMs for relevance assessment at Microsoft Bing, where LLMs have reportedly been used, in conjunction with expert human labelers, since late 2022. Upadhyay et al. [56] reproduce results from Thomas et al. [54], verifying their claims, and create an open-source implementation (UMBRELA). Most recently, LLMs are leveraged in the TREC 2024 Retrieval Augmented Generation (RAG) track for automatic relevance assessment [55]. Relative system rankings are found to correlate with those obtained using human judgments, even if human assessors apply stricter relevance criteria than LLMs [55]. The authors also experiment with various LLM-assisted labeling processes, such as using UMBRELA to pre-filter the pools or to suggest relevance labels that human judges can then post-edit, but find that those solutions \"do not appear to have obvious tangible benefits over fully automatic processes\" [55]. Clarke and Dietz [6] raise concerns about the claims made by Upadhyay et al. [55] and highlight how LLM-based judgments fail to demonstrate strong alignment with manual judgments for top-performing systems. They further present evidence that when evaluation is performed",
+        "bbox": [511, 271, 913, 898],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "through a publicly known automatic process, such as UMBRELA, it can be subject to manipulation. Chen et al. [5] show that when performing relevance assessments in batches, the relevance levels of earlier documents in a batch influence the relevance judgments of subsequent documents, and that some LLMs are more affected by this so-called threshold priming effect than others. Alaofi et al. [2] compare various open-source and proprietary LLMs in labeling passages for relevance. They demonstrate that most LLMs exhibit some degree of susceptibility to judging non-relevant documents as relevant if query words are inserted at random positions, simulating a keyword stuffing SEO strategy. Rahmani et al. [43] present a large-scale synthetic passage ranking collection, SynDL, by extending the TREC 2019-2023 Deep Learning collections via LLM-generated labels, and observe a high agreement on system ordering.",
+        "bbox": [86, 107, 480, 300],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "LLMs as Assistants. There is a wide array of AI tools available to aid people with content creation. Focusing only on textual content, the spectrum ranges from basic grammar and spell checkers to advanced tools that generate full articles. Studies indicate that by late 2024, LLM assistance is detectable in a significant portion of various text domains, with estimates reaching up to $18\\%$ of financial consumer complaints and $24\\%$ in corporate press releases [22]. The use of powerful LLMs can lead to situations where it is unclear whether the content is primarily human-created with AI assistance or the other way around.",
+        "bbox": [86, 309, 480, 446],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "LLMs for Data Augmentation. While not considered for this role in the current paper, LLMs are also used for data augmentation. For example, Dai et al. [11] use few-shot prompting to generate synthetic queries, while Bonifacio et al. [3] consider query generation in a fully unsupervised setting. Soudani et al. [49] present a survey on synthetic dialogue data generation in open-domain, task-oriented, and information seeking dialogue systems. The use of LLM-generated data brings forth new challenges in bias and unfairness, potentially affecting the reliability of IR systems [9].",
+        "bbox": [86, 457, 480, 580],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "3 Critical Issues with LLMs as Judges",
+        "text_level": 1,
+        "bbox": [86, 595, 395, 611],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "While LLMs offer promising capabilities for automated evaluation in IR, a growing body of research highlights potential limitations and raises critical concerns about their widespread adoption as judges. This section synthesizes findings from prior work, identifying key challenges that warrant further investigation. We categorize these challenges into two broad areas: the quality of LLM judgments (Section 3.1) and the vulnerability of LLM judges to bias and manipulation (Section 3.2). Within these areas, we discuss specific issues related to validity, discriminative power, reliability, reproducibility, susceptibility to manipulation, and systemic biases. These issues, if unaddressed, could undermine the integrity of IR evaluation and potentially lead to misleading conclusions about system performance. This section discusses these critical issues, while Section 4 presents initial experiments designed to provide empirically-driven insight into each of the issues and Section 5 touches upon the fundamental issue of whether, and how, LLM judges should be used in practice.",
+        "bbox": [86, 614, 480, 835],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "3.1 Quality of Judgments",
+        "text_level": 1,
+        "bbox": [86, 849, 299, 864],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "The fundamental question underlying the use of LLMs as judges is whether their judgments accurately reflect \"true\" relevance and",
+        "bbox": [86, 867, 480, 895],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "effectively differentiate between systems of varying quality. We break down this question of quality along two sub-dimensions: validity and discriminative power, and reliability and reproducibility.",
+        "bbox": [517, 107, 911, 148],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "3.1.1 Validity and Discriminative Power. For LLMs to serve as effective judges, their assessments must align with human judgments of relevance. Existing research measures this in two ways: (1) agreement on individual document-query relevance labels and (2) agreement on the relative ranking of a set of systems.",
+        "bbox": [517, 157, 911, 226],
+        "page_idx": 2
+    },
+    {
+        "type": "list",
+        "sub_type": "text",
+        "list_items": [
+            "- Agreement on Individual Relevance Judgments: Several studies demonstrated that it is possible to use LLMs for relevance assessment and obtain performance comparable to TREC judges [14, 55] and notably better than crowd judges [54]. At the same time, it has also been observed that LLMs are more lenient when labeling a document relevant [2, 55]. This leniency can lead to inflated evaluation scores, potentially masking subtle differences between systems.",
+            "- Agreement on System Rankings: A common approach to meta-evaluating LLM judges is to compare the relative ranking of retrieval systems based on LLM assessments with the ranking based on human-generated relevance judgments. This typically involves calculating the correlation between the two rankings, often using systems submitted to TREC tracks [14, 30, 55]. While high correlation is often interpreted as evidence of LLM judge validity, this approach has significant limitations."
+        ],
+        "bbox": [517, 231, 911, 452],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Issue #1: Discriminative Ability and the Limits of Correlation Even though several studies report on strong leaderboard correlation between human and LLM judgments, Clarke and Dietz [6] argue that Kendall's $\\tau$ is \"less informative for assessing progress at the top of the leaderboard\" and demonstrate that LLM-based assessments fail to reliably identify the best-performing systems. Further, Alaofi et al. [2] show that correlation-based meta-evaluation hides interesting failure patterns. A crucial, often overlooked, aspect is the disconnect between typical TREC evaluation setups and the needs of many practical IR scenarios. TREC evaluations often involve dozens of systems with widely varying approaches and performance levels. In contrast, practitioners often need to compare a small number of high-performing (state-of-the-art) systems or distinguish between subtle variations of a single system (e.g., in ablation studies). It remains an open question whether LLM judges possess the necessary sensitivity to reliably detect small but meaningful performance differences in such scenarios. Indeed, achieving high correlation is inherently easier with a larger and more diverse set of systems; simply including more systems with varying performance levels can artificially inflate correlation, even if the LLM judge struggles to differentiate between the top contenders. It thus remains an open question: Can LLM judges reliably distinguish between high-performing systems with small, but meaningful, performance differences?",
+        "bbox": [517, 470, 911, 803],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "3.1.2 Reliability and Reproducibility. Beyond validity, a critical concern for LLM-based evaluation is the reliability and reproducibility of the judgments. Even if an LLM demonstrates a reasonable level of agreement with human judgments on average, its utility as a judge is undermined if its assessments are highly sensitive to seemingly minor variations in setup or input. Indeed, existing",
+        "bbox": [517, 811, 911, 895],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "research demonstrates that LLM judgments can be significantly influenced by factors such as the choice of LLM [2, 5, 14], the specific wording and structure of the prompt [2, 54], and even the order in which documents are judged [5]. This variability raises concerns about the reliability of results obtained with a single LLM.",
+        "bbox": [81, 106, 480, 176],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Issue #2: The Impact of Model Choice A recurring theme in the literature is that more powerful LLMs (typically larger models with more parameters and trained on larger datasets) tend to exhibit better performance and consistency as judges [2]. This raises a crucial, but largely unexplored, question: To what extent would the conclusions of a study change if a more (or less) powerful LLM were used as the judge? This sensitivity to model choice has not been systematically investigated, particularly in the context of comparing high-performing systems where subtle differences matter.",
+        "bbox": [81, 189, 480, 315],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "3.2 Vulnerability to Bias and Manipulation",
+        "text_level": 1,
+        "bbox": [83, 329, 444, 345],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Beyond the inherent quality of judgments, a separate set of concerns revolves around the potential for LLMs to be biased or manipulated, thereby impacting evaluation outcomes.",
+        "bbox": [81, 348, 480, 390],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "3.2.1 Vulnerability to Manipulation. A significant concern with the adoption of LLMs as judges is their potential vulnerability to adversarial manipulation. Initial research suggests that LLM judges might be vulnerable to keyword stuffing and other SEO strategies [2]. More broadly, knowledge of the (characteristics of the) LLM judge opens up ways to manipulate benchmarking results. This could lead to situations where a system achieves much higher scores under automatic evaluation with the LLM judge than under manual assessment [6]. This \"eval hacking\" undermines the purpose of evaluation, which is to accurately assess the true utility of a system for users.",
+        "bbox": [81, 400, 480, 551],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Issue #3: Understanding and Mitigating Vulnerabilities of LLM Judges While initial studies demonstrate the possibility of manipulating LLM judges, the extent of this vulnerability across different LLMs, attack strategies, and IR tasks remains largely unknown. What specific vulnerabilities do LLM judges exhibit, and how do these vulnerabilities vary across different models and evaluation settings? Furthermore, how can we design evaluation protocols that are robust to manipulation, ensuring that LLM-based evaluation remains a reliable and trustworthy measure of system performance? This is a crucial area for future research.",
+        "bbox": [81, 566, 483, 705],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "3.2.2 Systematic Biases. A core challenge in using LLMs for both ranking and evaluation lies in the fundamental similarity of the two tasks: both involve estimating the relevance of a document to a given query. Several studies note the potential for significant systemic biases when LLMs are employed in both roles [14, 30, 42, 54]. In their summary of the LLM4IR workshop, Rahmani et al. [42] state \"if we were to use an LLM both as an assessor and as a ranker, we could expect such a model to be favoured over other evaluated models.\" Faggioli et al. [14] similarly caution that \"if the model is used to judge relevance both for annotation and for retrieval, its evaluation would be overinflated, possibly with perfect performance.\" If both ranking and automatic evaluation are predisposed towards certain types of results, it becomes difficult to identify truly",
+        "bbox": [81, 715, 482, 896],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "relevant results. This can lead to the suppression of diverse perspectives and the promotion of homogenous content. Novel ranking approaches that deviate from the LLM's inherent understanding of relevance might be unfairly penalized during the assessment phase. This phenomenon shares similarities with \"reward hacking\" observed in reinforcement learning, where agents exploit loopholes in the reward function to achieve high scores without genuinely solving the underlying task [4]. A particularly concerning form of this bias is circularity, where retrieval models are trained on LLM-generated labels [6, 14, 42]. This creates a self-reinforcing loop, where the ranker learns to produce outputs that the LLM judge deems relevant, further amplifying any existing biases.",
+        "bbox": [511, 106, 913, 273],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Issue #4: Interrelated Systemic Biases in LLM-Based Evaluation While the potential for systemic biases in LLM-based IR evaluation is acknowledged, the specific interactions and magnitudes of these biases remain largely unquantified. We identify three interrelated potential biases:",
+        "bbox": [513, 286, 913, 356],
+        "page_idx": 3
+    },
+    {
+        "type": "list",
+        "sub_type": "text",
+        "list_items": [
+            "- Bias Towards LLM-Based Rankers: LLM judges might favor the output of systems that also employ LLMs for ranking. While intuitively plausible, this bias needs to be systematically investigated and quantified, independent of the content being retrieved.",
+            "- Bias Towards LLM-Generated Text: LLM judges might exhibit an inherent preference for text generated by LLMs, regardless of the ranking system that retrieved it. This could be due to factors like stylistic similarities, reduced noise, or other characteristics of LLM-generated text. Indeed, studies have observed that LLMs exhibit bias favoring texts generated by the same underlying model [27, 37]. However, there is a significant lack of studies systematically quantifying the extent to which LLM judges favor LLM-generated text in the specific context of IR evaluation.",
+            "- Combined Bias (LLM Ranker + LLM-Generated Text): The most complex scenario involves the potential interaction of the two biases above. Dai et al. [10] show that neural retrievers prefer LLM-generated content, but their analysis relies on human judgments, not LLM judges. Does an LLM judge exhibit an even stronger preference for LLM-generated text when it is retrieved by an LLM-based ranker? This synergistic effect, if present, could significantly distort evaluation outcomes."
+        ],
+        "bbox": [514, 359, 911, 647],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "It thus remains a set of open questions: To what extent do LLM judges exhibit biases towards (1) LLM-based rankers, (2) LLM-generated text, and (3) the combination of the two? How do these biases interact, and what is their combined impact on IR evaluation?",
+        "bbox": [513, 652, 913, 709],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "4 Experiments",
+        "text_level": 1,
+        "bbox": [514, 720, 650, 736],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "To empirically demonstrate some of the challenges identified in Section 3, we present a series of targeted experiments aimed at investigating the discriminative ability of LLM judgments (Issue #1), the impact of model choice (Issue #2), and systematic biases (Issue #4). Note that our goal is to provide illustrative evidence of these issues, rather than a comprehensive or exhaustive analysis.",
+        "bbox": [513, 739, 913, 823],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "4.1 Experiment Design",
+        "text_level": 1,
+        "bbox": [514, 835, 717, 851],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "We study the classic ad hoc retrieval task where a ranked list of documents is returned in response to a user query. We follow a standard retrieve-then-rerank paradigm, where an initial set of",
+        "bbox": [513, 854, 913, 896],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "potentially relevant documents is identified by a fast and efficient first-stage retriever and then reranked by a computationally more intensive but more accurate model. Our focus lies specifically in this reranking stage, noting that LLMs may also be used for retrieval [53].",
+        "bbox": [81, 106, 480, 175],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "We employ a set of rankers built upon progressively more capable LLMs. This allows us to observe how their performance changes as the underlying LLM technology advances and whether LLM judges indeed exhibit bias toward LLM-based rankers. In a novel methodological approach, we also incorporate \"oracle\" rankings as reference points of comparison. These oracle rankings leverage ground truth human relevance labels to represent a hypothetical perfect ranking system as well as controlled degradations from this ideal. By intentionally degrading the perfect rankings, we create a spectrum of performance levels against which we can compare our LLM-based rankers as well as test the sensitivity of LLM judges.",
+        "bbox": [81, 176, 480, 328],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "For the judging side, we explore the sensitivity of evaluation by employing specific variations of LLM judges within a single model family—a relatively unexplored dimension in prior work. By using these specific variations of LLM judges, we aim to assess the consistency and reliability of LLM-based evaluation and to understand how the choice of LLM judge might influence the predicted effectiveness of different rankers. Crucially, we compare the judgments provided by these LLM judges against human assessments, which we consider as the ground truth for relevance.",
+        "bbox": [81, 329, 480, 452],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "To further explore the implications of LLM integration across the information lifecycle, we also examine the impact of LLM-assisted content creation on retrieval and evaluation. Specifically, we investigate how AI assistance in document authoring may influence relevance scores assigned by LLM-based rankers and judges.",
+        "bbox": [81, 453, 480, 522],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "4.2 Experimental Setup",
+        "text_level": 1,
+        "bbox": [83, 535, 289, 551],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "We utilize the TREC Deep Learning (DL) 2019 and 2020 datasets [7, 8], chosen due to their extensive use in prior research in this area. Both use the MS MARCO v1 passage corpus, which contains 8.8 million passages. We adopt the convention of referring to passages as \"documents,\" even though the unit of retrieval in our experiments is the passage. The two datasets contain 43 and 54 queries, respectively, with human relevance annotations by TREC assessors.",
+        "bbox": [81, 553, 480, 648],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "Following [40, 51], all comparisons are based on the reranking of the top 100 passages retrieved by BM25 [23]. To ensure a fair comparison between human and LLM judges, we filter results that have not been judged by TREC assessors (instead of treating them as non-relevant). For simplicity, we report only on NDCG@10, which is the official evaluation metric of the DL track.",
+        "bbox": [81, 650, 480, 733],
+        "page_idx": 4
+    },
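
The evaluation protocol in the block above (rerank the BM25 top 100, drop unjudged documents rather than treating them as non-relevant, report NDCG@10) is mechanical enough to sketch. This is a minimal illustration, not the paper's code: `run` and `qrels` are hypothetical in-memory structures rather than TREC-format files, and the exponential gain `2^rel - 1` is an assumed gain function.

```python
import math

def ndcg_at_10(ranked_docids, qrels, k=10):
    """NDCG@k for one query, using graded labels (0-3) from `qrels`."""
    # Filter out documents without a human judgment, mirroring the paper's
    # setup of not treating unjudged results as non-relevant.
    judged = [d for d in ranked_docids if d in qrels]
    gains = [2 ** qrels[d] - 1 for d in judged[:k]]  # assumed exponential gain
    dcg = sum(g / math.log2(i + 2) for i, g in enumerate(gains))
    ideal = sorted((2 ** rel - 1 for rel in qrels.values()), reverse=True)[:k]
    idcg = sum(g / math.log2(i + 2) for i, g in enumerate(ideal))
    return dcg / idcg if idcg > 0 else 0.0

# Hypothetical usage: "d9" is unjudged and silently dropped from the ranking.
qrels = {"d1": 3, "d2": 0, "d7": 2}
print(ndcg_at_10(["d1", "d9", "d7", "d2"], qrels))
```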
+    {
+        "type": "list",
+        "sub_type": "text",
+        "list_items": [
+            "4.2.1 LLM Judges. For automatic assessment, we use two model generations of a powerful commercial LLM, Gemini, in two sizes within each generation: v1 Nano, v1 Pro, v1.5 Flash, and v1.5 Pro. We use the best applicable prompt<sup>1</sup> in [54] based on the open source implementation UMBRELA [56], with judgments performed on a 4-point scale. We set top-p = 1 and the temperature to 0.",
+            "4.2.2 LLM Rankers. We consider both supervised and unsupervised LLM-based rankers, in addition to a BM25 baseline:"
+        ],
+        "bbox": [81, 741, 480, 859],
+        "page_idx": 4
+    },
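
To make the judging setup of 4.2.1 concrete (pointwise UMBRELA-style prompting on a 4-point scale, greedy decoding), here is a minimal sketch. Both `generate` and the prompt text are assumptions: `generate` stands in for whatever LLM client is used, and the prompt merely paraphrases the UMBRELA idea rather than reproducing the exact prompt from [54, 56].

```python
JUDGE_PROMPT = """Given a query and a passage, judge the passage's relevance on a
scale of 0 to 3 (0 = not relevant, 1 = related, 2 = highly relevant,
3 = perfectly relevant), considering how well the passage satisfies the
query's intent. Answer with a single integer.

Query: {query}
Passage: {passage}
Score:"""

def judge_relevance(generate, query, passage):
    """Pointwise LLM judge returning a graded label in {0, 1, 2, 3}.

    `generate` is a hypothetical completion call; temperature=0 and top_p=1
    mirror the decoding settings stated in the paper.
    """
    reply = generate(JUDGE_PROMPT.format(query=query, passage=passage),
                     temperature=0.0, top_p=1.0)
    digits = [c for c in reply if c.isdigit()]
    # Clamp to the 4-point scale; fall back to 0 if the reply is unparseable.
    return min(int(digits[0]), 3) if digits else 0
```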
+    {
+        "type": "image",
+        "img_path": "images/b6655166b41d228f4c4f5b8a174dea7b4581b4361a9d704ca9f498012d80ab3d.jpg",
+        "image_caption": [
+            "Figure 2: Illustration of oracle rankers, ordered by their expected performance, assuming that the top three results are highly relevant and the bottom three are non-relevant."
+        ],
+        "image_footnote": [],
+        "bbox": [555, 104, 867, 252],
+        "page_idx": 4
+    },
+    {
+        "type": "list",
+        "sub_type": "text",
+        "list_items": [
+            "- RankT5 [64] is a reranker that uses T5 [41] and listwise ranking loss during supervised fine-tuning. It is considered a state-of-the-art supervised LLM-based ranker.",
+            "- RG [20] is a pointwise prompting method based on Relevance Generation, where the prompt asks \"Does the passage answer the query?\" and the logit of \"Yes\" is used as the ranking score. We test RG with FLAN-T5-XXL and FLAN-UL2. Note that RG requires internal logits of output tokens and thus cannot be used with black-box LLMs such as Gemini.",
+            "- PRP [40] is a pairwise prompting approach that is effective and robust across LLMs with different sizes. Given a query and two passages, the prompt asks \"Which of the two passages is more relevant to the query?\" The winning rate is used as the ranking score for each passage."
+        ],
+        "bbox": [514, 327, 911, 521],
+        "page_idx": 4
+    },
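
The two prompting-based rankers in the list above can be sketched compactly: RG scores each passage by the model's log-probability of "Yes", while PRP aggregates pairwise comparisons into a winning rate. `yes_logprob` and `pairwise_winner` are hypothetical model hooks, not real APIs; as the RG item notes, the former needs token-level log-probabilities, whereas the latter only needs a discrete choice.

```python
from itertools import combinations

def rg_scores(yes_logprob, query, passages):
    """Relevance Generation (pointwise): rank passages by the model's
    log-probability of answering 'Yes' to the relevance question."""
    prompt = ("Passage: {p}\nQuery: {q}\n"
              "Does the passage answer the query? Answer Yes or No.")
    return [yes_logprob(prompt.format(p=p, q=query)) for p in passages]

def prp_scores(pairwise_winner, query, passages):
    """Pairwise Ranking Prompting: each passage's score is its winning rate
    over all pairwise duels. (PRP also prompts each pair in both orders to
    de-bias position; omitted here for brevity.)"""
    wins = [0.0] * len(passages)
    for i, j in combinations(range(len(passages)), 2):
        winner = pairwise_winner(query, passages[i], passages[j])  # returns i or j
        wins[winner] += 1.0
    n = max(len(passages) - 1, 1)
    return [w / n for w in wins]
```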
+    {
+        "type": "text",
+        "text": "4.2.3 Oracle Rankers. We generate oracle rankings using the ground truth TREC relevance assessments. To ensure a fair comparison with LLM rankers, we consider the same initial set of BM25-retrieved documents for reranking. Specifically, we consider the following oracle rankers, which are visualized in Fig. 2:",
+        "bbox": [513, 534, 915, 603],
+        "page_idx": 4
+    },
+    {
+        "type": "list",
+        "sub_type": "text",
+        "list_items": [
+            "- Perfect: Reranks results according to the ground truth relevance labels. While not perfect overall, this represents the ideal ranking within the initially retrieved set.",
+            "- Swap[i]: Introduces controlled errors by swapping the top-$i$ ranked result with the bottom-$i$ result. Decreasing $i$ (from 3 to 2 to 1) increases the deviation from the perfect ranking.",
+            "- Swap[i,i+1]: Swaps the $i$th and $(i+1)$th highest-ranked results with the $i$th and $(i+1)$th lowest-ranked results. This represents further degradation from the Swap[i] methods."
+        ],
+        "bbox": [514, 606, 911, 731],
+        "page_idx": 4
+    },
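
Since the oracles above are simple permutations of the ground-truth ordering, they can be written down directly. A minimal sketch, assuming `docs` is the BM25 top-100 list for one query and `qrels` maps docid to graded label; defaulting unlabeled documents to 0 is an assumption here, not a choice stated in the paper.

```python
def perfect_ranking(docs, qrels):
    """Order the retrieved set by ground-truth relevance. The stable sort
    keeps the original BM25 order among equally labeled documents."""
    return sorted(docs, key=lambda d: qrels.get(d, 0), reverse=True)

def swap(ranking, i):
    """Swap[i]: exchange the i-th best with the i-th worst result (1-based)."""
    r = list(ranking)
    r[i - 1], r[len(r) - i] = r[len(r) - i], r[i - 1]
    return r

def swap_pair(ranking, i):
    """Swap[i,i+1]: exchange the i-th and (i+1)-th best results with the
    i-th and (i+1)-th worst results."""
    return swap(swap(ranking, i), i + 1)

# Hypothetical usage: Perfect, then its controlled degradations.
# oracle_runs = {"Perfect": perfect_ranking(docs, qrels),
#                "Swap[3]": swap(perfect_ranking(docs, qrels), 3),
#                "Swap[1,2]": swap_pair(perfect_ranking(docs, qrels), 1)}
```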
+    {
+        "type": "text",
+        "text": "4.2.4 Measuring Alignment. Following prior work (cf. Section 3.1.1), we measure agreement with TREC judges (on all human-judged query-document pairs) in terms of Cohen's $\\kappa$ using both graded and binary relevance labels. Following Faggioli et al. [14], we create binary relevance labels by merging levels 0 and 1 (non-relevant) and levels 2 and 3 (relevant). Additionally, we report on relative system ordering in terms of Kendall's $\\tau$.",
+        "bbox": [513, 739, 911, 835],
+        "page_idx": 4
+    },
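
Both alignment measures just described are standard and available in scikit-learn and SciPy. A minimal sketch of this setup, with hypothetical label and score arrays; the binarization merges levels {0, 1} and {2, 3} exactly as stated.

```python
from sklearn.metrics import cohen_kappa_score
from scipy.stats import kendalltau

human = [0, 2, 3, 1, 2]  # hypothetical graded labels from TREC assessors
llm   = [1, 2, 3, 2, 2]  # hypothetical graded labels from the LLM judge

# Agreement on individual judgments, graded and binarized.
kappa_graded = cohen_kappa_score(human, llm)
binarize = lambda labels: [int(l >= 2) for l in labels]  # 0-1 -> 0, 2-3 -> 1
kappa_binary = cohen_kappa_score(binarize(human), binarize(llm))

# Agreement on system ordering: correlate per-system NDCG@10 under each judge.
ndcg_human = [0.506, 0.731, 0.892, 0.824]  # hypothetical per-system scores
ndcg_llm   = [0.712, 0.916, 0.864, 0.827]
tau, _ = kendalltau(ndcg_human, ndcg_llm)
```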
+    {
+        "type": "text",
+        "text": "4.3 Results",
+        "text_level": 1,
+        "bbox": [514, 849, 619, 862],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "Table 1 presents the results of the various reranking methods evaluated using both human and LLM judges. Selected methods are",
+        "bbox": [513, 867, 913, 896],
+        "page_idx": 4
+    },
+    {
+        "type": "page_footnote",
+        "text": "<sup>1</sup>We use the prompt that considers multiple aspects (A), but not role (R) nor multiple judges (M); narrative (N) and description (D) are unavailable for TREC DL.",
+        "bbox": [81, 872, 480, 895],
+        "page_idx": 4
+    },
+    {
+        "type": "table",
+        "img_path": "images/c5cfd97d65f684c89732676b49bb1c9ea11730e5f60edc556a11b1b25cab4aa2.jpg",
+        "table_caption": [
+            "Table 1: Results (NDCG@10) on the TREC DL 2019 and 2020 collections using both human and LLM judges. The best LLM and Oracle reranking approaches per judge are boldfaced."
+        ],
+        "table_footnote": [],
+        "table_body": "<table><tr><td rowspan=\"3\">Method</td><td rowspan=\"3\">LLM</td><td colspan=\"5\">TREC DL19</td><td colspan=\"5\">TREC DL20</td></tr><tr><td rowspan=\"2\">Human judges</td><td colspan=\"4\">LLM judges</td><td rowspan=\"2\">Human judges</td><td colspan=\"4\">LLM judges</td></tr><tr><td>v1 Nano</td><td>v1 Pro</td><td>v1.5 Flash</td><td>v1.5 Pro</td><td>v1 Nano</td><td>v1 Pro</td><td>v1.5 Flash</td><td>v1.5 Pro</td></tr><tr><td colspan=\"12\">Initial retrieval</td></tr><tr><td>BM25</td><td>-</td><td>0.506</td><td>0.607</td><td>0.772</td><td>0.689</td><td>0.712</td><td>0.483</td><td>0.616</td><td>0.786</td><td>0.689</td><td>0.719</td></tr><tr><td colspan=\"12\">LLM reranking</td></tr><tr><td>RankT5</td><td>T5 (3B)</td><td>0.731</td><td>0.633</td><td>0.907</td><td>0.911</td><td>0.916</td><td>0.696</td><td>0.621</td><td>0.924</td><td>0.888</td><td>0.899</td></tr><tr><td rowspan=\"2\">RG</td><td>FLAN-T5-XXL (11B)</td><td>0.673</td><td>0.606</td><td>0.895</td><td>0.874</td><td>0.881</td><td>0.639</td><td>0.619</td><td>0.920</td><td>0.877</td><td>0.878</td></tr><tr><td>FLAN-UL2 (20B)</td><td>0.689</td><td>0.595</td><td>0.896</td><td>0.884</td><td>0.887</td><td>0.667</td><td>0.611</td><td>0.922</td><td>0.880</td><td>0.885</td></tr><tr><td rowspan=\"4\">PRP</td><td>FLAN-T5-XL (3B)</td><td>0.716</td><td>0.610</td><td>0.924</td><td>0.921</td><td>0.909</td><td>0.691</td><td>0.618</td><td>0.924</td><td>0.898</td><td>0.901</td></tr><tr><td>FLAN-T5-XXL (11B)</td><td>0.712</td><td>0.620</td><td>0.918</td><td>0.922</td><td>0.926</td><td>0.712</td><td>0.615</td><td>0.938</td><td>0.905</td><td>0.912</td></tr><tr><td>FLAN-UL2 (20B)</td><td>0.734</td><td>0.614</td><td>0.923</td><td>0.914</td><td>0.928</td><td>0.718</td><td>0.622</td><td>0.932</td><td>0.909</td><td>0.917</td></tr><tr><td>Gemini v1.5 Flash</td><td>0.747</td><td>0.623</td><td>0.937</td><td>0.961</td><td>0.947</td><td>0.699</td><td>0.619</td><td>0.952</td><td>0.937</td><td>0.933</td></tr><tr><td colspan=\"12\">Oracle reranking</td></tr><tr><td>Perfect</td><td>-</td><td>0.892</td><td>0.582</td><td>0.896</td><td>0.876</td><td>0.864</td><td>0.871</td><td>0.617</td><td>0.872</td><td>0.828</td><td>0.824</td></tr><tr><td>Swap[3]</td><td>-</td><td>0.824</td><td>0.589</td><td>0.868</td><td>0.835</td><td>0.827</td><td>0.795</td><td>0.611</td><td>0.842</td><td>0.795</td><td>0.796</td></tr><tr><td>Swap[2]</td><td>-</td><td>0.814</td><td>0.589</td><td>0.859</td><td>0.825</td><td>0.814</td><td>0.790</td><td>0.611</td><td>0.853</td><td>0.797</td><td>0.797</td></tr><tr><td>Swap[1]</td><td>-</td><td>0.803</td><td>0.578</td><td>0.870</td><td>0.836</td><td>0.836</td><td>0.764</td><td>0.621</td><td>0.832</td><td>0.778</td><td>0.776</td></tr><tr><td>Swap[2,3]</td><td>-</td><td>0.739</td><td>0.596</td><td>0.829</td><td>0.779</td><td>0.771</td><td>0.706</td><td>0.602</td><td>0.821</td><td>0.760</td><td>0.765</td></tr><tr><td>Swap[1,2]</td><td>-</td><td>0.713</td><td>0.585</td><td>0.831</td><td>0.782</td><td>0.783</td><td>0.672</td><td>0.615</td><td>0.810</td><td>0.743</td><td>0.746</td></tr></table>",
+        "bbox": [117, 138, 879, 435],
+        "page_idx": 5
+    },
+    {
+        "type": "image",
+        "img_path": "images/dfd71a16db8bf19a61b47e196a6a2e25042d255f5d87ed8fc6a04406a024949a.jpg",
+        "image_caption": [
+            "Figure 3: Visualization of the performance of selected rankers from Table 1."
+        ],
+        "image_footnote": [],
+        "bbox": [89, 450, 433, 590],
+        "page_idx": 5
+    },
+    {
+        "type": "image",
+        "img_path": "images/51707a0cf4cfafbf4db58fc97852568164c6c48af6458d807b2280c553921d06.jpg",
+        "image_caption": [],
+        "image_footnote": [],
+        "bbox": [452, 450, 913, 590],
+        "page_idx": 5
+    },
+    {
+        "type": "table",
+        "img_path": "images/7ae27cbb718655032c1ba2317f0034fc08490d3000bba2244eb6c6088d95a608.jpg",
+        "table_caption": [
+            "Table 2: Agreement between LLM and human judges (1) on individual relevance judgments (Cohen's $\\kappa$) using both graded and binary labels and (2) on relative ordering of systems (Kendall's $\\tau$) considering all systems in Table 1 and Oracle rankers only."
+        ],
+        "table_footnote": [],
+        "table_body": "<table><tr><td rowspan=\"3\">LLM judge</td><td colspan=\"4\">Cohen's κ</td><td colspan=\"4\">Kendall's τ</td></tr><tr><td colspan=\"2\">Graded</td><td colspan=\"2\">Binary</td><td colspan=\"2\">All systems</td><td colspan=\"2\">Oracles-only</td></tr><tr><td>DL19</td><td>DL20</td><td>DL19</td><td>DL20</td><td>DL19</td><td>DL20</td><td>DL19</td><td>DL20</td></tr><tr><td>v1 Nano</td><td>-0.002</td><td>-0.011</td><td>0.007</td><td>-0.003</td><td>-0.253</td><td>0.011</td><td>-0.067</td><td>0.067</td></tr><tr><td>v1 Pro</td><td>0.139</td><td>0.144</td><td>0.337</td><td>0.273</td><td>0.077</td><td>0.121</td><td>0.600</td><td>0.867</td></tr><tr><td>v1.5 Flash</td><td>0.268</td><td>0.230</td><td>0.461</td><td>0.370</td><td>0.033</td><td>0.143</td><td>0.600</td><td>0.867</td></tr><tr><td>v1.5 Pro</td><td>0.204</td><td>0.192</td><td>0.462</td><td>0.359</td><td>0.077</td><td>0.143</td><td>0.600</td><td>0.867</td></tr></table>",
+        "bbox": [243, 660, 754, 767],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "shown in Fig. 3 for easier visual inspection. Additionally, Table 2 reports on agreement between human and LLM judges.",
+        "bbox": [81, 784, 482, 815],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "Choice of LLM How well do LLM-based judgments align with human assessments when using different variations of LLM judges from the same model family? Looking at the evaluation scores of various rankers, we observe generally good agreement among the three",
+        "bbox": [81, 825, 483, 883],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "largest models. In terms of agreement with human judges on individual relevance judgments (see Cohen's $\\kappa$, Table 2), the results are comparable to those reported in prior work for these datasets [56], with the newer v1.5 models performing clearly better than the v1 models. Interestingly, within this newer model generation, a larger model is not necessarily more capable, at least not according to this measure; v1.5 Flash shows much better agreement with humans",
+        "bbox": [511, 784, 916, 883],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "when a graded relevance scale is used than the v1.5 Pro. On the other hand, the smallest LLM (v1 Nano) is unable to provide useful judgments, as evidenced by the Cohen's $\\kappa$ values being close to 0. While this model may be capable in other tasks [16], our results clearly indicate its unsuitability for judging relevance in this specific context. Therefore, we exclude the v1 Nano judge from subsequent analyses and discussions referring to \"LLM judges.\"",
+        "bbox": [81, 106, 480, 204],
+        "page_idx": 6
+    },
+    {
+        "type": "text",
+        "text": "Another way to validate LLM judges is by measuring how well they agree on the relative ordering of systems with human judges; see Kendall's $\\tau$, Table 2. In this regard, the newest and largest model (v1.5 Pro) is the most capable overall, but there is in fact little difference in performance among the three largest models. Thus, while newer model generations clearly perform better (v1 vs. v1.5), larger models within the same generation do not necessarily make more capable judges (v1.5 Flash vs. v1.5 Pro). We also note that differentiating between the entire pool of systems (\"All systems\") proves to be especially challenging; we will elaborate on this next.",
+        "bbox": [81, 205, 482, 340],
+        "page_idx": 6
+    },
| 1121 |
+
{
|
| 1122 |
+
"type": "text",
|
| 1123 |
+
"text": "Discriminative Ability Can LLM judges reliably distinguish between high-performing systems with small, but meaningful, performance differences? The Oracle rankers, with their controlled performance degradations, enable us to assess the discriminative power of LLMs in a setting free from potential biases introduced by LLM-based rankers. While the absolute score differences between some pairs of Oracle rankings may be small, all pairwise differences are statistically significant according to human judgments (paired t-test, $p < 0.05$ ). Therefore, a failure to observe a statistically significant difference, or, more critically, a reversal of the correct ordering, indicates that the LLM judge is not sufficiently sensitive. Table 2 (Oracle-only setting) reveals that accurately ordering the Oracle rankings is challenging for LLM judges, particularly on the DL19 dataset. This suggests limitations in their ability to discern subtle, yet statistically significant, performance differences. This limited discriminative ability is not confined to the Oracle setting; it also manifests when evaluating actual retrieval systems. For instance, the v1.5 Pro model, which performed best among the LLMs on the Oracle rankings, fails to identify statistically significant differences between certain pairs of systems (e.g., RankT5 vs. RG-FLAN-T5-XXL on DL19, $p < 0.001$ according to human evaluation). Conversely, it can also identify differences as statistically significant (e.g., PRP-FLAN-UL2 vs. PRP-Gemini-v1.5-Flash, $p < 0.05$ for both years) when human judgments show no significant difference.",
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
81,
|
| 1126 |
+
356,
|
| 1127 |
+
482,
|
| 1128 |
+
686
|
| 1129 |
+
],
|
| 1130 |
+
"page_idx": 6
|
| 1131 |
+
},
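The significance testing referenced here follows the standard per-query paired design. A sketch with synthetic scores (only the query count of 43, matching DL19, reflects the setup; all values are invented):

```python
# Synthetic example of the paired t-test used to compare two systems.
import numpy as np
from scipy.stats import ttest_rel

rng = np.random.default_rng(0)
system_a = rng.uniform(0.4, 0.9, size=43)  # per-query NDCG@10 for system A
system_b = np.clip(system_a - 0.03 + rng.normal(0, 0.02, size=43), 0.0, 1.0)

t_stat, p_value = ttest_rel(system_a, system_b)
print(f"t = {t_stat:.2f}, p = {p_value:.4f}")  # p < 0.05 => significant
```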
|
| 1132 |
+
{
|
| 1133 |
+
"type": "text",
|
| 1134 |
+
"text": "Furthermore, the substantial difference in correlation between the \"All systems\" and \"Oracles-only\" results in Table 2 provides direct evidence of the concerns raised in Issue #1 (Section 3.1.1), namely, how easily correlation-based metrics can be manipulated by the choice of systems included in the evaluation.",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
81,
|
| 1137 |
+
688,
|
| 1138 |
+
482,
|
| 1139 |
+
757
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 6
|
| 1142 |
+
},
|
| 1143 |
+
{
|
| 1144 |
+
"type": "text",
|
| 1145 |
+
"text": "Bias Towards LLM-based Rankers Do LLM judges exhibit biases towards LLM-based rankers? The results presented in Fig. 3 demonstrate a clear and substantial bias in favor of LLM-based rankers when evaluated by LLM judges. While prior work has hinted at the potential for such a bias, this study provides direct empirical evidence of its existence and magnitude. Human judgments consistently place the selected Oracle rankers shown in Fig. 3 above all LLM-based rankers. LLM judges, however, completely invert",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
81,
|
| 1148 |
+
770,
|
| 1149 |
+
482,
|
| 1150 |
+
882
|
| 1151 |
+
],
|
| 1152 |
+
"page_idx": 6
|
| 1153 |
+
},
|
| 1154 |
+
{
|
| 1155 |
+
"type": "image",
|
| 1156 |
+
"img_path": "images/6507791a242eaac398e33b1726cbd7b96ff48dab0b791112abb79101a6069f2c.jpg",
|
| 1157 |
+
"image_caption": [
|
| 1158 |
+
"Figure 4: Relevance levels estimated by an LLM judge (Gemini v1.5 Pro) for Original vs. AI-assisted content (Rewrites, using Gemini v1.5 Flash). According to human assessors, the labels should be uniformly distributed across the four relevance classes, as indicated by the dashed horizontal line."
|
| 1159 |
+
],
|
| 1160 |
+
"image_footnote": [],
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
529,
|
| 1163 |
+
104,
|
| 1164 |
+
893,
|
| 1165 |
+
215
|
| 1166 |
+
],
|
| 1167 |
+
"page_idx": 6
|
| 1168 |
+
},
|
| 1169 |
+
{
|
| 1170 |
+
"type": "text",
|
| 1171 |
+
"text": "this order, ranking all LLM-based rankers as superior to these Oracle runs. This is not a subtle effect; the magnitude of the bias is sufficient to completely reverse the relative ranking of these two fundamentally different types of systems. The fact that the true performance of non-LLM-based systems is severely underestimated, highlighting a critical limitation of relying solely on LLM judges for evaluation, particularly when assessing fundamentally new or unconventional approaches.",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
511,
|
| 1174 |
+
306,
|
| 1175 |
+
913,
|
| 1176 |
+
417
|
| 1177 |
+
],
|
| 1178 |
+
"page_idx": 6
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "text",
|
| 1182 |
+
"text": "Bias Towards LLM-generated Text Do LLM judges exhibit biases towards LLM-generated text (independent of the ranking mechanism used to retrieve that text)? We investigate this by comparing LLM judge assessments of original human-written documents and their AI-assisted counterparts. Using the MS MARCO dataset, which predates the widespread adoption of modern AI writing tools, we can reasonably assume that the original documents represent content created without significant AI assistance. To avoid bias potentially introduced by initial retrieval, we take a balanced sample: for each year (DL19 and DL20) we randomly sample 500 query-document pairs for each of the four relevance levels, resulting in a total of 4000 query-document pairs. We refer to this set as Original. We then employ our second most capable LLM (Gemini v1.5 Flash) to create an AI rewritten version of each document in the Original set, following the methodology of Dai et al. [10]. This rewritten set is referred to as Rewritten. We shall assume that this rewriting process does not substantially alter the relevance of the documents to their corresponding queries, as verified by human assessors in [10]. Figure 4 presents the results using our most capable LLM (Gemini v1.5 Pro) as the judge. We can observe on the Original data that the LLM judge is lenient in its assessment of relevance, and specifically in labeling non-relevant documents as partially relevant. However, the judge does not appear to systematically inflate scores for the highest relevance level. Crucially, when comparing these results to the judgments on the Rewritten (LLM-generated) text, we do not observe a distributional shift towards higher relevance levels. In fact, the Rewritten documents show a slight increase in lower relevance labels. While these findings are specific to this particular combination of LLM rewriter and judge, they provide evidence against a general bias towards LLM-generated content, even when both models are from the same family.",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
511,
|
| 1185 |
+
431,
|
| 1186 |
+
913,
|
| 1187 |
+
861
|
| 1188 |
+
],
|
| 1189 |
+
"page_idx": 6
|
| 1190 |
+
},
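The balanced sampling step is simple to express in code. The sketch below is a hedged reconstruction rather than the authors' script; the file name and column names are assumptions.

```python
# Hedged reconstruction of the balanced sample; file/column names are guesses.
import pandas as pd

qrels = pd.read_csv("dl_qrels.csv")  # assumed columns: qid, docid, label, year

# 500 pairs per relevance level (0-3) per year (DL19, DL20) -> 4000 pairs.
original = qrels.groupby(["year", "label"]).sample(n=500, random_state=42)
assert len(original) == 2 * 4 * 500

# Rewrites use the zero-shot prompt quoted in the footnote (model call omitted).
rewrite_prompt = "Please rewrite the following text: {document}"
```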
|
| 1191 |
+
{
|
| 1192 |
+
"type": "page_footnote",
|
| 1193 |
+
"text": "2They use the straightforward prompt \"Please rewrite the following text: human-written text\" in a zero-shot setting.",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
513,
|
| 1196 |
+
872,
|
| 1197 |
+
913,
|
| 1198 |
+
895
|
| 1199 |
+
],
|
| 1200 |
+
"page_idx": 6
|
| 1201 |
+
},
|
| 1202 |
+
{
|
| 1203 |
+
"type": "image",
|
| 1204 |
+
"img_path": "images/fa1e2e2a533d7056499fde56533acb540a399a9bf1109839a68545ee2dda5c4b.jpg",
|
| 1205 |
+
"image_caption": [
|
| 1206 |
+
"Figure 5: Distribution of score differences of a RankT5 ranker on LLM rewritten vs. original text on a sample of 4000 query-document pairs."
|
| 1207 |
+
],
|
| 1208 |
+
"image_footnote": [],
|
| 1209 |
+
"bbox": [
|
| 1210 |
+
99,
|
| 1211 |
+
104,
|
| 1212 |
+
464,
|
| 1213 |
+
215
|
| 1214 |
+
],
|
| 1215 |
+
"page_idx": 7
|
| 1216 |
+
},
|
| 1217 |
+
{
|
| 1218 |
+
"type": "text",
|
| 1219 |
+
"text": "It is important to note that the preceding analysis examines the distributional impact of LLM-generated text on relevance judgments. To further investigate potential biases in a ranking context, we conduct a second experiment. We take the rankings produced by the perfect Oracle method and re-evaluate them using our LLM judge (Gemini v1.5 Pro). However, instead of using the Original document content, we substitute the Rewritten versions. If the LLM judge exhibited a strong preference for LLM-generated text, we would expect to see a significant increase in the scores assigned to these rankings. However, according to our results, that is not the case. We find that the performance of the Perfect Oracle method, as assessed by the LLM judge, does not change significantly when using the Rewritten text instead of the Original text: we get an NDCG@10 of 0.868 vs. 0.883 on DL19 and 0.825 vs. 0.818 on DL20 for Rewritten vs. Original; none of these differences is statistically significant. This further reinforces the conclusion that, at least in this experimental setup, the LLM judge does not exhibit a strong bias towards LLM-generated content.",
|
| 1220 |
+
"bbox": [
|
| 1221 |
+
81,
|
| 1222 |
+
286,
|
| 1223 |
+
482,
|
| 1224 |
+
535
|
| 1225 |
+
],
|
| 1226 |
+
"page_idx": 7
|
| 1227 |
+
},
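For reference, the metric behind these numbers can be computed as follows. This is a textbook NDCG@10 implementation (exponential gain, log2 discount), not the paper's evaluation code; the example labels are invented.

```python
import math

def ndcg_at_10(ranked_labels, all_judged_labels):
    """NDCG@10 of a ranking, given graded labels for the ranked list and
    for everything judged for the query (used for the ideal ranking)."""
    def dcg(labels):
        return sum((2 ** l - 1) / math.log2(i + 2) for i, l in enumerate(labels[:10]))
    ideal = dcg(sorted(all_judged_labels, reverse=True))
    return dcg(ranked_labels) / ideal if ideal > 0 else 0.0

# Invented labels for one query's top-10 results.
print(ndcg_at_10([3, 2, 3, 1, 0, 2, 0, 1, 0, 0], [3, 3, 3, 2, 2, 2, 1, 1, 1, 0]))
```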
|
| 1228 |
+
{
|
| 1229 |
+
"type": "text",
|
| 1230 |
+
"text": "Combined Bias (LLM Ranker + LLM-Generated Text) Do LLM judges exhibit biases towards LLM-generated text when using LLM-based rankers? To address this question, we conduct an experiment combining an LLM-based ranker with LLM-generated text and an LLM judge. We utilize the same balanced sample of 4,000 query-document pairs (500 per relevance level for each of DL19 and DL20) used in the previous experiment, comprising both the Original and Rewritten sets. For ranking, we employ a pointwise approach using RankT5 with a Flan-T5-XXL model. We compare the scenarios where the RankT5 model scores (1) the Original query-document pairs and (2) the Rewritten query-document pairs. Both scorings are then evaluated using the same LLM judge (Gemini v1.5 Pro). We observe minimal differences in the LLM-assigned evaluation scores between the Original and Rewritten scenarios. Closer inspection of the RankT5 scores reveals that the rewriting process had a negligible impact on retrieval scores for the vast majority of query-document pairs. The few observed changes were symmetrically distributed, with increases and decreases in scores mirroring each other; see Fig. 5. This aligns with the previous experiment's findings, suggesting that neither the LLM judge nor the LLM ranker (in this specific configuration) exhibits a strong preference for the LLM-rewritten content. Consequently, the LLM judge produces very similar evaluation results in both cases. While these results do not demonstrate a combined bias in this specific experimental setup, the potential for synergistic effects between LLM rankers,",
|
| 1231 |
+
"bbox": [
|
| 1232 |
+
81,
|
| 1233 |
+
549,
|
| 1234 |
+
482,
|
| 1235 |
+
896
|
| 1236 |
+
],
|
| 1237 |
+
"page_idx": 7
|
| 1238 |
+
},
|
| 1239 |
+
{
|
| 1240 |
+
"type": "text",
|
| 1241 |
+
"text": "LLM-generated text, and LLM judges remains an open question requiring further, more comprehensive investigation.",
|
| 1242 |
+
"bbox": [
|
| 1243 |
+
513,
|
| 1244 |
+
106,
|
| 1245 |
+
911,
|
| 1246 |
+
133
|
| 1247 |
+
],
|
| 1248 |
+
"page_idx": 7
|
| 1249 |
+
},
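The symmetry observation behind Fig. 5 amounts to summarizing per-pair score deltas. A sketch, assuming the per-pair ranker scores have been saved to disk (the file names are placeholders):

```python
# Placeholder file names; one RankT5 score per query-document pair.
import numpy as np

original = np.load("rankt5_scores_original.npy")
rewritten = np.load("rankt5_scores_rewritten.npy")

delta = rewritten - original
print(f"mean = {delta.mean():.4f}, median = {np.median(delta):.4f}")
print(f"share with |delta| < 0.01: {(np.abs(delta) < 0.01).mean():.1%}")
# A near-zero center with symmetric tails indicates no systematic
# preference for the rewritten text.
```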
|
| 1250 |
+
{
|
| 1251 |
+
"type": "text",
|
| 1252 |
+
"text": "5 The Role and Challenges of LLM Judges in IR",
|
| 1253 |
+
"text_level": 1,
|
| 1254 |
+
"bbox": [
|
| 1255 |
+
513,
|
| 1256 |
+
148,
|
| 1257 |
+
911,
|
| 1258 |
+
165
|
| 1259 |
+
],
|
| 1260 |
+
"page_idx": 7
|
| 1261 |
+
},
|
| 1262 |
+
{
|
| 1263 |
+
"type": "text",
|
| 1264 |
+
"text": "The feasibility of LLMs as automatic relevance assessors has been established and they have been rapidly adopted both in academia and in industry. The question, therefore, is not whether they can be used as judges, but rather how they should be used in a principled and effective manner. This requires a careful consideration of both the intended purpose of LLM judges and their inherent limitations.",
|
| 1265 |
+
"bbox": [
|
| 1266 |
+
511,
|
| 1267 |
+
167,
|
| 1268 |
+
913,
|
| 1269 |
+
252
|
| 1270 |
+
],
|
| 1271 |
+
"page_idx": 7
|
| 1272 |
+
},
|
| 1273 |
+
{
|
| 1274 |
+
"type": "text",
|
| 1275 |
+
"text": "Clarifying the Purpose Most studies, albeit often implicitly, employ LLMs as judges with the aim of replacing human assessors. However, as we discuss below, fundamental limitations preclude this possibility. We argue (in line with [14, 30]) that a more appropriate and productive goal should be to enable more effective use of limited human assessor time and resources. This shift in perspective—from replacement to reducing human effort—is crucial for guiding the development and deployment of LLM judges.",
|
| 1276 |
+
"bbox": [
|
| 1277 |
+
511,
|
| 1278 |
+
266,
|
| 1279 |
+
913,
|
| 1280 |
+
377
|
| 1281 |
+
],
|
| 1282 |
+
"page_idx": 7
|
| 1283 |
+
},
|
| 1284 |
+
{
|
| 1285 |
+
"type": "text",
|
| 1286 |
+
"text": "Acknowledging Fundamental Limitations It is essential to recognize that both ranking and relevance assessment address the same problem: predicting the relevance of a document to a given query. This inherent overlap introduces fundamental limitations when using LLMs for both tasks. Clarke and Dietz [6] argue that \"A true gold standard must originate from human assessments, as only humans can determine the relevance of information in a way that reflects real-world utility.\" We must also recognize that relevance itself carries an intrinsic uncertainty; it depends on the entire cognitive state of the person, which changes as they use the system [46]. Because of these inherent limitations, \"LLM assessments may themselves represent a strong ranking method, rather than a valid evaluation metric\" [6]. Recent work, such as [36], has begun to provide uncertainty measures with LLM relevance predictions.",
|
| 1287 |
+
"bbox": [
|
| 1288 |
+
511,
|
| 1289 |
+
390,
|
| 1290 |
+
913,
|
| 1291 |
+
584
|
| 1292 |
+
],
|
| 1293 |
+
"page_idx": 7
|
| 1294 |
+
},
|
| 1295 |
+
{
|
| 1296 |
+
"type": "text",
|
| 1297 |
+
"text": "What, then, is the Role of LLM Judges? Given the limitations outlined above, and recognizing that information access systems are ultimately built to serve human needs and provide utility to users, the highest-fidelity evaluation of these systems can only be achieved through online evaluation with real users. Offline evaluation, while valuable, remains an abstraction of the real-world task because it removes the user from the evaluation process. There is a genuine risk that findings from offline experiments, particularly those relying solely on LLM judges, may not translate to operational settings. It is crucial to recognize that the signal provided by LLM judges is inherently a noisy and potentially biased one, and therefore cannot be fully trusted as a direct proxy for utility. Nevertheless, this noisy signal can be a useful indicator, helping to identify which methods or system variants are promising enough to warrant the more resource-intensive process of human evaluation.",
|
| 1298 |
+
"bbox": [
|
| 1299 |
+
511,
|
| 1300 |
+
598,
|
| 1301 |
+
913,
|
| 1302 |
+
805
|
| 1303 |
+
],
|
| 1304 |
+
"page_idx": 7
|
| 1305 |
+
},
|
| 1306 |
+
{
|
| 1307 |
+
"type": "text",
|
| 1308 |
+
"text": "5.1 Guidelines for Employing LLMs as Judges",
|
| 1309 |
+
"text_level": 1,
|
| 1310 |
+
"bbox": [
|
| 1311 |
+
513,
|
| 1312 |
+
821,
|
| 1313 |
+
897,
|
| 1314 |
+
837
|
| 1315 |
+
],
|
| 1316 |
+
"page_idx": 7
|
| 1317 |
+
},
|
| 1318 |
+
{
|
| 1319 |
+
"type": "text",
|
| 1320 |
+
"text": "We outline several key considerations for employing LLMs in evaluation, aiming to foster a community-wide set of best practices and ensure methodological soundness. These are not exhaustive, but represent an important starting point.",
|
| 1321 |
+
"bbox": [
|
| 1322 |
+
511,
|
| 1323 |
+
840,
|
| 1324 |
+
913,
|
| 1325 |
+
896
|
| 1326 |
+
],
|
| 1327 |
+
"page_idx": 7
|
| 1328 |
+
},
|
| 1329 |
+
{
|
| 1330 |
+
"type": "list",
|
| 1331 |
+
"sub_type": "text",
|
| 1332 |
+
"list_items": [
|
| 1333 |
+
"- Consistent Evaluation Across Systems. All systems being compared within a single evaluation should be assessed using the same LLM judge configuration (model, prompt, settings). This ensures a fair and unbiased comparison, avoiding situations where some systems are evaluated with a more lenient or biased judge than others. Specifically, LLM judges should not be used selectively to fill \"relevance holes\" in existing human judgments [1].",
|
| 1334 |
+
"- Transparency and Reproducibility. To enable reproducibility and facilitate comparisons across studies, researchers should clearly report the specific LLM used (model version), the exact prompt(s) employed, and any relevant settings or parameters.",
|
| 1335 |
+
"- Employing Multiple LLMs as Judges. Using a combination of different LLM judges can help mitigate biases stemming from LLMs favoring responses from their own model family and improve robustness (see, e.g., [19]). Reporting the distribution of scores across different judges, as suggested by Rahmani et al. [42], can further enhance robustness.",
|
| 1336 |
+
"- Alignment with Human Preferences. Ensuring alignment between human and LLM raters is a substantial effort that needs to be continuously monitored and refined [54]. Ideally, results reported on LLM judges should also include human validation of the results on a representative sample. Researchers should also exercise care when making research claims based on results from LLM judges."
|
| 1337 |
+
],
|
| 1338 |
+
"bbox": [
|
| 1339 |
+
83,
|
| 1340 |
+
107,
|
| 1341 |
+
480,
|
| 1342 |
+
425
|
| 1343 |
+
],
|
| 1344 |
+
"page_idx": 8
|
| 1345 |
+
},
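To make the multiple-judges guideline concrete, the sketch below aggregates per-pair labels from several judges and reports their distribution alongside a majority vote; the judge names and labels are invented for illustration.

```python
from collections import Counter

def aggregate(labels_by_judge):
    """Majority-vote label plus the full label distribution across judges."""
    dist = Counter(labels_by_judge.values())
    majority_label = dist.most_common(1)[0][0]
    return majority_label, dict(dist)

# Invented judgments for one query-document pair.
print(aggregate({"judge-A": 2, "judge-B": 2, "judge-C": 3}))  # (2, {2: 2, 3: 1})
```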
|
| 1346 |
+
{
|
| 1347 |
+
"type": "text",
|
| 1348 |
+
"text": "5.2 Open Questions and Future Directions",
|
| 1349 |
+
"text_level": 1,
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
83,
|
| 1352 |
+
441,
|
| 1353 |
+
437,
|
| 1354 |
+
457
|
| 1355 |
+
],
|
| 1356 |
+
"page_idx": 8
|
| 1357 |
+
},
|
| 1358 |
+
{
|
| 1359 |
+
"type": "text",
|
| 1360 |
+
"text": "The adoption of LLMs as judges in IR presents several open questions and necessitates further research to address the limitations and biases identified in previous sections.",
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
81,
|
| 1363 |
+
460,
|
| 1364 |
+
482,
|
| 1365 |
+
501
|
| 1366 |
+
],
|
| 1367 |
+
"page_idx": 8
|
| 1368 |
+
},
|
| 1369 |
+
{
|
| 1370 |
+
"type": "list",
|
| 1371 |
+
"sub_type": "text",
|
| 1372 |
+
"list_items": [
|
| 1373 |
+
"- Assessing and Improving LLM Judge Quality. Our findings highlight the critical importance of LLM judge quality, revealing shortcomings in their discriminative ability and biases toward LLM-powered rankers. Developing robust methods for assessing and improving the quality of LLM judges is a crucial research direction for the IR community, potentially drawing motivations from horizontal autorater efforts [57].",
|
| 1374 |
+
"- Robustness Against Adversarial Attacks. LLM judges, similar to LLM-based rankers, are susceptible to adversarial attacks, including keyword stuffing and content injection [2, 52]. Understanding these vulnerabilities and developing effective mechanism to enhance the robustness of LLM judges against them are critical areas for ensuring the practical applicability of LLM judges in real-world scenarios.",
|
| 1375 |
+
"- Human-in-the-Loop LLM judges. The potential for using LLMs to augment human assessors, e.g., for quality-control, has been suggested [14, 48]. While Upadhyay et al. [55] found that human-in-the-loop processes did not bring obvious tangible benefits, their study represents a preliminary investigation. Further research is needed to explore the potential of this approach more comprehensively.",
|
| 1376 |
+
"- From Passages to Longer Documents. Most of the existing work focuses on paragraphs as the unit of retrieval, using either the TREC DL [2, 5, 14, 30] or RAG [55] benchmarks. There is much less work on ad hoc retrieval, with exceptions including TREC-8 [14] and Robust [54]. It is known that LLMs handle long context differently [24] and its implication in judging long documents need further investigation."
|
| 1377 |
+
],
|
| 1378 |
+
"bbox": [
|
| 1379 |
+
83,
|
| 1380 |
+
508,
|
| 1381 |
+
480,
|
| 1382 |
+
896
|
| 1383 |
+
],
|
| 1384 |
+
"page_idx": 8
|
| 1385 |
+
},
|
| 1386 |
+
{
|
| 1387 |
+
"type": "list",
|
| 1388 |
+
"sub_type": "text",
|
| 1389 |
+
"list_items": [
|
| 1390 |
+
"- Alternative Judging Approaches. All existing studies apply LLMs in a pointwise manner, but a pairwise or listwise setup would also be possible. LLM judge research can borrow ideas from LLM ranking research where pairwise and listwise approaches are extensively explored.",
|
| 1391 |
+
"- Domain-specific Solutions. While most existing research focuses on general-purpose search, specific domains might require purpose-built solutions. Recent work, for example, has explored the use of LLM judges for e-commerce search [32, 45]. Extending this concept of domain specialization, the applicability and value of LLM judges in specialized domains requiring expert knowledge (e.g., medical or legal search) remain less clear. In such domains, LLM judges might offer potential cost savings and more in-depth domain-specific knowledge compared to non-expert human assessors, but they also introduce new challenges, including the need for high accuracy, the potential for serious consequences from errors, and the complexities of expert judgment. Future research should explore the use of LLM judges in these contexts, carefully considering the trade-offs between cost, efficiency, and the risks of inaccurate assessments.",
|
| 1392 |
+
"- Smaller, Purpose-Built Models. The use of LLMs for judging at large scale raises concerns about computational cost and latency. A promising research direction is to explore the development of smaller models, designed specifically for the judging task. These purpose-built models could potentially offer significant advantages in terms of efficiency and speed, while maintaining performance comparable to massive LLMs in terms of accuracy and reliability.",
|
| 1393 |
+
"- Internationalization. Most related research focuses on English-language corpora. The issues discussed in this paper may be amplified in other languages, especially low resource ones, due to limitations in LLMs' multilingual capabilities. Further research is needed to evaluate the performance of LLM judges across a wider range of languages.",
|
| 1394 |
+
"- Training Models on LLM-generated Labels. Training retrieval models on data labeled by LLMs introduces a significant risk of circularity and bias amplification. If done recursively, this might lead to model collapse [47]. Thomas et al. [54] acknowledge that parts of the Bing search engine are retrained using LLM-generated labels. Understanding the long-term effects of such training is an important research direction."
|
| 1395 |
+
],
|
| 1396 |
+
"bbox": [
|
| 1397 |
+
514,
|
| 1398 |
+
107,
|
| 1399 |
+
921,
|
| 1400 |
+
674
|
| 1401 |
+
],
|
| 1402 |
+
"page_idx": 8
|
| 1403 |
+
},
|
| 1404 |
+
{
|
| 1405 |
+
"type": "text",
|
| 1406 |
+
"text": "6 Conclusion",
|
| 1407 |
+
"text_level": 1,
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
514,
|
| 1410 |
+
710,
|
| 1411 |
+
638,
|
| 1412 |
+
724
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 8
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "text",
|
| 1418 |
+
"text": "This paper has investigated the emerging and critical challenge of understanding the effect LLM-based rankers and AI-powered content creation may have on LLM-based judges' ability to accurately assess relevance. Through a synthesis of existing literature, we identified key concerns regarding the quality, validity, reliability, and potential biases of LLM judgments. Our experiments provided empirical evidence demonstrating how interactions between the various roles LLMs play can lead to inaccurate or biased assessments of retrieval effectiveness, particularly in scenarios involving LLM-based rankers. Finally, we presented guidelines for the use of LLMs as judges in IR and outlined a research agenda to address crucial open questions in this rapidly evolving field.",
|
| 1419 |
+
"bbox": [
|
| 1420 |
+
511,
|
| 1421 |
+
729,
|
| 1422 |
+
913,
|
| 1423 |
+
896
|
| 1424 |
+
],
|
| 1425 |
+
"page_idx": 8
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "text",
|
| 1429 |
+
"text": "References",
|
| 1430 |
+
"text_level": 1,
|
| 1431 |
+
"bbox": [
|
| 1432 |
+
84,
|
| 1433 |
+
104,
|
| 1434 |
+
176,
|
| 1435 |
+
119
|
| 1436 |
+
],
|
| 1437 |
+
"page_idx": 9
|
| 1438 |
+
},
|
| 1439 |
+
{
|
| 1440 |
+
"type": "list",
|
| 1441 |
+
"sub_type": "ref_text",
|
| 1442 |
+
"list_items": [
|
| 1443 |
+
"[1] Zahra Abbasiantaeb, Chuan Meng, Leif Azzopardi, and Mohammad Aliannejadi. 2024. Can We Use Large Language Models to Fill Relevance Judgment Holes? In Joint Proceedings of the 1st Workshop on Evaluation Methodologies, Testbeds and Community for Information Access Research (EMTCIR 2024) and the 1st Workshop on User Modelling in Conversational Information Retrieval (UM-CIR 2024) co-located with the 2nd International ACM SIGIR Conference on Information Retrieval in the Asia Pacific (SIGIR-AP 2024), Tokyo, Japan, December 12, 2024 (CEUR Workshop Proceedings, Vol. 3854).",
|
| 1444 |
+
"[2] Marwah Alaofi, Paul Thomas, Falk Scholer, and Mark Sanderson. 2024. LLMs can be Fooled into Labelling a Document as Relevant. In Proceedings of the 2024 Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region (SIGIR-AP '24). 32-41.",
|
| 1445 |
+
"[3] Luiz Bonifacio, Hugo Abonizio, Marzieh Fadaee, and Rodrigo Nogueira. 2022. InPars: Unsupervised Dataset Generation for Information Retrieval. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '22), 2387-2392.",
|
| 1446 |
+
"[4] Lichang Chen, Chen Zhu, Jiuhai Chen, Davit Soselia, Tianyi Zhou, Tom Goldstein, Heng Huang, Mohammad Shoeybi, and Bryan Catanzaro. 2024. ODIN: Disentangled Reward Mitigates Hacking in RLHF. In *Forty-first International Conference on Machine Learning (ICML '24).",
|
| 1447 |
+
"[5] Nuo Chen, Jiqun Liu, Xiaoyu Dong, Qijiong Liu, Tetsuya Sakai, and Xiao-Ming Wu. 2024. AI Can Be Cognitively Biased: An Exploratory Study on Threshold Priming in LLM-Based Batch Relevance Assessment. In Proceedings of the 2024 Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region (SIGIR-AP '24). 54–63.",
|
| 1448 |
+
"[6] Charles L. A. Clarke and Laura Dietz. 2024. LLM-based relevance assessment still can't replace human relevance assessment. arXiv:2412.17156 [cs.IR]",
|
| 1449 |
+
"[7] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2020. Overview of the TREC 2020 Deep Learning Track. In Proceedings of the Twenty-Ninth Text Retrieval Conference (TREC '20).",
|
| 1450 |
+
"[8] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2019. Overview of the TREC 2019 Deep Learning track. In Proceedings of the Twenty-Eighth Text REtrieval Conference (TREC '19).",
|
| 1451 |
+
"[9] Sunhao Dai, Chen Xu, Shicheng Xu, Liang Pang, Zhenhua Dong, and Jun Xu. 2024. Bias and Unfairness in Information Retrieval Systems: New Challenges in the LLM Era. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '24), 6437-6447.",
|
| 1452 |
+
"[10] Sunhao Dai, Yuqi Zhou, Liang Pang, Weihao Liu, Xiaolin Hu, Yong Liu, Xiao Zhang, Gang Wang, and Jun Xu. 2024. Neural Retrievers are Biased Towards LLM-Generated Content. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '24). 526-537.",
|
| 1453 |
+
"[11] Zhuyun Dai, Vincent Y Zhao, Ji Ma, Yi Luan, Jianmo Ni, Jing Lu, Anton Bakalov, Kelvin Guu, Keith Hall, and Ming-Wei Chang. 2023. Promptagator: Few-shot Dense Retrieval From 8 Examples. In The Eleventh International Conference on Learning Representations (ICLR '23).",
|
| 1454 |
+
"[12] Andrew Drozdov, Honglei Zhuang, Zhuyun Dai, Zhen Qin, Razieh Rahimi, Xuanhui Wang, Dana Alon, Mohit Iyer, Andrew McCallum, Donald Metzler, and Kai Hui. 2023. PaRaDe: Passage Ranking using Demonstrations with LLMs. In Findings of the Association for Computational Linguistics: EMNLP 2023 (EMNLP '23). 14242-14252.",
|
| 1455 |
+
"[13] Yann Dubois, Percy Liang, and Tatsunori Hashimoto. 2024. Length-Controlled AlpacaEval: A Simple Debiasing of Automatic Evaluators. In First Conference on Language Modeling (COLM '24).",
|
| 1456 |
+
"[14] Guglielmo Faggioli, Laura Dietz, Charles L. A. Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, and Henning Wachsmuth. 2023. Perspectives on Large Language Models for Relevance Judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval (ICTIR '23). 39-50.",
|
| 1457 |
+
"[15] Isabel O. Gallegos, Ryan A. Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K. Ahmed. 2024. Bias and Fairness in Large Language Models: A Survey. Computational Linguistics 50, 3 (Sept. 2024), 1097-1179.",
|
| 1458 |
+
"[16] Gemini Team Google. 2023. Gemini: A family of highly capable multimodal models. arXiv:2312.11805 [cs.CL]",
|
| 1459 |
+
"[17] Fabrizio Gilardi, Meysam Alizadeh, and Mael Kubli. 2023. ChatGPT outperforms crowd workers for text-annotation tasks. Proceedings of the National Academy of Sciences 120, 30 (2023), e2305016120.",
|
| 1460 |
+
"[18] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, Saizhuo Wang, Kun Zhang, Yuanzhuo Wang, Wen Gao, Lionel Ni, and Jian Guo. 2024. A Survey on LLM-as-a-Judge. arXiv:2411.15594 [cs.CL]",
|
| 1461 |
+
"[19] Alon Jacovi, Andrew Wang, Chris Alberti, Connie Tao, Jon Lipovetz, Kate Olszewska, Lukas Haas, Michelle Liu, Nate Keating, Adam Bloniarz, Carl Saroufim, Corey Fry, Dror Marcus, Doron Kukliansky, Gaurav Singh Tomar, James Swirhun, Jinwei Xing, Lily Wang, Madhu Gurumurthy, Michael Aaron, Moran Ambar, Rachana Fellinger, Rui Wang, Zizhao Zhang, Sasha Goldshtein, and Dipanjan"
|
| 1462 |
+
],
|
| 1463 |
+
"bbox": [
|
| 1464 |
+
86,
|
| 1465 |
+
122,
|
| 1466 |
+
480,
|
| 1467 |
+
888
|
| 1468 |
+
],
|
| 1469 |
+
"page_idx": 9
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"type": "list",
|
| 1473 |
+
"sub_type": "ref_text",
|
| 1474 |
+
"list_items": [
|
| 1475 |
+
"Das. 2025. The FACTS Grounding Leaderboard: Benchmarking LLMs' Ability to Ground Responses to Long-Form Input. arXiv:2501.03200 [cs.CL]",
|
| 1476 |
+
"[20] Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Alexander Cosgrove, Christopher D Manning, Christopher Re, Diana Acosta-Navas, Drew Arad Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue WANG, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Andrew Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang, Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. 2023. Holistic Evaluation of Language Models. Transactions on Machine Learning Research (2023).",
|
| 1477 |
+
"[21] Paul Pu Liang, Chiyu Wu, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2021. Towards understanding and mitigating social biases in language models. In International Conference on Machine Learning (ICML '21). 6565-6576.",
|
| 1478 |
+
"[22] Weixin Liang, Yaohui Zhang, Mihai Codreanu, Jiayu Wang, Hancheng Cao, and James Zou. 2025. The Widespread Adoption of Large Language Model-Assisted Writing Across Society. arXiv:2502.09747 [cs.CL]",
|
| 1479 |
+
"[23] Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng-Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python Toolkit for Reproducible Information Retrieval Research with Sparse and Dense Representations. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '21). 2356-2362.",
|
| 1480 |
+
"[24] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. 2024. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics 12 (2024), 157-173.",
|
| 1481 |
+
"[25] Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-Eval: NLG Evaluation using GPT-4 with Better Human Alignment. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP '23). 2511–2522.",
|
| 1482 |
+
"[26] Yang Liu and Mirella Lapata. 2019. Text Summarization with Pretrained Encoders. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP). 3730-3740.",
|
| 1483 |
+
"[27] Yiqi Liu, Nafise Moosavi, and Chenghua Lin. 2024. LLMs as Narcissistic Evaluators: When Ego Inflates Evaluation Scores. In Findings of the Association for Computational Linguistics: ACL 2024. 12688-12701.",
|
| 1484 |
+
"[28] Yinhong Liu, Han Zhou, Zhijiang Guo, Ehsan Shareghi, Ivan Vulic, Anna Korhonen, and Nigel Collier. 2024. Aligning with Human Judgement: The Role of Pairwise Preference in Large Language Model Evaluators. In First Conference on Language Modeling (COLM '24).",
|
| 1485 |
+
"[29] Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-Shot Listwise Document Reranking with a Large Language Model. arXiv:2305.02156 [cs.IR]",
|
| 1486 |
+
"[30] Sean MacAvaney and Luca Soldaini. 2023. One-Shot Labeling for Automatic Relevance Estimation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '23). 2230-2235.",
|
| 1487 |
+
"[31] J. S. McCarley, Rishav Chakravarti, and Avirup Sil. 2019. Structured Pruning of a BERT-based Question Answering Model. arXiv:1910.06360 [cs.CL]",
|
| 1488 |
+
"[32] Navid Mehrdad, Hrushikesh Mohapatra, Mossaab Bagdouri, Prijith Chandran, Alessandro Magnani, Xunfan Cai, Ajit Puthenputhussery, Sachin Yadav, Tony Lee, ChengXiang Zhai, and Ciya Liao. 2024. Large Language Models for Relevance Judgment in Product Search. arXiv:2406.00247 [cs.IR]",
|
| 1489 |
+
"[33] Roberto Navigli, Simone Conia, and Björn Ross. 2023. Biases in large language models: origins, inventory, and discussion. ACM Journal of Data and Information Quality 15, 2 (2023), 1-21.",
|
| 1490 |
+
"[34] Rodrigo Nogueira and Kyunghyun Cho. 2019. Passage Re-ranking with BERT. arXiv:1901.04085 [cs.IR]",
|
| 1491 |
+
"[35] Rodrigo Nogueira, Zhiying Jiang, and Jimmy Lin. 2020. Document ranking with a pretrained sequence-to-sequence model. In Findings of the Association for Computational Linguistics: EMNLP 2020. 708-718.",
|
| 1492 |
+
"[36] Harrie Oosterhuis, Rolf Jagerman, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2024. Reliable confidence intervals for information retrieval evaluation using generative ai. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '24). 2307-2317.",
|
| 1493 |
+
"[37] Arjun Panickssery, Samuel R. Bowman, and Shi Feng. 2024. LLM Evaluators Recognize and Favor Their Own Generations. In The Thirty-eighth Annual Conference on Neural Information Processing Systems (NeurlPS '24).",
|
| 1494 |
+
"[38] Andrew Parry, Maik Frobe, Sean MacAvaney, Martin Potthast, and Matthias Hagen. 2024. Analyzing Adversarial Attacks on Sequence-to-Sequence Relevance Models. In Proceedings of the 46th European Conference on Information Retrieval (ECIR '24), 286-302.",
|
| 1495 |
+
"[39] Ronak Pradeep, Rodrigo Nogueira, and Jimmy Lin. 2021. The Expando-Mono-Duo Design Pattern for Text Ranking with Pretrained Sequence-to-Sequence Models. arXiv:2101.05667 [cs.IR]."
|
| 1496 |
+
],
|
| 1497 |
+
"bbox": [
|
| 1498 |
+
517,
|
| 1499 |
+
108,
|
| 1500 |
+
913,
|
| 1501 |
+
883
|
| 1502 |
+
],
|
| 1503 |
+
"page_idx": 9
|
| 1504 |
+
},
|
| 1505 |
+
{
|
| 1506 |
+
"type": "list",
|
| 1507 |
+
"sub_type": "ref_text",
|
| 1508 |
+
"list_items": [
|
| 1509 |
+
"[40] Zhen Qin, Rolf Jagerman, Kai Hui, Honglei Zhuang, Junru Wu, Le Yan, Jiaming Shen, Tianqi Liu, Jialu Liu, Donald Metzler, Xuanhui Wang, and Michael Bendersky. 2024. Large Language Models are Effective Text Rankers with Pairwise Ranking Prompting. In Findings of the Association for Computational Linguistics: NAACL 2024. 1504-1518.",
|
| 1510 |
+
"[41] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. Journal of Machine Learning Research 21, 140 (2020), 1-67.",
|
| 1511 |
+
"[42] Hossein A. Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles L. A. Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Report on the 1st Workshop on Large Language Model for Evaluation in Information Retrieval (LLM4Eval 2024) at SIGIR 2024. arXiv:2408.05388 [cs.IR]",
|
| 1512 |
+
"[43] Hossein A. Rahmani, Xi Wang, Emine Yilmaz, Nick Craswell, Bhaskar Mitra, and Paul Thomas. 2025. SynDL: A Large-Scale Synthetic Test Collection for Passage Retrieval. arXiv:2408.16312 [cs.IR]",
|
| 1513 |
+
"[44] Devendra Sachan, Mike Lewis, Mandal Joshi, Armen Aghajanyan, Wen-tau Yih, Joelle Pineau, and Luke Zettlemoyer. 2022. Improving Passage Retrieval with Zero-Shot Question Generation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP '22). 3781-3797.",
|
| 1514 |
+
"[45] Jayant Sachdev, Sean D. Rosario, Abhijeet Phatak, He Wen, Swati Kirti, and Chittaranjan Tripathy. 2025. Automated Query-Product Relevance Labeling using Large Language Models for E-commerce Search. arXiv:2502.15990 [cs.IR]",
|
| 1515 |
+
"[46] Tefko Saracevic. 1996. Relevance reconsidered. In Proceedings of the second conference on conceptions of library and information science (CoLIS 2). 201-218.",
|
| 1516 |
+
"[47] Ilia Shumailov, Zakhhar Shumaylov, Yiren Zhao, Nicolas Papernot, Ross J. Anderson, and Yarin Gal. 2024. AI models collapse when trained on recursively generated data. Nature 631, 8022 (July 2024), 755-759.",
|
| 1517 |
+
"[48] Ian Soboroff. 2025. Don't Use LLMs to Make Relevance Judgments. Information Retrieval Research 1 (Mar. 2025), 29-46.",
|
| 1518 |
+
"[49] Heydar Soudani, Roxana Petcu, Evangelos Kanoulas, and Faegheh Hasibi. 2024. A Survey on Recent Advances in Conversational Data Generation. arXiv:2405.13003 [cs.CL]",
|
| 1519 |
+
"[50] Rickard Stureborg, Dimitris Alikaniotis, and Yoshi Suhara. 2024. Large Language Models are Inconsistent and Biased Evaluators. arXiv:2405.01724 [cs.CL]",
|
| 1520 |
+
"[51] Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2023. Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP '23). 14918-14937.",
|
| 1521 |
+
"[52] Manveer Singh Tamber and Jimmy Lin. 2025. Illusions of Relevance: Using Content Injection Attacks to Deceive Retrievers, Rerankers, and LLM Judges. arXiv:2501.18536 [cs.IR]",
|
| 1522 |
+
"[53] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022. Transformer memory as a"
|
| 1523 |
+
],
|
| 1524 |
+
"bbox": [
|
| 1525 |
+
84,
|
| 1526 |
+
108,
|
| 1527 |
+
480,
|
| 1528 |
+
573
|
| 1529 |
+
],
|
| 1530 |
+
"page_idx": 10
|
| 1531 |
+
},
|
| 1532 |
+
{
|
| 1533 |
+
"type": "list",
|
| 1534 |
+
"sub_type": "ref_text",
|
| 1535 |
+
"list_items": [
|
| 1536 |
+
"differentiable search index. In Proceedings of the 36th International Conference on Neural Information Processing Systems (NeurIPS '22), 21831-21843.",
|
| 1537 |
+
"[54] Paul Thomas, Seth Spielman, Nick Craswell, and Bhaskar Mitra. 2024. Large Language Models can Accurately Predict Searcher Preferences. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '24). 1930-1940.",
|
| 1538 |
+
"[55] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Daniel Campos, Nick Craswell, Ian Soboroff, Hoa Trang Dang, and Jimmy Lin. 2024. A Large-Scale Study of Relevance Assessments with Large Language Models: An Initial Look. arXiv:2411.08275 [cs.IR]",
|
| 1539 |
+
"[56] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Nick Craswell, and Jimmy Lin. 2024. UMBRELA: Umbrela is the (Open-Source Reproduction of the) Bing RELevance Assessor. arXiv:2406.06519 [cs.IR]",
|
| 1540 |
+
"[58] Yizhong Wang, Hamish Ivison, Pradeep Dasigi, Jack Hessel, Tushar Khot, Khyathi Chandu, David Wadden, Kelsey MacMillan, Noah A Smith, Iz Beltagy, and Hannaneh Hajishirzi. 2023. How Far Can Camels Go? Exploring the State of Instruction Tuning on Open Resources. In Proceedings of the 37th International Conference on Neural Information Processing Systems (NeurIPS '23). 74764-74786.",
|
| 1541 |
+
"[59] Wenda Xu, Guanglei Zhu, Xuandong Zhao, Liangming Pan, Lei Li, and William Wang. 2024. Pride and Prejudice: LLM Amplifies Self-Bias in Self-Refinement. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) (ACL '24). 15474-15492.",
|
| 1542 |
+
"[60] Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating Text Generation with BERT. In 8th International Conference on Learning Representations (ICLR '20).",
|
| 1543 |
+
"[61] Wei Zhao, Maxime Peyrard, Fei Liu, Yang Gao, Christian M. Meyer, and Steffen Eger. 2019. MoverScore: Text Generation Evaluating with Contextualized Embeddings and Earth Mover Distance. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP '19). 563-578.",
|
| 1544 |
+
"[62] Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MTbench and Chatbot Arena. In Proceedings of the 37th International Conference on Neural Information Processing Systems (NeurIPS '23).",
|
| 1545 |
+
"[63] Jinhua Zhu, Yingce Xia, Lijun Wu, Di He, Tao Qin, Wengang Zhou, Houqiang Li, and Tieyan Liu. 2020. Incorporating BERT into Neural Machine Translation. In International Conference on Learning Representations (ICLR '20).",
|
| 1546 |
+
"[64] Honglei Zhuang, Zhen Qin, Rolf Jagerman, Kai Hui, Ji Ma, Jing Lu, Jianmo Ni, Xuanhui Wang, and Michael Bendersky. 2023. RankT5: Fine-Tuning T5 for Text Ranking with Ranking Losses. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '23). 2308-2313."
|
| 1547 |
+
],
|
| 1548 |
+
"bbox": [
|
| 1549 |
+
517,
|
| 1550 |
+
108,
|
| 1551 |
+
913,
|
| 1552 |
+
580
|
| 1553 |
+
],
|
| 1554 |
+
"page_idx": 10
|
| 1555 |
+
}
|
| 1556 |
+
]
|
data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
data/2025/2503_19xxx/2503.19092/19267b68-41a0-4e5f-86e1-ad97629c2a36_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:38ed7321ff627f23417722188b5df6a9595e8cf57a775bfdf8b5a7e822407cde
|
| 3 |
+
size 632233
|
data/2025/2503_19xxx/2503.19092/full.md
ADDED
|
@@ -0,0 +1,333 @@
| 1 |
+
# Rankers, Judges, and Assistants: Towards Understanding the Interplay of LLMs in Information Retrieval Evaluation
|
| 2 |
+
|
| 3 |
+
Krisztian Balog
|
| 4 |
+
|
| 5 |
+
Google DeepMind
|
| 6 |
+
|
| 7 |
+
Stavanger, Norway
|
| 8 |
+
|
| 9 |
+
krisztianb@google.com
|
| 10 |
+
|
| 11 |
+
Donald Metzler
|
| 12 |
+
|
| 13 |
+
Google DeepMind
|
| 14 |
+
|
| 15 |
+
Mountain View, USA
|
| 16 |
+
|
| 17 |
+
metzler@google.com
|
| 18 |
+
|
| 19 |
+
Zhen Qin
|
| 20 |
+
|
| 21 |
+
Google DeepMind
|
| 22 |
+
|
| 23 |
+
Mountain View, USA
|
| 24 |
+
|
| 25 |
+
zhenqin@google.com
|
| 26 |
+
|
| 27 |
+
# Abstract
|
| 28 |
+
|
| 29 |
+
Large language models (LLMs) are increasingly integral to information retrieval (IR), powering ranking, evaluation, and AI-assisted content creation. This widespread adoption necessitates a critical examination of potential biases arising from the interplay between these LLM-based components. This paper synthesizes existing research and presents novel experiment designs that explore how LLM-based rankers and assistants influence LLM-based judges. We provide the first empirical evidence of LLM judges exhibiting significant bias towards LLM-based rankers. Furthermore, we observe limitations in LLM judges' ability to discern subtle system performance differences. Contrary to some previous findings, our preliminary study does not find evidence of bias against AI-generated content. These results highlight the need for a more holistic view of the LLM-driven information ecosystem. To this end, we offer initial guidelines and a research agenda to ensure the reliable use of LLMs in IR evaluation.
|
| 30 |
+
|
| 31 |
+
# CCS Concepts
|
| 32 |
+
|
| 33 |
+
- Information systems $\rightarrow$ Information retrieval.
|
| 34 |
+
|
| 35 |
+
# Keywords
|
| 36 |
+
|
| 37 |
+
Large language models, ranking, evaluation
|
| 38 |
+
|
| 39 |
+
# ACM Reference Format:
|
| 40 |
+
|
| 41 |
+
Krisztian Balog, Donald Metzler, and Zhen Qin. 2025. Rankers, Judges, and Assistants: Towards Understanding the Interplay of LLMs in Information Retrieval Evaluation. In Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '25), July 13-18, 2025, Padua, Italy. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3726302.3730348
|
| 42 |
+
|
| 43 |
+
# 1 Introduction
|
| 44 |
+
|
| 45 |
+
Due to their remarkable capabilities, large language models (LLMs) are fundamentally reshaping the field of information retrieval (IR), becoming integral to core ranking algorithms and the automation of evaluation processes. Beyond their role in core IR processes, LLMs are also powering AI assistants that are rapidly changing how users generate content, from writing emails and articles to creating code and translating content between languages. As the reliance on LLMs is expected to deepen given their potential, it is increasingly crucial to maintain a balanced perspective by assessing and acknowledging
|
| 46 |
+
|
| 47 |
+

|
| 48 |
+
|
| 49 |
+
This work is licensed under a Creative Commons Attribution 4.0 International License.
|
| 50 |
+
|
| 51 |
+
SIGIR '25, Padua, Italy
|
| 52 |
+
|
| 53 |
+
© 2025 Copyright held by the owner/author(s).
|
| 54 |
+
|
| 55 |
+
ACM ISBN 979-8-4007-1592-1/2025/07
|
| 56 |
+
|
| 57 |
+
https://doi.org/10.1145/3726302.3730348
|
| 58 |
+
|
| 59 |
+
the potential risks alongside the undeniable benefits. Could this heavy reliance on LLMs across content creation, retrieval, ranking, evaluation, etc., inadvertently introduce or amplify biases within these systems?
|
| 60 |
+
|
| 61 |
+
Recent research has begun to explore some of these emerging issues. For example, studies have shown that LLMs can exhibit biases in their output, favoring LLM-generated content over human-written content [10], and perpetuating biases present in their training data [15, 21, 33]. Furthermore, LLM-based rating systems have been found to be susceptible to manipulation [2], may not accurately reflect human preferences [28], and demonstrate self-inconsistency [50]. Additionally, the phenomenon of "model collapse" has been observed, where training LLMs on synthetic data generated by other LLMs leads to a degradation of quality and diversity in the generated content [47].
|
| 62 |
+
|
| 63 |
+
Within the IR research community, the use of LLMs for assessment is a subject of ongoing debate [14], with opinions ranging from complete rejection of LLMs for relevance assessment [48] to the assertion that they can fully replace human judgments [55]. Investigations have thus far focused on the agreement of LLM-generated ratings with human assessments [14, 54, 55] and the potential for LLMs to introduce biases in search results [10]. However, a comprehensive analysis of the implications of LLMs across the entire information ecosystem, from content creation with AI assistance to LLM-based reranking and LLM-based judges for evaluation, remains a critical gap in the current literature.
|
| 64 |
+
|
| 65 |
+
This paper aims to advance our understanding of these issues by synthesizing prior research and, crucially, providing novel empirical evidence. We specifically focus on the novel challenge of understanding the effect LLM-based rankers and AI-powered content creation have on an LLM-based judge's ability to accurately assess relevance. Prior work has separately noted the potential interaction between LLM-based rankers and judges [14, 30, 42, 54] (an interaction that has yet to be empirically investigated), while other initial work has explored the relationship between AI-powered content creation and rankers [10]. However, we argue that the complex interplay between each of these roles must be considered holistically to fully understand the potential implications of widespread adoption of LLM-based judges. We present initial results demonstrating the importance of this interconnected perspective, showcasing how the use of LLMs across the information lifecycle can influence the accuracy and potential biases of LLM judges.
|
| 66 |
+
|
| 67 |
+
We start by considering the case of LLMs being used as both rankers and judges and present the first empirical demonstration of a significant bias of LLM judges towards LLM-based rankers. Novel to our approach is the examination of LLM judge performance via the use of oracle rankers, allowing for a controlled assessment of
|
| 68 |
+
|
| 69 |
+

|
| 70 |
+
Figure 1: LLM usage in modern information access systems.
|
| 71 |
+
|
| 72 |
+
LLM judge behavior and discriminative ability. Using the TREC 2019 and 2020 Deep Learning track datasets, we conduct experiments that also compare different-sized LLM judges within the same model family. Our results reveal several key findings: (1) LLM judges are more lenient in their relevance assessments than human judges, confirming previous observations [55]; (2) LLM judges exhibit a significant bias towards LLM-based rankers, a phenomenon previously only hypothesized; and (3) LLM judges demonstrate limited ability to discern subtle, yet statistically significant, performance differences between systems. Additionally, we conduct a preliminary study into whether LLM judges demonstrate biases when they encounter AI-generated content. Contrary to some previously published findings [25, 27, 37], our experiments do not provide evidence of this bias, suggesting that deeper, more rigorous empirical evaluations are required to better understand this phenomenon.
|
| 73 |
+
|
| 74 |
+
What emerges from these targeted studies is a better picture of how different interactions between LLM-based components give rise to different behaviors within LLM-based judges. Taken together, our findings yield one of the most holistic views of this problem space, provide unique insights into best practices for leveraging LLMs as judges, and motivate a rich set of future research questions that will need to be answered to understand the complexities of these interactions even better.
|
| 75 |
+
|
| 76 |
+
In summary, the primary contributions of this paper are: (1) a review of how LLMs are currently used in IR, bringing attention to the interconnected roles they play, and synthesizing the current understanding of their interactions; (2) experiments that highlight how interactions between LLMs might result in inaccurate or biased assessments of retrieval effectiveness; (3) a preliminary set of guidelines for using LLMs in IR evaluation; and (4) a research agenda aimed at sparking further discussion and research along this emerging direction.
|
| 77 |
+
|
| 78 |
+
# 2 Background
|
| 79 |
+
|
| 80 |
+
This section overviews the main uses of LLMs in information access, illustrated in Fig. 1, providing context for subsequent analysis of the interplay between some of these uses.
|
| 81 |
+
|
| 82 |
+
LLMs as Rankers. In modern large-scale IR systems, a multi-stage retrieval-then-rerank pipeline has become a prominent approach, wherein an initial retrieval stage, often based on lexical
|
| 83 |
+
|
| 84 |
+
matching or embedding-based methods, is followed by one or multiple reranking stages, utilizing more sophisticated models to refine the results. This reranking stage frequently employs LLMs, either fine-tuned for the task of ranking [34, 35, 39, 64] or via prompting in a pointwise [12, 20, 44], pairwise [40], or listwise [29, 51] fashion. Dai et al. [10] present results suggesting an inherent bias in neural retrieval models toward LLM-generated texts. This source bias may stem from shared Transformer-based architectures and pretraining approaches, and can lead to "semantic shortcuts" during matching. Neural IR models are also shown to be vulnerable to adversarial attacks, such as keyword stuffing and content injection [38, 52].
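To illustrate the pointwise variant, the hedged sketch below shows the shape of such a reranker; the prompt wording and the scoring interface are assumptions for illustration, not a specific method from the cited works.

```python
# Sketch of pointwise LLM reranking; prompt and scorer are illustrative.
def pointwise_prompt(query: str, passage: str) -> str:
    return (
        "Does the passage answer the query? Reply Yes or No.\n"
        f"Query: {query}\nPassage: {passage}\nAnswer:"
    )

def rerank(query, passages, yes_probability):
    # yes_probability: callable mapping a prompt to P("Yes") under some LLM,
    # e.g., read off the next-token distribution (provider-specific).
    return sorted(
        passages,
        key=lambda p: yes_probability(pointwise_prompt(query, p)),
        reverse=True,
    )
```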
|
| 85 |
+
|
| 86 |
+
LLMs as Judges. Early LLMs, such as BERT, have been utilized for measuring the distributional similarity of texts [60, 61] and for evaluating specific tasks via fine-tuning, including machine translation [63], text summarization [26], and question answering [31]. The arrival of generative LLMs, such as ChatGPT, has enabled various data labeling and annotation tasks [17]. The use of LLMs as surrogates for humans for evaluation, often referred to as "LLM-as-a-Judge" [62], now extends across virtually all natural language processing tasks, including text summarization and dialog response generation [18]. However, recent research increasingly demonstrates their limitations, such as favoring longer responses (length bias) [13, 58] or content generated by similar models (self bias) [27, 59]. Our interest is specifically in the use of LLMs for relevance assessments in IR. MacAvaney and Soldaini [30] are among the first to employ LLMs for automatic relevance labeling. They specifically focus on a setting where a single known relevant document per query is available for evaluation and explore several one-shot approaches. Faggioli et al. [14] present a spectrum of human-machine collaboration for producing relevance assessments, from AI assistance to fully automated judgments. They conduct a preliminary assessment of LLMs' capabilities for relevance judgment on two TREC collections and report a fair agreement between human assessors and LLMs. Thomas et al. [54] experiment with various prompt templates to improve quality and observe better agreement with the official TREC labels than Faggioli et al. [14]. These improvements are attributed to both prompt design and the use of a more capable LLM. Thomas et al. [54] further share experiences on using LLMs for relevance assessment at Microsoft Bing, where LLMs have reportedly been used, in conjunction with expert human labelers, since late 2022. Upadhyay et al. [56] reproduce results from Thomas et al. [54], verifying their claims, and create an open-source implementation (UMBRELA). Most recently, LLMs are leveraged in the TREC 2024 Retrieval Augmented Generation (RAG) track for automatic relevance assessment [55]. Relative system rankings are found to correlate with those obtained using human judgments, even if human assessors apply stricter relevance criteria than LLMs [55]. The authors also experiment with various LLM-assisted labeling processes, such as using UMBRELA to pre-filter the pools or to suggest relevance labels that human judges can then post-edit, but find that those solutions "do not appear to have obvious tangible benefits over fully automatic processes" [55]. Clarke and Dietz [6] raise concerns about the claims made by Upadhyay et al. [55] and highlight how LLM-based judgments fail to demonstrate strong alignment with manual judgments for top-performing systems. They further present evidence that when evaluation is performed
through a publicly known automatic process, such as UMBRELA, it can be subject to manipulation. Chen et al. [5] show that when performing relevance assessments in batches, the relevance levels of earlier documents in a batch influence the relevance judgments of subsequent documents, and that some LLMs are more affected by this so-called threshold priming effect than others. Alaofi et al. [2] compare various open-source and proprietary LLMs in labeling passages for relevance. They demonstrate that most LLMs exhibit some degree of susceptibility to judging non-relevant documents as relevant if query words are inserted at random positions, simulating a keyword stuffing SEO strategy. Rahmani et al. [43] present a large-scale synthetic passage ranking collection, SynDL, by extending the TREC 2019-2023 Deep Learning collections via LLM-generated labels, and observe a high agreement on system ordering.
LLMs as Assistants. There is a wide array of AI tools available to aid people with content creation. Focusing only on textual content, the spectrum ranges from basic grammar and spell checkers to advanced tools that generate full articles. Studies indicate that by late 2024, LLM assistance is detectable in a significant portion of various text domains, with estimates reaching up to $18\%$ of financial consumer complaints and $24\%$ in corporate press releases [22]. The use of powerful LLMs can lead to situations where it is unclear whether the content is primarily human-created with AI assistance or the other way around.
LLMs for Data Augmentation. While not considered for this role in the current paper, LLMs are also used for data augmentation. For example, Dai et al. [11] use few-shot prompting to generate synthetic queries, while Bonifacio et al. [3] consider query generation in a fully unsupervised setting. Soudani et al. [49] present a survey on synthetic dialogue data generation in open-domain, task-oriented, and information-seeking dialogue systems. The use of LLM-generated data brings forth new challenges in bias and unfairness, potentially affecting the reliability of IR systems [9].
# 3 Critical Issues with LLMs as Judges
While LLMs offer promising capabilities for automated evaluation in IR, a growing body of research highlights potential limitations and raises critical concerns about their widespread adoption as judges. This section synthesizes findings from prior work, identifying key challenges that warrant further investigation. We categorize these challenges into two broad areas: the quality of LLM judgments (Section 3.1) and the vulnerability of LLM judges to bias and manipulation (Section 3.2). Within these areas, we discuss specific issues related to validity, discriminative power, reliability, reproducibility, susceptibility to manipulation, and systemic biases. These issues, if unaddressed, could undermine the integrity of IR evaluation and potentially lead to misleading conclusions about system performance. This section discusses these critical issues, while Section 4 presents initial experiments designed to provide empirically-driven insight into each of the issues and Section 5 touches upon the fundamental issue of whether, and how, LLM judges should be used in practice.
# 3.1 Quality of Judgments
The fundamental question underlying the use of LLMs as judges is whether their judgments accurately reflect "true" relevance and
effectively differentiate between systems of varying quality. We break down this question of quality along two sub-dimensions: (1) validity and discriminative power and (2) reliability and reproducibility.
3.1.1 Validity and Discriminative Power. For LLMs to serve as effective judges, their assessments must align with human judgments of relevance. Existing research measures this in two ways: (1) agreement on individual document-query relevance labels and (2) agreement on the relative ranking of a set of systems.
- Agreement on Individual Relevance Judgments: Several studies have demonstrated that it is possible to use LLMs for relevance assessment and obtain performance comparable to TREC judges [14, 55] and notably better than crowd judges [54]. At the same time, it has also been observed that LLMs are more lenient when labeling a document as relevant [2, 55]. This leniency can lead to inflated evaluation scores, potentially masking subtle differences between systems.
- Agreement on System Rankings: A common approach to meta-evaluating LLM judges is to compare the relative ranking of retrieval systems based on LLM assessments with the ranking based on human-generated relevance judgments. This typically involves calculating the correlation between the two rankings, often using systems submitted to TREC tracks [14, 30, 55]. While high correlation is often interpreted as evidence of LLM judge validity, this approach has significant limitations.
Issue #1: Discriminative Ability and the Limits of Correlation. Even though several studies report strong leaderboard correlation between human and LLM judgments, Clarke and Dietz [6] argue that Kendall's $\tau$ is "less informative for assessing progress at the top of the leaderboard" and demonstrate that LLM-based assessments fail to reliably identify the best-performing systems. Further, Alaofi et al. [2] show that correlation-based meta-evaluation hides interesting failure patterns. A crucial, often overlooked, aspect is the disconnect between typical TREC evaluation setups and the needs of many practical IR scenarios. TREC evaluations often involve dozens of systems with widely varying approaches and performance levels. In contrast, practitioners often need to compare a small number of high-performing (state-of-the-art) systems or distinguish between subtle variations of a single system (e.g., in ablation studies). Whether LLM judges possess the necessary sensitivity to reliably detect small but meaningful performance differences in such scenarios is unclear. Indeed, achieving high correlation is inherently easier with a larger and more diverse set of systems; simply including more systems with varying performance levels can artificially inflate correlation, even if the LLM judge struggles to differentiate between the top contenders. It thus remains an open question: Can LLM judges reliably distinguish between high-performing systems with small, but meaningful, performance differences?
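
To make the concern concrete, the following minimal sketch (with made-up, purely illustrative scores, not numbers from this paper) shows how Kendall's $\tau$ computed over a full, diverse pool of systems can look reassuring while $\tau$ over the top contenders alone collapses:

```python
from scipy.stats import kendalltau

# Hypothetical NDCG@10 scores for ten systems, ordered by human-judged score.
# These numbers are fabricated for illustration only.
human = [0.89, 0.82, 0.81, 0.80, 0.74, 0.71, 0.69, 0.67, 0.64, 0.51]
llm   = [0.84, 0.83, 0.86, 0.87, 0.78, 0.77, 0.75, 0.72, 0.70, 0.60]

tau_all, _ = kendalltau(human, llm)          # full, diverse pool: tau ~ 0.78
tau_top, _ = kendalltau(human[:4], llm[:4])  # top-4 systems only: tau ~ -0.67

print(f"tau over all systems: {tau_all:.2f}")
print(f"tau over top systems: {tau_top:.2f}")
```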
3.1.2 Reliability and Reproducibility. Beyond validity, a critical concern for LLM-based evaluation is the reliability and reproducibility of the judgments. Even if an LLM demonstrates a reasonable level of agreement with human judgments on average, its utility as a judge is undermined if its assessments are highly sensitive to seemingly minor variations in setup or input. Indeed, existing
research demonstrates that LLM judgments can be significantly influenced by factors such as the choice of LLM [2, 5, 14], the specific wording and structure of the prompt [2, 54], and even the order in which documents are judged [5]. This variability raises concerns about the reliability of results obtained with a single LLM.
Issue #2: The Impact of Model Choice. A recurring theme in the literature is that more powerful LLMs (typically larger models with more parameters and trained on larger datasets) tend to exhibit better performance and consistency as judges [2]. This raises a crucial, but largely unexplored, question: To what extent would the conclusions of a study change if a more (or less) powerful LLM were used as the judge? This sensitivity to model choice has not been systematically investigated, particularly in the context of comparing high-performing systems where subtle differences matter.
# 3.2 Vulnerability to Bias and Manipulation
Beyond the inherent quality of judgments, a separate set of concerns revolves around the potential for LLMs to be biased or manipulated, thereby impacting evaluation outcomes.
3.2.1 Vulnerability to Manipulation. A significant concern with the adoption of LLMs as judges is their potential vulnerability to adversarial manipulation. Initial research suggests that LLM judges might be vulnerable to keyword stuffing and other SEO strategies [2]. More broadly, knowledge of the (characteristics of the) LLM judge opens up ways to manipulate benchmarking results. This could lead to situations where a system achieves much higher scores under automatic evaluation with the LLM judge than under manual assessment [6]. This "eval hacking" undermines the purpose of evaluation, which is to accurately assess the true utility of a system for users.
Issue #3: Understanding and Mitigating Vulnerabilities of LLM Judges. While initial studies demonstrate the possibility of manipulating LLM judges, the extent of this vulnerability across different LLMs, attack strategies, and IR tasks remains largely unknown. What specific vulnerabilities do LLM judges exhibit, and how do these vulnerabilities vary across different models and evaluation settings? Furthermore, how can we design evaluation protocols that are robust to manipulation, ensuring that LLM-based evaluation remains a reliable and trustworthy measure of system performance? This is a crucial area for future research.
3.2.2 Systematic Biases. A core challenge in using LLMs for both ranking and evaluation lies in the fundamental similarity of the two tasks: both involve estimating the relevance of a document to a given query. Several studies note the potential for significant systemic biases when LLMs are employed in both roles [14, 30, 42, 54]. In their summary of the LLM4IR workshop, Rahmani et al. [42] state "if we were to use an LLM both as an assessor and as a ranker, we could expect such a model to be favoured over other evaluated models." Faggioli et al. [14] similarly caution that "if the model is used to judge relevance both for annotation and for retrieval, its evaluation would be overinflated, possibly with perfect performance." If both ranking and automatic evaluation are predisposed towards certain types of results, it becomes difficult to identify truly
relevant results. This can lead to the suppression of diverse perspectives and the promotion of homogenous content. Novel ranking approaches that deviate from the LLM's inherent understanding of relevance might be unfairly penalized during the assessment phase. This phenomenon shares similarities with "reward hacking" observed in reinforcement learning, where agents exploit loopholes in the reward function to achieve high scores without genuinely solving the underlying task [4]. A particularly concerning form of this bias is circularity, where retrieval models are trained on LLM-generated labels [6, 14, 42]. This creates a self-reinforcing loop, where the ranker learns to produce outputs that the LLM judge deems relevant, further amplifying any existing biases.
Issue #4: Interrelated Systemic Biases in LLM-Based Evaluation. While the potential for systemic biases in LLM-based IR evaluation is acknowledged, the specific interactions and magnitudes of these biases remain largely unquantified. We identify three interrelated potential biases:
- Bias Towards LLM-Based Rankers: LLM judges might favor the output of systems that also employ LLMs for ranking. While intuitively plausible, this bias needs to be systematically investigated and quantified, independent of the content being retrieved.
- Bias Towards LLM-Generated Text: LLM judges might exhibit an inherent preference for text generated by LLMs, regardless of the ranking system that retrieved it. This could be due to factors like stylistic similarities, reduced noise, or other characteristics of LLM-generated text. Indeed, studies have observed that LLMs exhibit bias favoring texts generated by the same underlying model [27, 37]. However, there is a significant lack of studies systematically quantifying the extent to which LLM judges favor LLM-generated text in the specific context of IR evaluation.
- Combined Bias (LLM Ranker + LLM-Generated Text): The most complex scenario involves the potential interaction of the two biases above. Dai et al. [10] show that neural retrievers prefer LLM-generated content, but their analysis relies on human judgments, not LLM judges. Does an LLM judge exhibit an even stronger preference for LLM-generated text when it is retrieved by an LLM-based ranker? This synergistic effect, if present, could significantly distort evaluation outcomes.
It thus remains a set of open questions: To what extent do LLM judges exhibit biases towards (1) LLM-based rankers, (2) LLM-generated text, and (3) the combination of the two? How do these biases interact, and what is their combined impact on IR evaluation?
# 4 Experiments
To empirically demonstrate some of the challenges identified in Section 3, we present a series of targeted experiments aimed at investigating the discriminative ability of LLM judgments (Issue #1), the impact of model choice (Issue #2), and systematic biases (Issue #4). Note that our goal is to provide illustrative evidence of these issues rather than a comprehensive or exhaustive analysis.
# 4.1 Experiment Design
We study the classic ad hoc retrieval task, where a ranked list of documents is returned in response to a user query. We follow a standard retrieve-then-rerank paradigm, where an initial set of
potentially relevant documents is identified by a fast and efficient first-stage retriever and then reranked by a computationally more intensive but more accurate model. Our focus lies specifically on this reranking stage, noting that LLMs may also be used for retrieval [53].
We employ a set of rankers built upon progressively more capable LLMs. This allows us to observe how their performance changes as the underlying LLM technology advances and whether LLM judges indeed exhibit bias toward LLM-based rankers. In a novel methodological approach, we also incorporate "oracle" rankings as reference points of comparison. These oracle rankings leverage ground truth human relevance labels to represent a hypothetical perfect ranking system as well as controlled degradations from this ideal. By intentionally degrading the perfect rankings, we create a spectrum of performance levels against which we can compare our LLM-based rankers as well as test the sensitivity of LLM judges.
For the judging side, we explore the sensitivity of evaluation by employing specific variations of LLM judges within a single model family—a relatively unexplored dimension in prior work. By using these specific variations of LLM judges, we aim to assess the consistency and reliability of LLM-based evaluation and to understand how the choice of LLM judge might influence the predicted effectiveness of different rankers. Crucially, we compare the judgments provided by these LLM judges against human assessments, which we consider as the ground truth for relevance.
To further explore the implications of LLM integration across the information lifecycle, we also examine the impact of LLM-assisted content creation on retrieval and evaluation. Specifically, we investigate how AI assistance in document authoring may influence relevance scores assigned by LLM-based rankers and judges.
# 4.2 Experimental Setup
We utilize the TREC Deep Learning (DL) 2019 and 2020 datasets [7, 8], chosen due to their extensive use in prior research in this area. Both use the MS MARCO v1 passage corpus, which contains 8.8 million passages. We adopt the convention of referring to passages as "documents," even though the unit of retrieval in our experiments is the passage. The two datasets contain 43 and 54 queries, respectively, with human relevance annotations by TREC assessors.
Following [40, 51], all comparisons are based on the reranking of the top 100 passages retrieved by BM25 [23]. To ensure a fair comparison between human and LLM judges, we filter out results that have not been judged by TREC assessors (instead of treating them as non-relevant). For simplicity, we report only on NDCG@10, which is the official evaluation metric of the DL track.
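
For reference, a minimal sketch of this condensed-list evaluation, assuming qrels is a {(query_id, doc_id): grade} dict; we use the exponential gain $2^g - 1$ here for illustration (trec_eval's ndcg_cut default uses linear gains), so this is a sketch rather than the exact official implementation:

```python
import math

def ndcg_at_10(ranking, qrels, qid):
    """NDCG@10 over judged documents only; unjudged documents are dropped."""
    judged = [d for d in ranking if (qid, d) in qrels]  # condensed result list
    gains = [qrels[(qid, d)] for d in judged[:10]]
    dcg = sum((2 ** g - 1) / math.log2(i + 2) for i, g in enumerate(gains))
    # Ideal DCG uses the best possible ordering of all judged grades.
    ideal = sorted((g for (q, _), g in qrels.items() if q == qid),
                   reverse=True)[:10]
    idcg = sum((2 ** g - 1) / math.log2(i + 2) for i, g in enumerate(ideal))
    return dcg / idcg if idcg > 0 else 0.0
```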
4.2.1 LLM Judges. For automatic assessment, we use two model generations of a powerful commercial LLM, Gemini, in two sizes within each generation: v1 Nano, v1 Pro, v1.5 Flash, and v1.5 Pro. We use the best applicable prompt<sup>1</sup> in [54] based on the open source implementation UMBRELA [56], with judgments performed on a 4-point scale. We set top-p = 1 and the temperature to 0.
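
A sketch of how such a pointwise judge can be wired up. The call_llm parameter is a hypothetical stand-in for whichever client library is used, and the prompt is a paraphrase in the spirit of the UMBRELA template, not the exact prompt of [54, 56]:

```python
JUDGE_PROMPT = """Given a query and a passage, judge how well the passage
answers the query on a scale from 0 to 3:
3 = perfectly relevant, 2 = highly relevant, 1 = related, 0 = irrelevant.

Query: {query}
Passage: {passage}

Output only the integer label."""

def judge_relevance(call_llm, query, passage):
    """Pointwise 4-point relevance judgment; temperature 0 for determinism."""
    prompt = JUDGE_PROMPT.format(query=query, passage=passage)
    raw = call_llm(prompt, temperature=0.0, top_p=1.0)
    try:
        return max(0, min(3, int(raw.strip())))  # clamp to the 0-3 scale
    except ValueError:
        return 0  # fall back to non-relevant on unparseable output
```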
4.2.2 LLM Rankers. We consider both supervised and unsupervised LLM-based rankers, in addition to a BM25 baseline:
Figure 2: Illustration of oracle rankers, ordered by their expected performance, assuming that the top three results are highly relevant and the bottom three are non-relevant.
- RankT5 [64] is a reranker that uses T5 [41] and listwise ranking loss during supervised fine-tuning. It is considered a state-of-the-art supervised LLM-based ranker.
- RG [20] is a pointwise prompting method based on Relevance Generation, where the prompt asks "Does the passage answer the query?" and the logit of "Yes" is used as the ranking score. We test RG with FLAN-T5-XXL and FLAN-UL2. Note that RG requires internal logits of output tokens and thus cannot be used with black-box LLMs such as Gemini.
- PRP [40] is a pairwise prompting approach that is effective and robust across LLMs of different sizes. Given a query and two passages, the prompt asks "Which of the two passages is more relevant to the query?" The winning rate is used as the ranking score for each passage (see the sketch after this list).
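
The following is a minimal sketch of the winning-rate aggregation described above, assuming a hypothetical prp_compare(query, a, b) function that returns the passage the LLM prefers (or None on a tie); it mirrors the all-pairs variant of PRP rather than the exact implementation of [40]:

```python
from itertools import combinations

def prp_rerank(query, passages, prp_compare):
    """Pairwise Ranking Prompting: score each passage by its winning rate."""
    wins = {p: 0.0 for p in passages}
    for a, b in combinations(passages, 2):
        # Ask in both orders to reduce position bias in the prompt.
        for x, y in ((a, b), (b, a)):
            winner = prp_compare(query, x, y)
            if winner is not None:
                wins[winner] += 1.0
            else:  # tie: split the point between the two passages
                wins[x] += 0.5
                wins[y] += 0.5
    n = max(1, 2 * (len(passages) - 1))  # comparisons per passage
    return sorted(passages, key=lambda p: wins[p] / n, reverse=True)
```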
4.2.3 Oracle Rankers. We generate oracle rankings using the ground truth TREC relevance assessments. To ensure a fair comparison with LLM rankers, we consider the same initial set of BM25-retrieved documents for reranking. Specifically, we consider the following oracle rankers, which are visualized in Fig. 2:
- Perfect: Reranks results according to the ground truth relevance labels. While not perfect overall, this represents the ideal ranking within the initially retrieved set.
- Swap[i]: Introduces controlled errors by swapping the top- $i$ ranked result with the bottom- $i$ result. Decreasing $i$ (from 3 to 2 to 1) increases the deviation from the perfect ranking.
- Swap[i,i+1]: Swaps the $i$ th and $(i + 1)$ th highest-ranked results with the $i$ th and $(i + 1)$ th lowest-ranked results. This represents further degradation from the Swap[i] methods (see the sketch after this list).
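
A minimal sketch of these oracle rankers, assuming each BM25-retrieved document comes with its ground-truth grade via a grade function:

```python
def perfect(docs, grade):
    """Ideal ordering of the BM25-retrieved set by ground-truth grade."""
    return sorted(docs, key=grade, reverse=True)

def swap_i(docs, grade, i):
    """Swap[i]: exchange the i-th best with the i-th worst result (1-based)."""
    r = perfect(docs, grade)
    r[i - 1], r[len(r) - i] = r[len(r) - i], r[i - 1]
    return r

def swap_i_i1(docs, grade, i):
    """Swap[i,i+1]: exchange the i-th and (i+1)-th best results with the
    i-th and (i+1)-th worst results."""
    r = perfect(docs, grade)
    for j in (i, i + 1):
        r[j - 1], r[len(r) - j] = r[len(r) - j], r[j - 1]
    return r
```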
4.2.4 Measuring Alignment. Following prior work (cf. Section 3.1.1), we measure agreement with TREC judges (on all human-judged query-document pairs) in terms of Cohen's $\kappa$ using both graded and binary relevance labels. Following Faggioli et al. [14], we create binary relevance labels by merging levels 0 and 1 (non-relevant) and levels 2 and 3 (relevant). Additionally, we report on relative system ordering in terms of Kendall's $\tau$.
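
A sketch of these alignment computations using scikit-learn and SciPy, with the binary collapse described above (levels 0-1 mapped to non-relevant, levels 2-3 to relevant):

```python
from scipy.stats import kendalltau
from sklearn.metrics import cohen_kappa_score

def agreement(human_labels, llm_labels):
    """Cohen's kappa on graded and binarized labels for the same judged pairs."""
    graded = cohen_kappa_score(human_labels, llm_labels)
    to_bin = lambda labels: [1 if l >= 2 else 0 for l in labels]
    binary = cohen_kappa_score(to_bin(human_labels), to_bin(llm_labels))
    return graded, binary

def system_ordering(human_ndcg, llm_ndcg):
    """Kendall's tau between system rankings induced by the two judge types."""
    systems = sorted(human_ndcg)  # shared system identifiers
    tau, _ = kendalltau([human_ndcg[s] for s in systems],
                        [llm_ndcg[s] for s in systems])
    return tau
```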
# 4.3 Results
Table 1 presents the results of the various reranking methods evaluated using both human and LLM judges. Selected methods are shown in Fig. 3 for easier visual inspection. Additionally, Table 2 reports on agreement between human and LLM judges.
Table 1: Results (NDCG@10) on the TREC DL 2019 and 2020 collections using both human and LLM judges. The best LLM and Oracle reranking approaches per judge are boldfaced.
<table><tr><td rowspan="3">Method</td><td rowspan="3">LLM</td><td colspan="5">TREC DL19</td><td colspan="5">TREC DL20</td></tr><tr><td rowspan="2">Human judges</td><td colspan="4">LLM judges</td><td rowspan="2">Human judges</td><td colspan="4">LLM judges</td></tr><tr><td>v1 Nano</td><td>v1 Pro</td><td>v1.5 Flash</td><td>v1.5 Pro</td><td>v1 Nano</td><td>v1 Pro</td><td>v1.5 Flash</td><td>v1.5 Pro</td></tr><tr><td colspan="12">Initial retrieval</td></tr><tr><td>BM25</td><td>-</td><td>0.506</td><td>0.607</td><td>0.772</td><td>0.689</td><td>0.712</td><td>0.483</td><td>0.616</td><td>0.786</td><td>0.689</td><td>0.719</td></tr><tr><td colspan="12">LLM reranking</td></tr><tr><td>RankT5</td><td>T5 (3B)</td><td>0.731</td><td>0.633</td><td>0.907</td><td>0.911</td><td>0.916</td><td>0.696</td><td>0.621</td><td>0.924</td><td>0.888</td><td>0.899</td></tr><tr><td rowspan="2">RG</td><td>FLAN-T5-XXL (11B)</td><td>0.673</td><td>0.606</td><td>0.895</td><td>0.874</td><td>0.881</td><td>0.639</td><td>0.619</td><td>0.920</td><td>0.877</td><td>0.878</td></tr><tr><td>FLAN-UL2 (20B)</td><td>0.689</td><td>0.595</td><td>0.896</td><td>0.884</td><td>0.887</td><td>0.667</td><td>0.611</td><td>0.922</td><td>0.880</td><td>0.885</td></tr><tr><td rowspan="4">PRP</td><td>FLAN-T5-XL (3B)</td><td>0.716</td><td>0.610</td><td>0.924</td><td>0.921</td><td>0.909</td><td>0.691</td><td>0.618</td><td>0.924</td><td>0.898</td><td>0.901</td></tr><tr><td>FLAN-T5-XXL (11B)</td><td>0.712</td><td>0.620</td><td>0.918</td><td>0.922</td><td>0.926</td><td>0.712</td><td>0.615</td><td>0.938</td><td>0.905</td><td>0.912</td></tr><tr><td>FLAN-UL2 (20B)</td><td>0.734</td><td>0.614</td><td>0.923</td><td>0.914</td><td>0.928</td><td>0.718</td><td>0.622</td><td>0.932</td><td>0.909</td><td>0.917</td></tr><tr><td>Gemini v1.5 Flash</td><td>0.747</td><td>0.623</td><td>0.937</td><td>0.961</td><td>0.947</td><td>0.699</td><td>0.619</td><td>0.952</td><td>0.937</td><td>0.933</td></tr><tr><td colspan="12">Oracle reranking</td></tr><tr><td>Perfect</td><td>-</td><td>0.892</td><td>0.582</td><td>0.896</td><td>0.876</td><td>0.864</td><td>0.871</td><td>0.617</td><td>0.872</td><td>0.828</td><td>0.824</td></tr><tr><td>Swap[3]</td><td>-</td><td>0.824</td><td>0.589</td><td>0.868</td><td>0.835</td><td>0.827</td><td>0.795</td><td>0.611</td><td>0.842</td><td>0.795</td><td>0.796</td></tr><tr><td>Swap[2]</td><td>-</td><td>0.814</td><td>0.589</td><td>0.859</td><td>0.825</td><td>0.814</td><td>0.790</td><td>0.611</td><td>0.853</td><td>0.797</td><td>0.797</td></tr><tr><td>Swap[1]</td><td>-</td><td>0.803</td><td>0.578</td><td>0.870</td><td>0.836</td><td>0.836</td><td>0.764</td><td>0.621</td><td>0.832</td><td>0.778</td><td>0.776</td></tr><tr><td>Swap[2,3]</td><td>-</td><td>0.739</td><td>0.596</td><td>0.829</td><td>0.779</td><td>0.771</td><td>0.706</td><td>0.602</td><td>0.821</td><td>0.760</td><td>0.765</td></tr><tr><td>Swap[1,2]</td><td>-</td><td>0.713</td><td>0.585</td><td>0.831</td><td>0.782</td><td>0.783</td><td>0.672</td><td>0.615</td><td>0.810</td><td>0.743</td><td>0.746</td></tr></table>

Figure 3: Visualization of the performance of selected rankers from Table 1.

Table 2: Agreement between LLM and human judges (1) on individual relevance judgments (Cohen's $\kappa$ ) using both graded and binary labels and (2) on relative ordering of systems (Kendall's $\tau$ ) considering all systems in Table 1 and Oracle rankers only.
<table><tr><td rowspan="3">LLM judge</td><td colspan="4">Cohen's κ</td><td colspan="4">Kendall's τ</td></tr><tr><td colspan="2">Graded</td><td colspan="2">Binary</td><td colspan="2">All systems</td><td colspan="2">Oracles-only</td></tr><tr><td>DL19</td><td>DL20</td><td>DL19</td><td>DL20</td><td>DL19</td><td>DL20</td><td>DL19</td><td>DL20</td></tr><tr><td>v1 Nano</td><td>-0.002</td><td>-0.011</td><td>0.007</td><td>-0.003</td><td>-0.253</td><td>0.011</td><td>-0.067</td><td>0.067</td></tr><tr><td>v1 Pro</td><td>0.139</td><td>0.144</td><td>0.337</td><td>0.273</td><td>0.077</td><td>0.121</td><td>0.600</td><td>0.867</td></tr><tr><td>v1.5 Flash</td><td>0.268</td><td>0.230</td><td>0.461</td><td>0.370</td><td>0.033</td><td>0.143</td><td>0.600</td><td>0.867</td></tr><tr><td>v1.5 Pro</td><td>0.204</td><td>0.192</td><td>0.462</td><td>0.359</td><td>0.077</td><td>0.143</td><td>0.600</td><td>0.867</td></tr></table>
Choice of LLM. How well do LLM-based judgments align with human assessments when using different variations of LLM judges from the same model family? Looking at the evaluation scores of various rankers, we observe generally good agreement among the three
largest models. In terms of agreement with human judges on individual relevance judgments (see Cohen's $\kappa$ in Table 2), the results are comparable to those reported in prior work for these datasets [56], with the newer v1.5 models performing clearly better than the v1 models. Interestingly, within this newer model generation, a larger model is not necessarily more capable, at least not according to this measure; v1.5 Flash shows much better agreement with humans
when a graded relevance scale is used than v1.5 Pro does. On the other hand, the smallest LLM (v1 Nano) is unable to provide useful judgments, as evidenced by Cohen's $\kappa$ values close to 0. While this model may be capable in other tasks [16], our results clearly indicate its unsuitability for judging relevance in this specific context. Therefore, we exclude the v1 Nano judge from subsequent analyses and discussions referring to "LLM judges."
Another way to validate LLM judges is by measuring how well they agree with human judges on the relative ordering of systems; see Kendall's $\tau$ in Table 2. In this regard, the newest and largest model (v1.5 Pro) is the most capable overall, but there is in fact little difference in performance among the three largest models. Thus, while newer model generations clearly perform better (v1 vs. v1.5), larger models within the same generation do not necessarily make more capable judges (v1.5 Flash vs. v1.5 Pro). We also note that differentiating between the entire pool of systems ("All systems") proves to be especially challenging; we will elaborate on this next.
Discriminative Ability. Can LLM judges reliably distinguish between high-performing systems with small, but meaningful, performance differences? The Oracle rankers, with their controlled performance degradations, enable us to assess the discriminative power of LLMs in a setting free from potential biases introduced by LLM-based rankers. While the absolute score differences between some pairs of Oracle rankings may be small, all pairwise differences are statistically significant according to human judgments (paired t-test, $p < 0.05$). Therefore, a failure to observe a statistically significant difference, or, more critically, a reversal of the correct ordering, indicates that the LLM judge is not sufficiently sensitive. Table 2 (Oracles-only setting) reveals that accurately ordering the Oracle rankings is challenging for LLM judges, particularly on the DL19 dataset. This suggests limitations in their ability to discern subtle, yet statistically significant, performance differences. This limited discriminative ability is not confined to the Oracle setting; it also manifests when evaluating actual retrieval systems. For instance, the v1.5 Pro model, which performed best among the LLMs on the Oracle rankings, fails to identify statistically significant differences between certain pairs of systems (e.g., RankT5 vs. RG-FLAN-T5-XXL on DL19, $p < 0.001$ according to human evaluation). Conversely, it can also identify differences as statistically significant (e.g., PRP-FLAN-UL2 vs. PRP-Gemini-v1.5-Flash, $p < 0.05$ for both years) when human judgments show no significant difference.
Furthermore, the substantial difference in correlation between the "All systems" and "Oracles-only" results in Table 2 provides direct evidence of the concerns raised in Issue #1 (Section 3.1.1), namely, how easily correlation-based metrics can be manipulated by the choice of systems included in the evaluation.
Bias Towards LLM-based Rankers. Do LLM judges exhibit biases towards LLM-based rankers? The results presented in Fig. 3 demonstrate a clear and substantial bias in favor of LLM-based rankers when evaluated by LLM judges. While prior work has hinted at the potential for such a bias, this study provides direct empirical evidence of its existence and magnitude. Human judgments consistently place the selected Oracle rankers shown in Fig. 3 above all LLM-based rankers. LLM judges, however, completely invert
this order, ranking all LLM-based rankers as superior to these Oracle runs. This is not a subtle effect; the magnitude of the bias is sufficient to completely reverse the relative ranking of these two fundamentally different types of systems. The fact that the true performance of non-LLM-based systems is severely underestimated highlights a critical limitation of relying solely on LLM judges for evaluation, particularly when assessing fundamentally new or unconventional approaches.

Figure 4: Relevance levels estimated by an LLM judge (Gemini v1.5 Pro) for Original vs. AI-assisted content (Rewrites, using Gemini v1.5 Flash). According to human assessors, the labels should be uniformly distributed across the four relevance classes, as indicated by the dashed horizontal line.
Bias Towards LLM-generated Text. Do LLM judges exhibit biases towards LLM-generated text (independent of the ranking mechanism used to retrieve that text)? We investigate this by comparing LLM judge assessments of original human-written documents and their AI-assisted counterparts. Using the MS MARCO dataset, which predates the widespread adoption of modern AI writing tools, we can reasonably assume that the original documents represent content created without significant AI assistance. To avoid bias potentially introduced by initial retrieval, we take a balanced sample: for each year (DL19 and DL20) we randomly sample 500 query-document pairs for each of the four relevance levels, resulting in a total of 4,000 query-document pairs. We refer to this set as Original. We then employ our second most capable LLM (Gemini v1.5 Flash) to create an AI-rewritten version of each document in the Original set, following the methodology of Dai et al. [10]. This rewritten set is referred to as Rewritten. We shall assume that this rewriting process does not substantially alter the relevance of the documents to their corresponding queries, as verified by human assessors in [10]. Figure 4 presents the results using our most capable LLM (Gemini v1.5 Pro) as the judge. We can observe on the Original data that the LLM judge is lenient in its assessment of relevance, and specifically in labeling non-relevant documents as partially relevant. However, the judge does not appear to systematically inflate scores for the highest relevance level. Crucially, when comparing these results to the judgments on the Rewritten (LLM-generated) text, we do not observe a distributional shift towards higher relevance levels. In fact, the Rewritten documents show a slight increase in lower relevance labels. While these findings are specific to this particular combination of LLM rewriter and judge, they provide evidence against a general bias towards LLM-generated content, even when both models are from the same family.
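
A sketch of the balanced sampling step, assuming qrels maps judged (query_id, doc_id) pairs to grades for one dataset; the rewriting step then simply feeds each sampled document to the rewriter LLM with a paraphrasing prompt in the spirit of Dai et al. [10]:

```python
import random

def balanced_sample(qrels, per_level=500, levels=(0, 1, 2, 3), seed=42):
    """Sample an equal number of judged (query, doc) pairs per relevance level."""
    rng = random.Random(seed)
    sample = []
    for level in levels:
        pairs = [qd for qd, grade in qrels.items() if grade == level]
        sample.extend(rng.sample(pairs, per_level))
    return sample  # 4 x 500 = 2,000 pairs per dataset, 4,000 across DL19+DL20
```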
Figure 5: Distribution of score differences of a RankT5 ranker on LLM rewritten vs. original text on a sample of 4000 query-document pairs.
It is important to note that the preceding analysis examines the distributional impact of LLM-generated text on relevance judgments. To further investigate potential biases in a ranking context, we conduct a second experiment. We take the rankings produced by the perfect Oracle method and re-evaluate them using our LLM judge (Gemini v1.5 Pro). However, instead of using the Original document content, we substitute the Rewritten versions. If the LLM judge exhibited a strong preference for LLM-generated text, we would expect to see a significant increase in the scores assigned to these rankings. However, according to our results, that is not the case. We find that the performance of the Perfect Oracle method, as assessed by the LLM judge, does not change significantly when using the Rewritten text instead of the Original text: we get an NDCG@10 of 0.868 vs. 0.883 on DL19 and 0.825 vs. 0.818 on DL20 for Rewritten vs. Original; none of these differences is statistically significant. This further reinforces the conclusion that, at least in this experimental setup, the LLM judge does not exhibit a strong bias towards LLM-generated content.
Combined Bias (LLM Ranker + LLM-Generated Text). Do LLM judges exhibit biases towards LLM-generated text when using LLM-based rankers? To address this question, we conduct an experiment combining an LLM-based ranker with LLM-generated text and an LLM judge. We utilize the same balanced sample of 4,000 query-document pairs (500 per relevance level for each of DL19 and DL20) used in the previous experiment, comprising both the Original and Rewritten sets. For ranking, we employ a pointwise approach using RankT5 with a Flan-T5-XXL model. We compare the scenarios where the RankT5 model scores (1) the Original query-document pairs and (2) the Rewritten query-document pairs. Both scorings are then evaluated using the same LLM judge (Gemini v1.5 Pro). We observe minimal differences in the LLM-assigned evaluation scores between the Original and Rewritten scenarios. Closer inspection of the RankT5 scores reveals that the rewriting process had a negligible impact on retrieval scores for the vast majority of query-document pairs. The few observed changes were symmetrically distributed, with increases and decreases in scores mirroring each other; see Fig. 5. This aligns with the previous experiment's findings, suggesting that neither the LLM judge nor the LLM ranker (in this specific configuration) exhibits a strong preference for the LLM-rewritten content. Consequently, the LLM judge produces very similar evaluation results in both cases. While these results do not demonstrate a combined bias in this specific experimental setup, the potential for synergistic effects between LLM rankers,
LLM-generated text, and LLM judges remains an open question requiring further, more comprehensive investigation.
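
A sketch of the score-delta analysis behind Fig. 5, assuming a hypothetical ranker_score(query_id, text) function wrapping the pointwise ranker:

```python
def score_deltas(pairs, original, rewritten, ranker_score):
    """Per-pair change in ranker score after LLM rewriting."""
    deltas = []
    for qid, did in pairs:
        before = ranker_score(qid, original[did])
        after = ranker_score(qid, rewritten[did])
        deltas.append(after - before)
    return deltas  # roughly symmetric around 0 in our setup (see Fig. 5)
```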
# 5 The Role and Challenges of LLM Judges in IR
The feasibility of LLMs as automatic relevance assessors has been established and they have been rapidly adopted both in academia and in industry. The question, therefore, is not whether they can be used as judges, but rather how they should be used in a principled and effective manner. This requires a careful consideration of both the intended purpose of LLM judges and their inherent limitations.
Clarifying the Purpose. Most studies, albeit often implicitly, employ LLMs as judges with the aim of replacing human assessors. However, as we discuss below, fundamental limitations preclude this possibility. We argue (in line with [14, 30]) that a more appropriate and productive goal should be to enable more effective use of limited human assessor time and resources. This shift in perspective—from replacement to reducing human effort—is crucial for guiding the development and deployment of LLM judges.
Acknowledging Fundamental Limitations. It is essential to recognize that both ranking and relevance assessment address the same problem: predicting the relevance of a document to a given query. This inherent overlap introduces fundamental limitations when using LLMs for both tasks. Clarke and Dietz [6] argue that "A true gold standard must originate from human assessments, as only humans can determine the relevance of information in a way that reflects real-world utility." We must also recognize that relevance itself carries an intrinsic uncertainty; it depends on the entire cognitive state of the person, which changes as they use the system [46]. Because of these inherent limitations, "LLM assessments may themselves represent a strong ranking method, rather than a valid evaluation metric" [6]. Recent work, such as [36], has begun to provide uncertainty measures with LLM relevance predictions.
What, then, is the Role of LLM Judges? Given the limitations outlined above, and recognizing that information access systems are ultimately built to serve human needs and provide utility to users, the highest-fidelity evaluation of these systems can only be achieved through online evaluation with real users. Offline evaluation, while valuable, remains an abstraction of the real-world task because it removes the user from the evaluation process. There is a genuine risk that findings from offline experiments, particularly those relying solely on LLM judges, may not translate to operational settings. It is crucial to recognize that the signal provided by LLM judges is inherently a noisy and potentially biased one, and therefore cannot be fully trusted as a direct proxy for utility. Nevertheless, this noisy signal can be a useful indicator, helping to identify which methods or system variants are promising enough to warrant the more resource-intensive process of human evaluation.
# 5.1 Guidelines for Employing LLMs as Judges
We outline several key considerations for employing LLMs in evaluation, aiming to foster a community-wide set of best practices and ensure methodological soundness. These are not exhaustive, but represent an important starting point.
- Consistent Evaluation Across Systems. All systems being compared within a single evaluation should be assessed using the same LLM judge configuration (model, prompt, settings). This ensures a fair and unbiased comparison, avoiding situations where some systems are evaluated with a more lenient or biased judge than others. Specifically, LLM judges should not be used selectively to fill "relevance holes" in existing human judgments [1].
- Transparency and Reproducibility. To enable reproducibility and facilitate comparisons across studies, researchers should clearly report the specific LLM used (model version), the exact prompt(s) employed, and any relevant settings or parameters.
- Employing Multiple LLMs as Judges. Using a combination of different LLM judges can help mitigate biases stemming from LLMs favoring responses from their own model family and improve robustness (see, e.g., [19]). Reporting the distribution of scores across different judges, as suggested by Rahmani et al. [42], can further enhance robustness.
- Alignment with Human Preferences. Ensuring alignment between human and LLM raters is a substantial effort that needs to be continuously monitored and refined [54]. Ideally, results reported on LLM judges should also include human validation of the results on a representative sample. Researchers should also exercise care when making research claims based on results from LLM judges.
# 5.2 Open Questions and Future Directions
The adoption of LLMs as judges in IR presents several open questions and necessitates further research to address the limitations and biases identified in previous sections.
- Assessing and Improving LLM Judge Quality. Our findings highlight the critical importance of LLM judge quality, revealing shortcomings in their discriminative ability and biases toward LLM-powered rankers. Developing robust methods for assessing and improving the quality of LLM judges is a crucial research direction for the IR community, potentially drawing motivations from horizontal autorater efforts [57].
- Robustness Against Adversarial Attacks. LLM judges, similar to LLM-based rankers, are susceptible to adversarial attacks, including keyword stuffing and content injection [2, 52]. Understanding these vulnerabilities and developing effective mechanisms to enhance the robustness of LLM judges against them are critical areas for ensuring the practical applicability of LLM judges in real-world scenarios.
- Human-in-the-Loop LLM Judges. The potential for using LLMs to augment human assessors, e.g., for quality control, has been suggested [14, 48]. While Upadhyay et al. [55] found that human-in-the-loop processes did not bring obvious tangible benefits, their study represents a preliminary investigation. Further research is needed to explore the potential of this approach more comprehensively.
- From Passages to Longer Documents. Most of the existing work focuses on passages as the unit of retrieval, using either the TREC DL [2, 5, 14, 30] or RAG [55] benchmarks. There is much less work on document-level ad hoc retrieval, with exceptions including TREC-8 [14] and Robust [54]. It is known that LLMs handle long context differently [24], and the implications for judging long documents need further investigation.
- Alternative Judging Approaches. All existing studies apply LLMs in a pointwise manner, but a pairwise or listwise setup would also be possible. LLM judge research can borrow ideas from LLM ranking research where pairwise and listwise approaches are extensively explored.
- Domain-specific Solutions. While most existing research focuses on general-purpose search, specific domains might require purpose-built solutions. Recent work, for example, has explored the use of LLM judges for e-commerce search [32, 45]. Extending this concept of domain specialization, the applicability and value of LLM judges in specialized domains requiring expert knowledge (e.g., medical or legal search) remain less clear. In such domains, LLM judges might offer potential cost savings and more in-depth domain-specific knowledge compared to non-expert human assessors, but they also introduce new challenges, including the need for high accuracy, the potential for serious consequences from errors, and the complexities of expert judgment. Future research should explore the use of LLM judges in these contexts, carefully considering the trade-offs between cost, efficiency, and the risks of inaccurate assessments.
- Smaller, Purpose-Built Models. The use of LLMs for judging at large scale raises concerns about computational cost and latency. A promising research direction is to explore the development of smaller models, designed specifically for the judging task. These purpose-built models could potentially offer significant advantages in terms of efficiency and speed, while maintaining performance comparable to massive LLMs in terms of accuracy and reliability.
- Internationalization. Most related research focuses on English-language corpora. The issues discussed in this paper may be amplified in other languages, especially low-resource ones, due to limitations in LLMs' multilingual capabilities. Further research is needed to evaluate the performance of LLM judges across a wider range of languages.
- Training Models on LLM-generated Labels. Training retrieval models on data labeled by LLMs introduces a significant risk of circularity and bias amplification. If done recursively, this might lead to model collapse [47]. Thomas et al. [54] acknowledge that parts of the Bing search engine are retrained using LLM-generated labels. Understanding the long-term effects of such training is an important research direction.
# 6 Conclusion
This paper has investigated the emerging and critical challenge of understanding the effect LLM-based rankers and AI-powered content creation may have on LLM-based judges' ability to accurately assess relevance. Through a synthesis of existing literature, we identified key concerns regarding the quality, validity, reliability, and potential biases of LLM judgments. Our experiments provided empirical evidence demonstrating how interactions between the various roles LLMs play can lead to inaccurate or biased assessments of retrieval effectiveness, particularly in scenarios involving LLM-based rankers. Finally, we presented guidelines for the use of LLMs as judges in IR and outlined a research agenda to address crucial open questions in this rapidly evolving field.
# References
[1] Zahra Abbasiantaeb, Chuan Meng, Leif Azzopardi, and Mohammad Aliannejadi. 2024. Can We Use Large Language Models to Fill Relevance Judgment Holes? In Joint Proceedings of the 1st Workshop on Evaluation Methodologies, Testbeds and Community for Information Access Research (EMTCIR 2024) and the 1st Workshop on User Modelling in Conversational Information Retrieval (UM-CIR 2024) co-located with the 2nd International ACM SIGIR Conference on Information Retrieval in the Asia Pacific (SIGIR-AP 2024), Tokyo, Japan, December 12, 2024 (CEUR Workshop Proceedings, Vol. 3854).
[2] Marwah Alaofi, Paul Thomas, Falk Scholer, and Mark Sanderson. 2024. LLMs can be Fooled into Labelling a Document as Relevant. In Proceedings of the 2024 Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region (SIGIR-AP '24). 32-41.
[3] Luiz Bonifacio, Hugo Abonizio, Marzieh Fadaee, and Rodrigo Nogueira. 2022. InPars: Unsupervised Dataset Generation for Information Retrieval. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '22). 2387-2392.
[4] Lichang Chen, Chen Zhu, Jiuhai Chen, Davit Soselia, Tianyi Zhou, Tom Goldstein, Heng Huang, Mohammad Shoeybi, and Bryan Catanzaro. 2024. ODIN: Disentangled Reward Mitigates Hacking in RLHF. In Forty-first International Conference on Machine Learning (ICML '24).
[5] Nuo Chen, Jiqun Liu, Xiaoyu Dong, Qijiong Liu, Tetsuya Sakai, and Xiao-Ming Wu. 2024. AI Can Be Cognitively Biased: An Exploratory Study on Threshold Priming in LLM-Based Batch Relevance Assessment. In Proceedings of the 2024 Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region (SIGIR-AP '24). 54–63.
[6] Charles L. A. Clarke and Laura Dietz. 2024. LLM-based relevance assessment still can't replace human relevance assessment. arXiv:2412.17156 [cs.IR]
[7] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2020. Overview of the TREC 2020 Deep Learning Track. In Proceedings of the Twenty-Ninth Text REtrieval Conference (TREC '20).
[8] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2019. Overview of the TREC 2019 Deep Learning track. In Proceedings of the Twenty-Eighth Text REtrieval Conference (TREC '19).
[9] Sunhao Dai, Chen Xu, Shicheng Xu, Liang Pang, Zhenhua Dong, and Jun Xu. 2024. Bias and Unfairness in Information Retrieval Systems: New Challenges in the LLM Era. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '24). 6437-6447.
[10] Sunhao Dai, Yuqi Zhou, Liang Pang, Weihao Liu, Xiaolin Hu, Yong Liu, Xiao Zhang, Gang Wang, and Jun Xu. 2024. Neural Retrievers are Biased Towards LLM-Generated Content. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '24). 526-537.
[11] Zhuyun Dai, Vincent Y Zhao, Ji Ma, Yi Luan, Jianmo Ni, Jing Lu, Anton Bakalov, Kelvin Guu, Keith Hall, and Ming-Wei Chang. 2023. Promptagator: Few-shot Dense Retrieval From 8 Examples. In The Eleventh International Conference on Learning Representations (ICLR '23).
[12] Andrew Drozdov, Honglei Zhuang, Zhuyun Dai, Zhen Qin, Razieh Rahimi, Xuanhui Wang, Dana Alon, Mohit Iyyer, Andrew McCallum, Donald Metzler, and Kai Hui. 2023. PaRaDe: Passage Ranking using Demonstrations with LLMs. In Findings of the Association for Computational Linguistics: EMNLP 2023 (EMNLP '23). 14242-14252.
[13] Yann Dubois, Percy Liang, and Tatsunori Hashimoto. 2024. Length-Controlled AlpacaEval: A Simple Debiasing of Automatic Evaluators. In First Conference on Language Modeling (COLM '24).
[14] Guglielmo Faggioli, Laura Dietz, Charles L. A. Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, and Henning Wachsmuth. 2023. Perspectives on Large Language Models for Relevance Judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval (ICTIR '23). 39-50.
[15] Isabel O. Gallegos, Ryan A. Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K. Ahmed. 2024. Bias and Fairness in Large Language Models: A Survey. Computational Linguistics 50, 3 (Sept. 2024), 1097-1179.
[16] Gemini Team Google. 2023. Gemini: A family of highly capable multimodal models. arXiv:2312.11805 [cs.CL]
[17] Fabrizio Gilardi, Meysam Alizadeh, and Mael Kubli. 2023. ChatGPT outperforms crowd workers for text-annotation tasks. Proceedings of the National Academy of Sciences 120, 30 (2023), e2305016120.
[18] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, Saizhuo Wang, Kun Zhang, Yuanzhuo Wang, Wen Gao, Lionel Ni, and Jian Guo. 2024. A Survey on LLM-as-a-Judge. arXiv:2411.15594 [cs.CL]
[19] Alon Jacovi, Andrew Wang, Chris Alberti, Connie Tao, Jon Lipovetz, Kate Olszewska, Lukas Haas, Michelle Liu, Nate Keating, Adam Bloniarz, Carl Saroufim, Corey Fry, Dror Marcus, Doron Kukliansky, Gaurav Singh Tomar, James Swirhun, Jinwei Xing, Lily Wang, Madhu Gurumurthy, Michael Aaron, Moran Ambar, Rachana Fellinger, Rui Wang, Zizhao Zhang, Sasha Goldshtein, and Dipanjan
Das. 2025. The FACTS Grounding Leaderboard: Benchmarking LLMs' Ability to Ground Responses to Long-Form Input. arXiv:2501.03200 [cs.CL]
[20] Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Alexander Cosgrove, Christopher D Manning, Christopher Re, Diana Acosta-Navas, Drew Arad Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue WANG, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Andrew Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang, Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. 2023. Holistic Evaluation of Language Models. Transactions on Machine Learning Research (2023).
[21] Paul Pu Liang, Chiyu Wu, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2021. Towards understanding and mitigating social biases in language models. In International Conference on Machine Learning (ICML '21). 6565-6576.
[22] Weixin Liang, Yaohui Zhang, Mihai Codreanu, Jiayu Wang, Hancheng Cao, and James Zou. 2025. The Widespread Adoption of Large Language Model-Assisted Writing Across Society. arXiv:2502.09747 [cs.CL]
[23] Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng-Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python Toolkit for Reproducible Information Retrieval Research with Sparse and Dense Representations. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '21). 2356-2362.
[24] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. 2024. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics 12 (2024), 157-173.
[25] Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-Eval: NLG Evaluation using GPT-4 with Better Human Alignment. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP '23). 2511–2522.
[26] Yang Liu and Mirella Lapata. 2019. Text Summarization with Pretrained Encoders. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP). 3730-3740.
[27] Yiqi Liu, Nafise Moosavi, and Chenghua Lin. 2024. LLMs as Narcissistic Evaluators: When Ego Inflates Evaluation Scores. In Findings of the Association for Computational Linguistics: ACL 2024. 12688-12701.
[28] Yinhong Liu, Han Zhou, Zhijiang Guo, Ehsan Shareghi, Ivan Vulic, Anna Korhonen, and Nigel Collier. 2024. Aligning with Human Judgement: The Role of Pairwise Preference in Large Language Model Evaluators. In First Conference on Language Modeling (COLM '24).
[29] Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-Shot Listwise Document Reranking with a Large Language Model. arXiv:2305.02156 [cs.IR]
[30] Sean MacAvaney and Luca Soldaini. 2023. One-Shot Labeling for Automatic Relevance Estimation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '23). 2230-2235.
[31] J. S. McCarley, Rishav Chakravarti, and Avirup Sil. 2019. Structured Pruning of a BERT-based Question Answering Model. arXiv:1910.06360 [cs.CL]
[32] Navid Mehrdad, Hrushikesh Mohapatra, Mossaab Bagdouri, Prijith Chandran, Alessandro Magnani, Xunfan Cai, Ajit Puthenputhussery, Sachin Yadav, Tony Lee, ChengXiang Zhai, and Ciya Liao. 2024. Large Language Models for Relevance Judgment in Product Search. arXiv:2406.00247 [cs.IR]
[33] Roberto Navigli, Simone Conia, and Björn Ross. 2023. Biases in large language models: origins, inventory, and discussion. ACM Journal of Data and Information Quality 15, 2 (2023), 1-21.
[34] Rodrigo Nogueira and Kyunghyun Cho. 2019. Passage Re-ranking with BERT. arXiv:1901.04085 [cs.IR]
[35] Rodrigo Nogueira, Zhiying Jiang, and Jimmy Lin. 2020. Document ranking with a pretrained sequence-to-sequence model. In Findings of the Association for Computational Linguistics: EMNLP 2020. 708-718.
[36] Harrie Oosterhuis, Rolf Jagerman, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2024. Reliable Confidence Intervals for Information Retrieval Evaluation Using Generative AI. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '24). 2307-2317.
[37] Arjun Panickssery, Samuel R. Bowman, and Shi Feng. 2024. LLM Evaluators Recognize and Favor Their Own Generations. In The Thirty-eighth Annual Conference on Neural Information Processing Systems (NeurIPS '24).
[38] Andrew Parry, Maik Fröbe, Sean MacAvaney, Martin Potthast, and Matthias Hagen. 2024. Analyzing Adversarial Attacks on Sequence-to-Sequence Relevance Models. In Proceedings of the 46th European Conference on Information Retrieval (ECIR '24). 286-302.
[39] Ronak Pradeep, Rodrigo Nogueira, and Jimmy Lin. 2021. The Expando-Mono-Duo Design Pattern for Text Ranking with Pretrained Sequence-to-Sequence Models. arXiv:2101.05667 [cs.IR].
[40] Zhen Qin, Rolf Jagerman, Kai Hui, Honglei Zhuang, Junru Wu, Le Yan, Jiaming Shen, Tianqi Liu, Jialu Liu, Donald Metzler, Xuanhui Wang, and Michael Bendersky. 2024. Large Language Models are Effective Text Rankers with Pairwise Ranking Prompting. In Findings of the Association for Computational Linguistics: NAACL 2024. 1504-1518.
|
| 309 |
+
[41] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. Journal of Machine Learning Research 21, 140 (2020), 1-67.
|
| 310 |
+
[42] Hossein A. Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles L. A. Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Report on the 1st Workshop on Large Language Model for Evaluation in Information Retrieval (LLM4Eval 2024) at SIGIR 2024. arXiv:2408.05388 [cs.IR]
|
| 311 |
+
[43] Hossein A. Rahmani, Xi Wang, Emine Yilmaz, Nick Craswell, Bhaskar Mitra, and Paul Thomas. 2025. SynDL: A Large-Scale Synthetic Test Collection for Passage Retrieval. arXiv:2408.16312 [cs.IR]
|
| 312 |
+
[44] Devendra Sachan, Mike Lewis, Mandal Joshi, Armen Aghajanyan, Wen-tau Yih, Joelle Pineau, and Luke Zettlemoyer. 2022. Improving Passage Retrieval with Zero-Shot Question Generation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP '22). 3781-3797.
|
| 313 |
+
[45] Jayant Sachdev, Sean D. Rosario, Abhijeet Phatak, He Wen, Swati Kirti, and Chittaranjan Tripathy. 2025. Automated Query-Product Relevance Labeling using Large Language Models for E-commerce Search. arXiv:2502.15990 [cs.IR]
|
| 314 |
+
[46] Tefko Saracevic. 1996. Relevance reconsidered. In Proceedings of the second conference on conceptions of library and information science (CoLIS 2). 201-218.
|
| 315 |
+
[47] Ilia Shumailov, Zakhhar Shumaylov, Yiren Zhao, Nicolas Papernot, Ross J. Anderson, and Yarin Gal. 2024. AI models collapse when trained on recursively generated data. Nature 631, 8022 (July 2024), 755-759.
|
| 316 |
+
[48] Ian Soboroff. 2025. Don't Use LLMs to Make Relevance Judgments. Information Retrieval Research 1 (Mar. 2025), 29-46.
|
| 317 |
+
[49] Heydar Soudani, Roxana Petcu, Evangelos Kanoulas, and Faegheh Hasibi. 2024. A Survey on Recent Advances in Conversational Data Generation. arXiv:2405.13003 [cs.CL]
|
| 318 |
+
[50] Rickard Stureborg, Dimitris Alikaniotis, and Yoshi Suhara. 2024. Large Language Models are Inconsistent and Biased Evaluators. arXiv:2405.01724 [cs.CL]
|
| 319 |
+
[51] Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2023. Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing (EMNLP '23). 14918-14937.
|
| 320 |
+
[52] Manveer Singh Tamber and Jimmy Lin. 2025. Illusions of Relevance: Using Content Injection Attacks to Deceive Retrievers, Rerankers, and LLM Judges. arXiv:2501.18536 [cs.IR]
|
| 321 |
+
[53] Yi Tay, Vinh Tran, Mostafa Dehghani, Jianmo Ni, Dara Bahri, Harsh Mehta, Zhen Qin, Kai Hui, Zhe Zhao, Jai Gupta, et al. 2022. Transformer memory as a
|
| 322 |
+
|
| 323 |
+
differentiable search index. In Proceedings of the 36th International Conference on Neural Information Processing Systems (NeurIPS '22), 21831-21843.
|
| 324 |
+
[54] Paul Thomas, Seth Spielman, Nick Craswell, and Bhaskar Mitra. 2024. Large Language Models can Accurately Predict Searcher Preferences. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '24). 1930-1940.
|
| 325 |
+
[55] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Daniel Campos, Nick Craswell, Ian Soboroff, Hoa Trang Dang, and Jimmy Lin. 2024. A Large-Scale Study of Relevance Assessments with Large Language Models: An Initial Look. arXiv:2411.08275 [cs.IR]
|
| 326 |
+
[56] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Nick Craswell, and Jimmy Lin. 2024. UMBRELA: Umbrela is the (Open-Source Reproduction of the) Bing RELevance Assessor. arXiv:2406.06519 [cs.IR]
|
| 327 |
+
[58] Yizhong Wang, Hamish Ivison, Pradeep Dasigi, Jack Hessel, Tushar Khot, Khyathi Chandu, David Wadden, Kelsey MacMillan, Noah A Smith, Iz Beltagy, and Hannaneh Hajishirzi. 2023. How Far Can Camels Go? Exploring the State of Instruction Tuning on Open Resources. In Proceedings of the 37th International Conference on Neural Information Processing Systems (NeurIPS '23). 74764-74786.
|
| 328 |
+
[59] Wenda Xu, Guanglei Zhu, Xuandong Zhao, Liangming Pan, Lei Li, and William Wang. 2024. Pride and Prejudice: LLM Amplifies Self-Bias in Self-Refinement. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) (ACL '24). 15474-15492.
|
| 329 |
+
[60] Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating Text Generation with BERT. In 8th International Conference on Learning Representations (ICLR '20).
|
| 330 |
+
[61] Wei Zhao, Maxime Peyrard, Fei Liu, Yang Gao, Christian M. Meyer, and Steffen Eger. 2019. MoverScore: Text Generation Evaluating with Contextualized Embeddings and Earth Mover Distance. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP '19). 563-578.
|
| 331 |
+
[62] Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MTbench and Chatbot Arena. In Proceedings of the 37th International Conference on Neural Information Processing Systems (NeurIPS '23).
|
| 332 |
+
[63] Jinhua Zhu, Yingce Xia, Lijun Wu, Di He, Tao Qin, Wengang Zhou, Houqiang Li, and Tieyan Liu. 2020. Incorporating BERT into Neural Machine Translation. In International Conference on Learning Representations (ICLR '20).
|
| 333 |
+
[64] Honglei Zhuang, Zhen Qin, Rolf Jagerman, Kai Hui, Ji Ma, Jing Lu, Jianmo Ni, Xuanhui Wang, and Michael Bendersky. 2023. RankT5: Fine-Tuning T5 for Text Ranking with Ranking Losses. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '23). 2308-2313.
|
data/2025/2503_19xxx/2503.19092/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:40b19da27afe954def37c1532707537bb425f6ab8f8b4e466fef1f1a7c00a888
size 240473
data/2025/2503_19xxx/2503.19092/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_model.json
ADDED
The diff for this file is too large to render. See raw diff
data/2025/2503_19xxx/2503.19108/00252ab9-ae73-497c-9623-292ffadd7e20_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f05c5230057222f6ac9da691a2e092743490f57704f5028bb9b341666615d2b4
size 6114495
data/2025/2503_19xxx/2503.19108/full.md
ADDED
@@ -0,0 +1,452 @@
# Your ViT is Secretly an Image Segmentation Model

Tommie Kerssies<sup>1</sup>, Niccolò Cavagnero<sup>2,*</sup>, Alexander Hermans<sup>3</sup>, Narges Norouzi<sup>1</sup>, Giuseppe Averta<sup>2</sup>, Bastian Leibe<sup>3</sup>, Gijs Dubbelman<sup>1</sup>, Daan de Geus<sup>1,3</sup>

<sup>1</sup>Eindhoven University of Technology, <sup>2</sup>Polytechnic of Turin, <sup>3</sup>RWTH Aachen University
# Abstract

Vision Transformers (ViTs) have shown remarkable performance and scalability across various computer vision tasks. To apply single-scale ViTs to image segmentation, existing methods adopt a convolutional adapter to generate multiscale features, a pixel decoder to fuse these features, and a Transformer decoder that uses the fused features to make predictions. In this paper, we show that the inductive biases introduced by these task-specific components can instead be learned by the ViT itself, given sufficiently large models and extensive pre-training. Based on these findings, we introduce the Encoder-only Mask Transformer (EoMT), which repurposes the plain ViT architecture to conduct image segmentation. With large-scale models and pre-training, EoMT obtains a segmentation accuracy similar to state-of-the-art models that use task-specific components. At the same time, EoMT is significantly faster than these methods due to its architectural simplicity, e.g., up to $4 \times$ faster with ViT-L. Across a range of model sizes, EoMT demonstrates an optimal balance between segmentation accuracy and prediction speed, suggesting that compute resources are better spent on scaling the ViT itself rather than adding architectural complexity. Code: https://www.tue-mps.org/eomt/.

# 1. Introduction

The Vision Transformer (ViT) [23] has proven to be a strong, scalable, and generally applicable architecture for computer vision [1, 21, 51, 67]. Recently, research has shown that ViTs are very suitable for large-scale pretraining [5, 28, 51, 68], resulting in generalizable models that achieve high performance on many downstream tasks. A particularly well-researched task is image segmentation, e.g., semantic, instance, and panoptic segmentation [39]. To achieve state-of-the-art segmentation performance with ViTs, they are typically combined with several computationally intensive and task-specific components such as ViT-Adapter and Mask2Former (M2F) [13, 15].

Figure 1. ViT-Adapter + Mask2Former vs. EoMT (Ours). EoMT demonstrates an optimal balance between Panoptic Quality (PQ) and FPS across different sizes of DINOv2 [51] pre-trained ViTs [23]. Evaluation on COCO val2017 [43], see Tab. 3.

In these methods, an adapter [13, 63] is first applied in parallel to the ViT, using convolutional layers and interacting with the ViT to extract multi-scale features. Second, these multi-scale features are fed to a Mask Transformer module [14, 15, 35, 66], consisting of a pixel decoder and a Transformer decoder. The pixel decoder fuses and enhances information across multiple feature scales. The Transformer decoder then introduces learnable object queries that attend to the multiscale features via cross-attention. These queries are finally used to generate segmentation mask and class predictions.

In this paper, we explore whether these additional components, as visualized in Fig. 1 (top left), are truly necessary to obtain state-of-the-art performance. We hypothesize that with increasingly extensive pre-training and for larger ViTs, the positive effect of these additional components decreases, making them nearly irrelevant. There are two key reasons for this: (i) Large-scale pre-training, particularly when combined with objectives like masked image modeling (e.g., DINOv2 [51]), teaches the ViT to extract dense, fine-grained semantic information essential for segmentation [36]. Therefore, we expect that additional components are no longer required to aid the ViT in extracting this information. (ii) Larger ViTs have more learnable parameters. We expect that this increased capacity allows large ViTs to accurately conduct image segmentation without additional components. If these hypotheses hold, the simplicity and efficiency of segmentation models could be significantly enhanced by removing these task-specific components, while only minimally affecting their segmentation accuracy.

To confirm these hypotheses, we experimentally assess the effect of gradually removing the aforementioned components, in combination with several types of pre-training and different model sizes. This process ultimately leads us to a conceptually simple model with an architecture that only minimally differs from the plain ViT. We call this model the Encoder-only Mask Transformer (EoMT). Crucially, EoMT repurposes the ViT blocks to not only extract image features, but also enable interaction between learnable object queries and image features, to finally predict a mask and class label for each of these queries. This highly simplified design is visualized in Fig. 1 (top right).

A key innovation of EoMT is that it does not require masked attention during inference, unlike existing Mask2Former-like architectures. Models use masked attention to constrain each query to cross-attend only within the intermediate segmentation mask predicted for that query. While this improves segmentation accuracy, it also harms efficiency as it requires additional operations. To overcome this, we present a novel mask annealing strategy, where masked attention is initially fully enabled during training, but then gradually phased out as training progresses, allowing for efficient, masked-attention-free inference.

Overall, EoMT offers several advantages: (i) By not requiring additional components and by enabling inference without masked attention, EoMT greatly reduces computational requirements and latency. (ii) Due to its architectural simplicity, EoMT is significantly easier to implement than existing approaches that use additional components. (iii) By relying purely on the Transformer architecture [57] of the ViT, EoMT can fully and directly leverage ongoing and future developments related to Transformers, without being bottlenecked by additional non-optimized modules. This applies not only to general improvements like FlashAttention [18, 19] and specialized hardware [16, 24], but also to vision-specific advances like token merging [7, 48, 50] and vision foundation models [28, 51].

Through our experimental analysis, we find that removing task-specific components has only a minimal impact on segmentation accuracy when using a large pre-trained model like DINOv2 [51], confirming our hypotheses. By removing these components, EoMT achieves performance competitive with the state of the art, while being much faster. As shown in Fig. 1, EoMT obtains a considerably better balance between prediction speed and segmentation quality. To illustrate this: ViT-Adapter + M2F with ViT-B achieves a Panoptic Quality (PQ) of 54.4 at 32 frames per second (FPS). In contrast, despite using a larger model, EoMT runs significantly faster with ViT-L at 128 FPS while also achieving a higher PQ of 56.0.

In summary, we make the following contributions: (1) We assess the necessity of task-specific components of state-of-the-art image segmentation models and find that they become less relevant when scaling up model size and pre-training. (2) We present the Encoder-only Mask Transformer (EoMT), a simple and efficient model that repurposes the ViT blocks for segmentation and obtains state-of-the-art performance without requiring inefficient task-specific components. (3) We propose a mask annealing training strategy that enables significantly faster inference by removing the need for masked attention.
# 2. Related Work

Image segmentation. Image segmentation is a fundamental task in computer vision, for which the goal is to divide an image into pixel-level segments based on semantics, by providing a segmentation mask and class label for each segment. For semantic segmentation, the objective is to output a single segment for each class in the image. Instance segmentation, on the other hand, requires a segment for each individual object instance, but disregards uncountable entities like 'road' or 'sky'. Finally, panoptic segmentation [39] combines these tasks and requires $(i)$ segments per individual instance for countable classes called things (e.g., 'person' or 'car'), and $(ii)$ a single segment per class for uncountable classes called stuff (e.g., 'road' or 'sky').

Traditionally, segmentation methods had specialized architectures that were tailored for only one of these tasks [11, 33, 38, 55, 69]. Recently, however, the emergence of the Mask Transformer framework [8, 14, 59] has enabled the adoption of a unified architecture and training pipeline for all three segmentation tasks [3, 10, 14, 15, 35, 40, 41, 66]. The versatility of these models is enabled by learnable object queries, which adaptively learn to represent a single segment, whether it is a stuff class or a thing instance. In this work, we investigate state-of-the-art Mask Transformer models and propose a minimalistic, efficient ViT-based model that is competitive with more complex architectures while being significantly more efficient.

Vision Transformers. The Transformer architecture [57] was originally developed for Natural Language Processing (NLP), where its ability to model long-range dependencies revolutionized the field. To harness the power of this architecture for computer vision, the Vision Transformer (ViT) [23] was introduced. ViTs divide images into fixed-size patches, project them into an embedding space to form tokens, and then process these tokens using multiple Transformer blocks. By design, there are key differences between Convolutional Neural Networks (CNNs) and ViTs. $(i)$ ViTs process images at a fixed resolution due to the fixed patch size. In contrast, CNNs (e.g., ResNet [32]) typically contain various downscaling steps, allowing them to output feature maps of multiple resolutions [44]. $(ii)$ ViTs process images globally leveraging self-attention, while CNNs process images locally by applying convolutional filters.

For tasks like image segmentation, multi-scale features and local processing are claimed to be beneficial for performance [13]. To introduce these properties into Transformers, some works propose alternative architectures [30, 31, 45, 64] that incorporate local attention and token down-sampling. However, as discussed later, these models deviate from the plain ViT architecture, preventing them from leveraging advancements in large-scale pre-training. An alternative approach extends ViTs with a CNN-based adapter to produce multi-scale features [13, 63]. In this work, however, we find that the necessity of these adapters and other task-specific components greatly diminishes when using extensively pre-trained large ViTs for image segmentation. This allows a much simpler and more efficient model while maintaining performance competitive with state-of-the-art approaches. Similar to our work, UViT [12] explores the use of single-scale features from the plain ViT for instance recognition tasks, but it still relies on complex task-specific decoders. Meanwhile, YOLOS [27] adopts an encoder-only ViT for instance recognition but is restricted exclusively to object detection. Moreover, neither method demonstrates that scaling up model size and pre-training enables a simple ViT-based model to be competitive with complex state-of-the-art architectures, which we do with EoMT.

Large-scale visual pre-training. For tasks like image segmentation, it is common practice to initialize a model's backbone with pre-trained weights to improve downstream performance over random initialization. Initially, such pre-training relied on supervised image classification on ImageNet [22]. More recently, pre-training has been scaled up to massive datasets using weakly- [52] or self-supervised [9] learning. Currently, vision foundation models (VFMs) that use masked image modeling, like DINOv2 [51] and EVA-02 [28], provide the best pre-training for image segmentation [36]. Notably, all these VFMs use the ViT architecture for its scalability. This means that models with non-ViT backbones, such as Swin [45] and ConvNeXt [46], which are commonly used for image segmentation, cannot leverage VFM pre-training due to their incompatible architectures. In contrast, EoMT can benefit from VFM initialization, as it is fully ViT-based.

# 3. Towards Encoder-only Mask Transformer

# 3.1. Preliminaries

Vision Transformers. Vision Transformers first divide an image $I \in \mathbb{R}^{3 \times H \times W}$ into $N$ non-overlapping patches of shape $(p \times p)$, where $H$ and $W$ are the image height and width, and $p$ is the pre-determined patch size. Subsequently, these patches are linearly projected into patch tokens $X^0 \in \mathbb{R}^{D \times N}$ and processed by $L$ Transformer blocks [57]. Each Transformer block applies multi-head self-attention (MHSA) and a two-layer multi-layer perceptron (MLP) with a nonlinear activation function. Concretely, for each block $i$,

$$
\boldsymbol{Z}^{i} = \boldsymbol{X}^{i} + \mathrm{MHSA}(\mathrm{Norm}(\boldsymbol{X}^{i})),
$$

$$
\boldsymbol{X}^{i+1} = \boldsymbol{Z}^{i} + \mathrm{MLP}(\mathrm{Norm}(\boldsymbol{Z}^{i})), \tag{1}
$$

where Norm is Layer Normalization [4]. The result of this process is a set of final patch tokens $\mathbf{X}^L$. Reordering these tokens yields spatial features $\mathbf{F}^{\mathrm{vit}} \in \mathbb{R}^{D \times \frac{H}{p} \times \frac{W}{p}}$, where the patch size $p$ determines the resolution.
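For concreteness, Eq. (1) corresponds to a standard pre-norm Transformer block. The following is a minimal PyTorch sketch for illustration, not the authors' implementation; the class name `ViTBlock` and the ViT-L-style defaults (`dim=1024`, `num_heads=16`) are our assumptions:

```python
import torch
import torch.nn as nn

class ViTBlock(nn.Module):
    """Pre-norm ViT block implementing Eq. (1)."""
    def __init__(self, dim: int = 1024, num_heads: int = 16, mlp_ratio: int = 4):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, mlp_ratio * dim),
            nn.GELU(),
            nn.Linear(mlp_ratio * dim, dim),
        )

    def forward(self, x, attn_mask=None):
        # Z^i = X^i + MHSA(Norm(X^i))
        h = self.norm1(x)
        z = x + self.attn(h, h, h, attn_mask=attn_mask, need_weights=False)[0]
        # X^{i+1} = Z^i + MLP(Norm(Z^i))
        return z + self.mlp(self.norm2(z))
```

The optional `attn_mask` argument is unused for plain feature extraction; it becomes relevant for the masked attention discussed in Sec. 3.3.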
To achieve state-of-the-art image segmentation with ViTs, recent works have proposed several components that further process the resulting patch tokens and interact with the ViT at different levels.

Adapters. To introduce convolutional biases and enable multi-scale feature extraction, an adapter is applied in parallel to the ViT to inject and extract features [13, 63]. Concretely, the ViT-Adapter [13], which we study in this work, first applies a CNN to the input image to extract multi-scale features at resolutions $\frac{1}{4}, \frac{1}{8}, \frac{1}{16}$, and $\frac{1}{32}$. Subsequently, the ViT-Adapter repeatedly injects these CNN features into the ViT through multi-scale deformable attention [71], applies several ViT blocks, and extracts refined features from the ViT into the multi-scale CNN features. The output of the adapter is a set of multi-scale ViT- and CNN-based features, $\{F_4, F_8, F_{16}, F_{32}\}$, with $F_i \in \mathbb{R}^{D \times \frac{H}{i} \times \frac{W}{i}}$.

Mask Transformers. To make segmentation predictions with these features, state-of-the-art models follow the Mask Transformer framework [8, 14, 59]. In this work, we study the state-of-the-art method Mask2Former (M2F) [15]. As a first step, to further enhance the features extracted by the ViT-Adapter, M2F applies a pixel decoder. This pixel decoder takes the features $\{F_4,F_8,F_{16},F_{32}\}$ and applies a series of multi-scale deformable attention layers [71], outputting processed features $\{\widehat{F}_4,\widehat{F}_8,\widehat{F}_{16},\widehat{F}_{32}\}$. In this process, multi-scale features from different backbone layers are processed into a consistent yet scale-specific representation.

Figure 2. EoMT architecture. Learnable queries are concatenated to the patch tokens after the first $L_{1}$ ViT encoder blocks. These concatenated tokens are then jointly processed by the last $L_{2}$ blocks and used to predict class and mask logits.

The final component of M2F, the Transformer decoder, generates and outputs the actual segmentation predictions. As inputs, it takes not only the multi-scale features from the pixel decoder, but also a set of $K$ learned queries $\mathcal{Q}^0 = \{\pmb{q}_i^0 \in \mathbb{R}^D\}_{i=1}^K$. In the Transformer decoder, each of these queries learns to represent one individual segment per image (i.e., a stuff class or a thing instance). To do so, these queries are subjected to $J$ blocks with cross-attention to the multi-scale features and multi-head self-attention between queries, yielding processed queries $\mathcal{Q}^J$. The Transformer decoder then generates the segmentation predictions by predicting a class label and segmentation mask for each query $\pmb{q}_i^J$. Class logits $\pmb{c}_i^J \in \mathbb{R}^C$ are predicted by applying a linear layer to $\pmb{q}_i^J$. Mask logits $\pmb{m}_i^J \in \mathbb{R}^{\frac{H}{4} \times \frac{W}{4}}$ are obtained by first applying an MLP to yield mask embedding $\widehat{\pmb{q}}_i^J$ and subsequently taking the dot product between the features $\widehat{\pmb{F}}_4$ and $\widehat{\pmb{q}}_i^J$. By assigning each ground-truth segment to a unique query and supervising both class and mask predictions, the model learns the segmentation task.
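The per-query prediction head just described is small enough to sketch directly. The code below is an illustrative reading of the text, not Mask2Former's released code; the layer widths, ReLU activations, and the extra "no object" class follow M2F's published design, while all names are ours:

```python
import torch
import torch.nn as nn

class MaskClassHead(nn.Module):
    """Per-query class logits (linear) and mask logits (MLP + dot product)."""
    def __init__(self, dim: int = 1024, num_classes: int = 133):
        super().__init__()
        # +1 for the "no object" class used during Hungarian matching.
        self.class_head = nn.Linear(dim, num_classes + 1)
        # Three-layer MLP producing the mask embedding \hat{q}_i.
        self.mask_mlp = nn.Sequential(
            nn.Linear(dim, dim), nn.ReLU(),
            nn.Linear(dim, dim), nn.ReLU(),
            nn.Linear(dim, dim),
        )

    def forward(self, queries: torch.Tensor, f4: torch.Tensor):
        # queries: (B, K, D); f4: (B, D, H/4, W/4) high-resolution features
        class_logits = self.class_head(queries)           # (B, K, C+1)
        mask_emb = self.mask_mlp(queries)                 # (B, K, D)
        # Dot product between each mask embedding and every pixel feature.
        mask_logits = torch.einsum("bkd,bdhw->bkhw", mask_emb, f4)
        return class_logits, mask_logits
```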
A key feature of the M2F Transformer decoder is the use of masked cross-attention. In each of the $J$ blocks, prior to cross-attention between queries and image features, an intermediate segmentation mask and class label are predicted and supervised per query using the above procedure. The predicted masks are then used to mask the attention, allowing a query to only attend to the image region that corresponds to its predicted segmentation mask. This masked attention results in improved segmentation accuracy [15].

# 3.2. Removing Task-Specific Components

To study the importance of the aforementioned task-specific components, we gradually remove them while assessing the effect on segmentation accuracy. We continue until all task-specific components are removed, ending up with our simple Encoder-only Mask Transformer (EoMT). The different configurations are described in this section and visualized in Fig. A, with the results reported in Sec. 4.2.

Removing the adapter. Removing the adapter eliminates the inductive biases and multi-resolution features provided by a CNN. However, we hypothesize that with sufficient pre-training, large ViTs can learn these features without requiring additional components. In the absence of an adapter, we construct a feature pyramid by upscaling the ViT output features $F^{\mathrm{vit}} = F_{16}$ (patch size $16 \times 16$) to compute $F_{4}$ and $F_{8}$, and downscaling them to compute $F_{32}$. We follow the approach of ViTDet [42] by using transposed convolutions for upscaling and normal convolutions for downscaling. For each scale of the feature pyramid, we independently up- or downscale $F^{\mathrm{vit}}$ with a sequence of operations. We repeat a (transposed) $2 \times 2$ convolution with stride $2 \times 2$, GELU activation, depthwise $3 \times 3$ convolution, and final Norm, until the required scales are reached. This approach mimics the multi-scale feature extraction of the ViT-Adapter in a much simpler manner.
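A minimal sketch of this simplified pyramid follows. It is our reading of the description above, not the released code; in particular, `GroupNorm(1, dim)` is used as a LayerNorm-style stand-in for the unspecified Norm layer, and all names are illustrative:

```python
import torch
import torch.nn as nn

def _resample_stage(dim: int, up: bool) -> nn.Sequential:
    """One 2x up/down stage: (transposed) 2x2 conv with stride 2,
    GELU, depthwise 3x3 conv, and a final Norm."""
    conv = (nn.ConvTranspose2d(dim, dim, 2, stride=2) if up
            else nn.Conv2d(dim, dim, 2, stride=2))
    return nn.Sequential(
        conv,
        nn.GELU(),
        nn.Conv2d(dim, dim, 3, padding=1, groups=dim),  # depthwise 3x3
        nn.GroupNorm(1, dim),                           # channel-wise Norm
    )

class SimplePyramid(nn.Module):
    """ViTDet-style pyramid from the single-scale ViT output F16."""
    def __init__(self, dim: int = 1024):
        super().__init__()
        self.to_f4 = nn.Sequential(_resample_stage(dim, up=True),
                                   _resample_stage(dim, up=True))  # 1/16 -> 1/4
        self.to_f8 = _resample_stage(dim, up=True)                  # 1/16 -> 1/8
        self.to_f32 = _resample_stage(dim, up=False)                # 1/16 -> 1/32

    def forward(self, f16: torch.Tensor):
        # f16: (B, D, H/16, W/16), the reordered ViT output features
        return self.to_f4(f16), self.to_f8(f16), f16, self.to_f32(f16)
```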
Removing the pixel decoder. Without the adapter, the resulting features no longer originate from different stages of a hierarchical backbone and thus should not need further consolidation. As such, the heavy pixel decoder should be obsolete, and we remove it by directly feeding the simplified feature pyramid to the Transformer decoder. Specifically, instead of $\{\widehat{F}_4,\widehat{F}_8,\widehat{F}_{16},\widehat{F}_{32}\}$, we input $\{F_4,F_8,F_{16},F_{32}\}$ to the Transformer decoder.

Removing multi-scale feature processing. To further simplify, we question the necessity of using features at multiple scales, since all features are derived from a shared single-scale feature map $F^{\mathrm{vit}}$. Therefore, we do not generate multi-scale features $F_{8}, F_{32}$. In the Transformer decoder, instead, queries cross-attend exclusively to the ViT output $F^{\mathrm{vit}} = F_{16}$. We only upscale $F^{\mathrm{vit}}$ to $F_{4}$ to compute the mask logits via dot product with the mask embeddings $\widehat{q}_{i}^{J}$, to ensure high-resolution output masks.

# 3.3. Encoder-only Mask Transformer

By removing the previous task-specific components, the model is reduced to a ViT with a single-scale Transformer decoder. The next step is to completely remove the decoder. This requires minor modifications to the plain ViT architecture, such that it can perform image segmentation without a dedicated decoder. We call the resulting method the Encoder-only Mask Transformer (EoMT).

Querying the ViT for masks. Differently from standard Mask Transformers for image segmentation [14, 15, 34, 35, 40, 66], which adopt heavy and complex ad-hoc decoders, EoMT only uses the architecture of the plain ViT with a few extra learned queries and a small mask prediction module. An overview of EoMT is provided in Fig. 2.

Figure 3. Masked self-attention during training. In the final $L_{2}$ blocks of EoMT, patch tokens and queries are jointly processed by self-attention. During training, the intermediate mask predictions are used to mask the query-to-patch portion of the attention operation, mimicking the masked cross-attention of M2F [15].

The first $L_{1}$ Transformer encoder blocks of the ViT are unchanged and only process the input image. After these encoder blocks, we introduce a set of $K$ learnable queries that are concatenated to the patch tokens. These are then jointly processed by the remaining $L_{2}$ ViT encoder blocks, following Eq. (1). The final blocks thus have to process the patch tokens as before, but also replace the Transformer decoder that processes the queries.
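Schematically, the resulting forward pass might look as follows. `ViTBlock` refers to the sketch in Sec. 3.1; the number of queries (`num_queries=200`) and `l2=4` are our assumptions for a ViT-L-style configuration, not confirmed defaults:

```python
import torch
import torch.nn as nn

class EoMTEncoder(nn.Module):
    """Plain ViT blocks, with K learnable queries joined before the last L2."""
    def __init__(self, blocks: nn.ModuleList, num_queries: int = 200,
                 dim: int = 1024, l2: int = 4):
        super().__init__()
        self.blocks, self.l2 = blocks, l2
        self.queries = nn.Parameter(torch.randn(num_queries, dim))

    def forward(self, patch_tokens: torch.Tensor) -> torch.Tensor:
        b = patch_tokens.shape[0]
        x = patch_tokens  # (B, N, D), after patch embedding
        # First L1 = L - L2 blocks: patch tokens only.
        for blk in self.blocks[:-self.l2]:
            x = blk(x)
        # Concatenate the K learnable queries and process jointly.
        q = self.queries.unsqueeze(0).expand(b, -1, -1)  # (B, K, D)
        x = torch.cat([q, x], dim=1)
        for blk in self.blocks[-self.l2:]:
            x = blk(x)
        return x  # the first K tokens are the processed queries
```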
A standard Mask Transformer introduces $(i)$ interaction between individual queries through self-attention, enabling queries to coordinate the objects they should attend to, and $(ii)$ transfer of information from visual tokens to object queries through query-to-patch cross-attention. Normally, these operations are performed sequentially. In contrast, by using the MHSA operation of the ViT, EoMT performs them jointly in a single layer (see Fig. 3).

In addition to the ViT, we introduce a small mask module to predict masks and corresponding classes for each query. Here, we follow the same design as M2F [15], passing the query tokens through a linear layer to predict class logits and using a three-layer MLP followed by a dot product with the upscaled image features $F_{4}$ to obtain mask logits.
To enable masked attention between queries and image features during training, as in the M2F Transformer decoder, we additionally apply the previously introduced mask module before each of the last $L_{2}$ ViT blocks, to predict intermediate segmentation masks for each query. In turn, these masks can be used to mask the query-to-patch attention in the plain self-attention block, constraining the attention of each query to the segmentation mask that is predicted for it. This is visualized in Fig. 3.
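One way to realize this masking is sketched below, under our assumptions: queries placed before the patch tokens (as in the sketch above), intermediate mask logits thresholded at 0 (sigmoid 0.5), and M2F's convention of un-masking a query whose predicted mask is empty; none of these choices are confirmed by the text:

```python
import torch
import torch.nn.functional as F

def build_eomt_attn_mask(mask_logits: torch.Tensor, num_patches: int,
                         grid_hw: tuple) -> torch.Tensor:
    """mask_logits: (B, K, H/4, W/4) intermediate per-query mask predictions.
    Returns a boolean (B, S, S) tensor of ALLOWED positions, S = K + N."""
    b, k = mask_logits.shape[:2]
    s = k + num_patches
    # Downsample intermediate masks to the patch grid (H/16 x W/16).
    patch_masks = F.interpolate(mask_logits, size=grid_hw, mode="bilinear")
    keep = patch_masks.flatten(2) > 0                  # (B, K, N) boolean
    # If a query's mask is empty, let it attend everywhere (M2F convention).
    keep = keep | ~keep.any(dim=-1, keepdim=True)
    allowed = torch.ones(b, s, s, dtype=torch.bool, device=mask_logits.device)
    # Query rows (first K) may only attend to patches inside their mask;
    # query-to-query and all patch-token rows stay unrestricted.
    allowed[:, :k, k:] = keep
    # Pass `~allowed` (True = blocked) as attn_mask to the attention op,
    # expanded per head as the attention implementation requires.
    return allowed
```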
Mask annealing. While using masked attention during training improves performance, predicting intermediate masks and applying them to the self-attention operation for each block during inference is computationally expensive and inefficient. To address this, we propose a mask annealing scheme that gradually phases out masked attention over the course of training. Specifically, in each block with masked attention, we apply the mask for each query with a probability $P_{\mathrm{mask}}$, which starts at 1.0 and decays block by block to 0.0 throughout the training, as shown in Fig. 4. This strategy allows the model to initially benefit from masked attention to aid convergence, while gradually learning to operate without it, thus maintaining high performance. By eliminating the need for masked attention during inference, the final model leverages the highly optimized plain ViT architecture without additional intermediate modules or modified attention operations, ensuring optimal efficiency.

Figure 4. Mask annealing during training. Self-attention is initially masked in the final $L_{2}$ (= 4 for ViT-L) EoMT blocks. The masking probability is gradually annealed, starting from early blocks, until it is no longer needed at the end of training.
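An illustrative schedule in this spirit is sketched below. The staggered, equal slices per block are our guess at the shape suggested by Fig. 4, and the polynomial power of 0.9 is taken from the training setup in Sec. 4.1; this is not the authors' exact schedule:

```python
import random

def mask_probability(step: int, total_steps: int, block_idx: int,
                     num_masked_blocks: int = 4, power: float = 0.9) -> float:
    """P_mask for the `block_idx`-th of the last L2 blocks (0 = earliest).
    Earlier blocks finish annealing sooner, so all masking is gone
    by the end of training."""
    # Each block anneals over an equal, consecutive slice of training.
    slice_len = total_steps / num_masked_blocks
    frac = (step - block_idx * slice_len) / slice_len
    frac = min(max(frac, 0.0), 1.0)      # <0 before its slice, >1 after
    return (1.0 - frac) ** power          # decays from 1.0 to 0.0

def apply_mask_this_step(step: int, total_steps: int, block_idx: int) -> bool:
    # During training, mask each query's attention with probability P_mask.
    return random.random() < mask_probability(step, total_steps, block_idx)
```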
# 4. Experiments

# 4.1. Experimental Setup

Datasets. We use widely adopted benchmarks for image segmentation: COCO [43] and ADE20K [70] for panoptic, Cityscapes [17] and ADE20K for semantic, and COCO for instance segmentation.

Models and training. Unless specified otherwise, we use DINOv2-L [51] with a $640 \times 640$ input size and a $16 \times 16$ patch size. For EoMT with ViT-L, we use $L_{2} = 4$, which provides optimal performance (see Appendix B and Tab. D). We train our models in mixed precision with the AdamW [47] optimizer (learning rate $10^{-4}$), layer-wise learning rate decay (LLRD) [5] (factor 0.8), polynomial learning rate decay (factor 0.9), and polynomial mask annealing (factor 0.9). The number of epochs is set to 12 ($1 \times$ schedule [62]) on COCO, 31 [36] on ADE20K, and 107 [36] on Cityscapes.
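Layer-wise learning rate decay simply shrinks the learning rate for earlier blocks by a constant factor per layer. A minimal sketch under our assumptions (a `vit` object exposing a `blocks` list; the patch embedding and task head would need their own groups in practice):

```python
import torch

def llrd_param_groups(vit, base_lr: float = 1e-4, decay: float = 0.8):
    """Per-block parameter groups with layer-wise learning rate decay."""
    num_layers = len(vit.blocks)
    groups = []
    for i, block in enumerate(vit.blocks):
        # Deeper blocks get larger learning rates: lr * decay^(L - 1 - i).
        groups.append({"params": block.parameters(),
                       "lr": base_lr * decay ** (num_layers - 1 - i)})
    return groups

# optimizer = torch.optim.AdamW(llrd_param_groups(vit), lr=1e-4)
```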
Evaluation. We use standard evaluation metrics: Panoptic Quality (PQ) for panoptic [39], mean Intersection over Union (mIoU) for semantic [26], and Average Precision (AP) for instance segmentation [43]. We evaluate model efficiency in terms of average inference speed in frames per second (FPS) and average number of floating point operations (FLOPs) over all images in the validation set, as well as the number of model parameters. We use an NVIDIA H100 GPU, FlashAttention-2 [18], torch.compile [2], and a batch size of 1, unless otherwise specified. The FLOPs are obtained using fvcore [53] and reported in terms of GFLOPs (FLOPs $\times 10^{9}$).
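In principle, such GFLOPs numbers can be reproduced with fvcore along these lines; this is an illustrative sketch assuming an already-constructed `model` and the paper's $640 \times 640$ input, not the authors' measurement script:

```python
import torch
from fvcore.nn import FlopCountAnalysis

def count_gflops(model: torch.nn.Module, size: int = 640) -> float:
    model.eval()
    flops = FlopCountAnalysis(model, torch.randn(1, 3, size, size))
    return flops.total() / 1e9  # FLOPs x 10^9
```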
|
| 150 |
+
|
| 151 |
+
See Appendix A for additional implementation details.
|
| 152 |
+
|
| 153 |
+
# 4.2. Main Results
|
| 154 |
+
|
| 155 |
+
From ViT-Adapter + Mask2Former to EoMT. In Tab. 1, we evaluate the stepwise removal of task-specific components from ViT-Adapter + Mask2Former (M2F) [13, 15] to obtain our proposed EoMT model. We find that removing all task-specific components reduces the PQ only slightly, from 57.1 to 56.0, but increases the prediction speed by a substantial $4.4 \times$ . Interestingly, this FPS improvement is much larger than the FLOPs improvement. This is because EoMT relies solely on the plain ViT, allowing it to leverage the highly optimized Transformer architecture without being bottlenecked by custom components that contribute little to the segmentation accuracy. Looking at the intermediate steps, we find that removing the ViT-Adapter, pixel decoder, and multi-scale features in steps (1-3) reduce performance by just 0.4 PQ while making the model $2.2 \times$ faster. Step (4) temporarily increases FLOPs and reduces FPS, as the mask module is repeatedly applied to generate intermediate segmentation masks for masked attention, with upscaling being the most compute-intensive part. However, in step (5), we remove masked attention entirely, eliminating this overhead. The proposed mask annealing strategy enables masked-attention-free inference, further accelerating the model by $2.1 \times$ with minimal impact on the PQ. More detailed results on these steps are provided in Appendix B and Tab. B. Overall, the stepwise removal of task-specific components ultimately yields a model that is significantly faster, remarkably simpler, and nearly as accurate.
|
| 156 |
+
|
| 157 |
+
Impact of pre-training. Next, we explore the impact of pre-training. Specifically, we consider large-scale self- and weakly-supervised pre-training with DINOv2 [51] and EVA-02 [28], respectively, as well as supervised ImageNet-21K and ImageNet-1K pre-training with DeiT III [56], all with ViT-L. The results in Tab. 2 show that large-scale pretraining allows EoMT to obtain a similar PQ to ViT-Adapter + M2F. For DINOv2 and EVA-02, the overall PQ gap is
|
| 158 |
+
|
| 159 |
+
Table 1. From ViT-Adapter + Mask2Former to EoMT. Evaluated on COCO val2017 [43].
|
| 160 |
+
|
| 161 |
+
<table><tr><td>Model</td><td>Pre-train</td><td>Params</td><td>GFLOPs</td><td>FPS</td><td>PQ</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>349M</td><td>830</td><td>29</td><td>57.1</td></tr><tr><td>EoMT w/ Masking</td><td>DINOv2</td><td>316M</td><td>828</td><td>61↑32</td><td>56.2↓0.9</td></tr><tr><td>EoMT</td><td></td><td>316M</td><td>669</td><td>128↑99</td><td>56.0↓1.1</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>349M</td><td>829</td><td>25</td><td>56.7</td></tr><tr><td>EoMT w/ Masking</td><td>EVA-02</td><td>316M</td><td>826</td><td>52↑27</td><td>56.0↓0.7</td></tr><tr><td>EoMT</td><td></td><td>316M</td><td>667</td><td>77↑52</td><td>55.5↓1.2</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>349M</td><td>830</td><td>29</td><td>53.9</td></tr><tr><td>EoMT w/ Masking</td><td>IN21K</td><td>316M</td><td>828</td><td>61↑32</td><td>51.0↓2.9</td></tr><tr><td>EoMT</td><td></td><td>316M</td><td>669</td><td>128↑99</td><td>50.0↓3.9</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>349M</td><td>830</td><td>29</td><td>50.4</td></tr><tr><td>EoMT w/ Masking</td><td>IN1K</td><td>316M</td><td>828</td><td>61↑32</td><td>45.9↓4.5</td></tr><tr><td>EoMT</td><td></td><td>316M</td><td>669</td><td>128↑99</td><td>44.3↓6.1</td></tr></table>
|
| 162 |
+
|
| 163 |
+
Table 2. Pre-training. EoMT performs significantly better with advanced pre-training, i.e., DINOv2 [51] or EVA-02 [28]. Evaluated on COCO val2017 [43].
|
| 164 |
+
|
| 165 |
+
<table><tr><td>Model</td><td>Size</td><td>Params</td><td>GFLOPS</td><td>FPS</td><td>PQ</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>1209M</td><td>2510</td><td>20</td><td>57.7</td></tr><tr><td>EoMT w/ Masking</td><td>g</td><td>1164M</td><td>2689</td><td>35↑15</td><td>57.2↓0.5</td></tr><tr><td>EoMT</td><td></td><td>1164M</td><td>2261</td><td>55↑35</td><td>57.0↓0.7</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>349M</td><td>830</td><td>29</td><td>57.1</td></tr><tr><td>EoMT w/ Masking</td><td>L</td><td>316M</td><td>828</td><td>61↑32</td><td>56.2↓0.9</td></tr><tr><td>EoMT</td><td></td><td>316M</td><td>669</td><td>128↑99</td><td>56.0↓1.1</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>121M</td><td>347</td><td>32</td><td>54.4</td></tr><tr><td>EoMT w/ Masking</td><td>B</td><td>93M</td><td>286</td><td>104↑72</td><td>51.5↓2.9</td></tr><tr><td>EoMT</td><td></td><td>93M</td><td>216</td><td>261↑229</td><td>50.6↓3.8</td></tr><tr><td>ViT-Adapter + M2F</td><td></td><td>47M</td><td>165</td><td>33</td><td>50.5</td></tr><tr><td>EoMT w/ Masking</td><td>S</td><td>24M</td><td>89</td><td>108↑75</td><td>46.1↓4.4</td></tr><tr><td>EoMT</td><td></td><td>24M</td><td>68</td><td>330↑297</td><td>44.7↓5.8</td></tr></table>
|
| 166 |
+
|
| 167 |
+
Table 3. Model size. EoMT performs significantly better as the ViT [23] model size increases. Evaluated on COCO val2017 [43].
|
| 168 |
+
|
| 169 |
+
only 1.1 or 1.2, whereas it increases to 3.9 and 6.1 for ImageNet-21K [22] and ImageNet-1K [54], respectively. These results confirm our hypothesis that large-scale pretraining renders complex task-specific components increasingly redundant. We expect that the masked image modeling pre-training objectives of DINOv2 and EVA-02 contribute to this effect, as it enhances the semantic understanding of patches, which is essential for image segmentation [36].
|
| 170 |
+
|
| 171 |
+
Impact of model size. So far, we have only shown results for one model size, i.e., ViT-L. In Tab. 3, we assess the impact of model size on the importance of task-specific components. The results show that the relative performance of EoMT compared to ViT-Adapter + M2F improves as the size of the model increases. While EoMT lags behind the more complex model by 5.8 PQ for ViT-S, the performance gap narrows significantly as the model scales up, becoming only 0.7 PQ for ViT-g. This shows that increasing the capacity of the ViT, combined with strong pre-training, allows EoMT to better solve the image segmentation task. This further confirms our hypothesis that the necessity for task-specific components decreases as the capacity of the model increases. At the same time, we also observe that replacing the complex ViT-Adapter + M2F model with EoMT significantly improves the prediction speed at all model sizes. Interestingly, this allows EoMT to use larger models and
|
| 172 |
+
|
| 173 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Backbone</td><td rowspan="2">Pre-training</td><td rowspan="2">Params</td><td colspan="4">COCO val2017 [43]</td><td colspan="4">ADE20K val [70]</td></tr><tr><td>Input size</td><td>GFLOPs</td><td>FPS</td><td>PQ</td><td>Input size</td><td>GFLOPs</td><td>FPS</td><td>PQ</td></tr><tr><td>Mask2Former†[15]</td><td>Swin-L [45]</td><td>IN21K</td><td>216M</td><td>8002</td><td>868</td><td>24</td><td>57.8</td><td>6402</td><td>-</td><td>33</td><td>48.1</td></tr><tr><td>kMaxX-DeepLab [66]</td><td>ConvNext-L [46]</td><td>IN21K</td><td>232M</td><td>12812</td><td>-</td><td>-</td><td>58.0</td><td>12812</td><td>1302</td><td>-</td><td>50.9</td></tr><tr><td>OneFormer†[35]</td><td>DiNAT-L [30]</td><td>IN21K</td><td>223M</td><td>8002</td><td>736</td><td>20</td><td>58.0</td><td>12802</td><td>1369</td><td>10</td><td>51.5</td></tr><tr><td>OneFormer†[35]</td><td>DiNAT-L [30]</td><td>IN21K</td><td>223M</td><td>-</td><td>-</td><td>-</td><td>-</td><td>12802</td><td>1369</td><td>10</td><td>53.5c</td></tr><tr><td>MaskDINO†[40]</td><td>Swin-L [45]</td><td>IN21K</td><td>223M</td><td>8002</td><td>1326</td><td>14</td><td>58.3</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Mask2Former‡[15]</td><td>ViT-Adapter-L‡[13]</td><td>DINOv2</td><td>349M</td><td>6402</td><td>830</td><td>29</td><td>57.1</td><td>6402</td><td>830</td><td>29</td><td>51.8c</td></tr><tr><td>Mask2Former‡[15]</td><td>ViT-Adapter-L‡[13]</td><td>DINOv2</td><td>354M</td><td>12802</td><td>4817</td><td>10</td><td>59.7</td><td>12802</td><td>4817</td><td>10</td><td>53.0c</td></tr><tr><td>Mask2Former‡[15]</td><td>ViT-Adapter-g‡[13]</td><td>DINOv2</td><td>1209M</td><td>6402</td><td>2510</td><td>20</td><td>57.7</td><td>6402</td><td>2510</td><td>20</td><td>52.6c</td></tr><tr><td>Mask2Former‡[15]</td><td>ViT-Adapter-g‡[13]</td><td>DINOv2</td><td>1216M</td><td>12802</td><td>13790</td><td>6</td><td>59.9</td><td>12802</td><td>13790</td><td>6</td><td>54.2c</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>DINOv2</td><td>316M</td><td>6402</td><td>669</td><td>128</td><td>56.0</td><td>6402</td><td>669</td><td>128</td><td>50.6c</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>DINOv2</td><td>322M</td><td>12802</td><td>4146</td><td>30</td><td>58.3</td><td>12802</td><td>4146</td><td>30</td><td>51.7c</td></tr><tr><td>EoMT (Ours)</td><td>ViT-g [51]</td><td>DINOv2</td><td>1164M</td><td>6402</td><td>2261</td><td>55</td><td>57.0</td><td>6402</td><td>2261</td><td>55</td><td>51.3c</td></tr><tr><td>EoMT (Ours)</td><td>ViT-g [51]</td><td>DINOv2</td><td>1171M</td><td>12802</td><td>12712</td><td>12</td><td>59.2</td><td>12802</td><td>12712</td><td>12</td><td>52.8c</td></tr></table>
|
| 174 |
+
|
| 175 |
+
Table 4. EoMT for panoptic segmentation. ${}^{ \dagger }$ During inference,these models resize the shortest side of images to the indicated scale,while preserving the aspect ratio. ${}^{ \ddagger }$ Our re-implementation. ${}^{c}$ Models for these ADE20K results are pre-trained for COCO panoptic segmentation.
|
| 176 |
+
|
| 177 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Backbone</td><td rowspan="2">Pre-training</td><td rowspan="2">Params</td><td colspan="4">Cityscapes val [17]</td><td colspan="4">ADE20K val [70]</td></tr><tr><td>Input size</td><td>GFLOPs</td><td>FPS</td><td>mIoU</td><td>Input size</td><td>GFLOPs</td><td>FPS</td><td>mIoU</td></tr><tr><td>Mask2Former†[15]</td><td>Swin-L [45]</td><td>IN21K</td><td>216M</td><td>1024 × 2048</td><td>-</td><td>14</td><td>83.3</td><td>6402</td><td>-</td><td>33</td><td>56.1</td></tr><tr><td>MaskDINO†[40]</td><td>Swin-L [45]</td><td>IN21K</td><td>223M</td><td>-</td><td>-</td><td>-</td><td>-</td><td>6402</td><td>-</td><td>-</td><td>56.6</td></tr><tr><td>OneFormer†[35]</td><td>ConvNext-XL [46]</td><td>IN21K</td><td>373M</td><td>1024 × 2048</td><td>775</td><td>7</td><td>83.6</td><td>6402</td><td>607</td><td>21</td><td>57.4</td></tr><tr><td>OneFormer†[35]</td><td>DiNAT-L [30]</td><td>IN21K</td><td>223M</td><td>1024 × 2048</td><td>450</td><td>14</td><td>83.1</td><td>8962</td><td>678</td><td>19</td><td>58.1</td></tr><tr><td>kMaX-DeepLab [66]</td><td>ConvNext-L [46]</td><td>IN21K</td><td>232M</td><td>1025 × 2049</td><td>1673</td><td>-</td><td>83.5</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Mask2Former [15]</td><td>ViT-L [23]</td><td>DINOv2 + DA</td><td>-</td><td>896 × 1792</td><td>-</td><td>-</td><td>84.8</td><td>8962</td><td>-</td><td>-</td><td>59.4</td></tr><tr><td>Mask2Former†[15]</td><td>ViT-Adapter-L‡[13]</td><td>DINOv2</td><td>351M</td><td>10242</td><td>5200</td><td>7</td><td>84.5</td><td>5122</td><td>910</td><td>21</td><td>58.9</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>DINOv2</td><td>319M</td><td>10242</td><td>4350</td><td>25</td><td>84.2</td><td>5122</td><td>721</td><td>92</td><td>58.4</td></tr></table>
|
| 178 |
+
|
| 179 |
+
Table 5. EoMT for semantic segmentation. ${}^{ \dagger }$ On ADE20K,these models resize the shortest side of images to the indicated scale during inference, while preserving the aspect ratio. ${}^{ \ddagger }$ Our re-implementation. ViT-Adapter + Mask2Former and EoMT use windowed inference, dividing each image into multiple crops, and the FLOPs and FPS results account for this. DA is Depth Anything [65].
|
| 180 |
+
|
| 181 |
+
obtain higher scores than ViT-Adapter + M2F, while being significantly faster. For example, EoMT with ViT-L obtains a PQ of 56.0 at 128 FPS, which is both significantly faster and more accurate than ViT-Adapter + M2F with ViT-B, at a PQ of 54.4 at 32 FPS. As shown in Fig. 1, EoMT obtains a better PQ vs. FPS trade-off across all model sizes that we tested. This shows the power of EoMT and its simplicity.
|
| 182 |
+
|
| 183 |
+
EoMT on different benchmarks. To demonstrate EoMT's versatility across image segmentation tasks and datasets, we evaluate its performance on multiple benchmarks. For panoptic segmentation, as shown in Tab. 4, EoMT achieves a significantly better PQ vs. FPS trade-off than ViT-Adapter + M2F [13, 15] on COCO [43], and a similar trade-off on ADE20K [70], while being significantly simpler. Moreover, on COCO, EoMT achieves a PQ that is on par with existing state-of-the-art methods while being up to $2.1 \times$ faster, highlighting the strength of our simplified design.
|
| 184 |
+
|
| 185 |
+
For semantic segmentation on Cityscapes [17] and ADE20K, Tab. 5 shows that EoMT performs comparably to the more complex ViT-Adapter + M2F baseline in terms of mIoU, while being considerably faster, i.e., up to $4.4 \times$ . Compared to other state-of-the-art methods, EoMT again obtains competitive performance, even though it is a much simpler and more efficient model. This shows that EoMT is also highly effective for semantic segmentation, providing
|
| 186 |
+
|
| 187 |
+
a strong balance between speed and accuracy.
|
| 188 |
+
|
| 189 |
+
For instance segmentation on COCO, we see similar positive results in Tab. 6. Although the overall accuracy drop is slightly higher than for the other tasks, EoMT still achieves a better AP vs. FPS trade-off than ViT-Adapter + M2F, e.g., 48.8 AP at 30 FPS vs. 47.6 AP at 29 FPS. Overall, these results demonstrate the strength and general applicability of EoMT, as it performs effectively across a variety of segmentation tasks and datasets.
|
| 190 |
+
|
| 191 |
+
Importance of mask annealing. In Tab. 1, we observed that mask annealing allows EoMT to remove masked attention during inference to improve efficiency, while keeping the PQ roughly the same. In Tab. 7, we compare mask annealing to alternative strategies. The first alternative approach of training with masked attention and simply disabling it during inference causes a severe performance drop, showing that using a different strategy at training and inference time is ineffective. The second alternative approach of disabling masked attention during both training and inference does not fail catastrophically, but it results in a PQ that is still significantly lower than when using masked attention. In contrast, mask annealing enables EoMT to leverage masked attention during early training stages to improve convergence and roughly maintain the PQ, while eliminating the need for masking during inference, thereby more
|
| 192 |
+
|
| 193 |
+
<table><tr><td>Method</td><td>Backbone</td><td>PT</td><td>Params</td><td>Input</td><td>GFLOPs</td><td>FPS</td><td>AP</td></tr><tr><td>OneFormer†[35]</td><td>DiNAT-L [30]</td><td>I</td><td>223M</td><td>8002</td><td>736</td><td>20</td><td>49.2</td></tr><tr><td>Mask2Former†[15]</td><td>Swin-L [45]</td><td>I</td><td>216M</td><td>8002</td><td>868</td><td>24</td><td>50.1</td></tr><tr><td>MaskDINO†[40]</td><td>Swin-L [45]</td><td>I</td><td>223M</td><td>8002</td><td>1326</td><td>14</td><td>52.3</td></tr><tr><td>Mask2Former‡[15]</td><td>ViT-Adapter-L‡[13]</td><td>D</td><td>349M</td><td>6402</td><td>830</td><td>29</td><td>47.6</td></tr><tr><td>Mask2Former‡[15]</td><td>ViT-Adapter-L‡[13]</td><td>D</td><td>354M</td><td>12802</td><td>4817</td><td>10</td><td>51.4</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>D</td><td>316M</td><td>6402</td><td>669</td><td>128</td><td>45.2</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>D</td><td>322M</td><td>12802</td><td>4146</td><td>30</td><td>48.8</td></tr></table>
|
| 194 |
+
|
| 195 |
+
Table 6. EoMT for instance segmentation. Evaluated on COCO val2017 [43]. ${}^{ \dagger }$ During inference,these models resize the shortest side of images to the indicated scale while preserving the aspect ratio. ${}^{ \ddagger }$ Our re-implementation. PT indicates pre-training (I for ImageNet-21K [22] and D for DINOv2 [51]).
|
| 196 |
+
|
| 197 |
+
<table><tr><td colspan="2">Training</td><td>Inference</td><td>GFLOPs</td><td>FPS</td><td>PQ</td></tr><tr><td>✓</td><td>Masking</td><td>✓ Masking</td><td>828</td><td>61</td><td>56.2</td></tr><tr><td>✓</td><td>Masking</td><td>× w/o Masking</td><td>669</td><td>128</td><td>27.4↓28.8</td></tr><tr><td>×</td><td>w/o Masking</td><td>× w/o Masking</td><td>669</td><td>128</td><td>53.2↓3.0</td></tr><tr><td colspan="2">✓→ × Mask annealing</td><td>× w/o Masking</td><td>669</td><td>128</td><td>56.0↓0.2</td></tr></table>
|
| 198 |
+
|
| 199 |
+
than doubling inference speed. As such, it is a key component of EoMT. More results on the general applicability of mask annealing are provided in Appendix B and Tab. C.
|
| 200 |
+
|
| 201 |
+
# 4.3. Further Experiments
|
| 202 |
+
|
| 203 |
+
Out-of-distribution generalization. Since EoMT is ViT-based, it supports initialization with vision foundation model (VFMs) like DINOv2 [51], which achieve state-of-the-art out-of-distribution (OOD) generalization [25, 37]. In contrast, prior segmentation models typically rely on ConvNeXt [46], Swin [45], or other non-plain ViT backbones, which cannot leverage VFM pre-training due to their incompatible architectures. We evaluate OOD generalization on the BRAVO [58] benchmark, by training on Cityscapes and evaluating on multiple OOD datasets. Tab. 8 shows that DINOv2-based models demonstrate superior OOD generalization, outperforming the Swin-based model by more than $7.8\mathrm{mIoU}$ despite similar in-distribution performance on Cityscapes. Importantly, EoMT leverages the strong OOD performance of VFMs while being significantly more efficient than existing VFM-based methods, which is essential for real-world applications. Further analysis of OOD confidence estimation, showing that EoMT produces significantly more reliable confidence scores than ViT-Adapter + M2F, is provided in Appendix C.
|
| 204 |
+
|
| 205 |
+
Token merging. EoMT benefits from ongoing ViT advancements by using the plain ViT architecture. One such advancement is token merging [7, 48, 50], which improves efficiency by merging semantically redundant tokens while preserving segmentation accuracy. As shown in Tab. 9, EoMT is compatible with ALGM [50], the state-of-the-art token merging method for semantic segmentation. With ALGM, EoMT's throughput increases by up to $31\%$ with
|
| 206 |
+
|
| 207 |
+
Table 7. Mask annealing. Effectively removes masked attention during inference. When never masking, intermediate masks are not predicted or supervised. Evaluated on COCO val2017 [43].
|
| 208 |
+
|
| 209 |
+
<table><tr><td>Method</td><td>Backbone</td><td>Pre-training</td><td>mIoUid</td><td>mIoU0D</td></tr><tr><td>M2F [15]</td><td>Swin-L [45]</td><td>IN21K</td><td>83.3</td><td>69.4</td></tr><tr><td>M2F‡ [15]</td><td>ViT-Adapter-L‡ [13]</td><td>DINOv2</td><td>84.5</td><td>78.0</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>DINOv2</td><td>84.2</td><td>77.2</td></tr></table>
Table 8. Out-of-distribution generalization. Despite similar in-distribution performance on Cityscapes val (mIoU<sub>ID</sub>), DINOv2-based models generalize significantly better out-of-distribution (mIoU<sub>OOD</sub>). Trained on Cityscapes train [17], evaluated on BRAVO [58]. $^{\ddagger}$ Our re-implementation.
<table><tr><td>Method</td><td>Token merging</td><td>GFLOPs</td><td>Throughput</td><td>mIoU</td></tr><tr><td rowspan="2">ViT-Adapter + M2F</td><td>×</td><td>5200</td><td>9</td><td>84.5</td></tr><tr><td>✓</td><td>3031</td><td>9↑0%</td><td>84.3</td></tr><tr><td rowspan="2">EoMT (Ours)</td><td>×</td><td>4350</td><td>29</td><td>84.2</td></tr><tr><td>✓</td><td>1183</td><td>38↑31%</td><td>84.2</td></tr></table>
Table 9. Token merging. EoMT's ViT-only design is no longer bottlenecked by additional task-specific components, enabling ViT optimizations like ALGM [50]. Throughput is in images per second, with a batch size of 32. Evaluated on Cityscapes val [17].
In contrast, while ViT-Adapter + M2F sees a reduction in FLOPs, it gains no throughput, as it is bottlenecked by its additional components and the overhead of 'unmerging' tokens for ViT-Adapter interaction. This highlights the benefit of keeping EoMT close to the plain ViT.
# 5. Conclusion
In this paper, we show that task-specific components for image segmentation with Vision Transformers (ViTs) become increasingly redundant as model size and pre-training are scaled up. By removing all such components, we introduce the Encoder-only Mask Transformer (EoMT), a segmentation model that purely uses a plain ViT, revealing that your ViT is secretly an image segmentation model. EoMT delivers both high accuracy and impressive speed, with a significantly simpler design than existing models. Our findings indicate that compute resources should not be spent on adding architectural complexity, but rather on scaling the ViT and pre-training, as we found that these factors are critical in improving performance. As a simple and scalable approach, EoMT provides a solid foundation for next-generation segmentation models that readily adapts to advances in the rapidly evolving fields of Transformers and foundation models.
Acknowledgements. This work was supported by Chips Joint Undertaking (Chips JU) in EdgeAI "Edge AI Technologies for Optimised Performance Embedded Processing" project, grant agreement no. 101097300. Niccolò Cavagnero acknowledges travel support from the European Union's Horizon 2020 research and innovation program, grant agreement no. 951847. Giuseppe Averta was supported by FAIR - Future Artificial Intelligence Research, Next-GenEU (PNRR - MISS. 4 COMP. 2, INV. 1.3 - D.D. 1555 11/10/2022, PE00000013). We also acknowledge the CINECA award under the ISCRA initiative and the Dutch national e-infrastructure with the support of the SURF Cooperative, grant agreement no. EINF-9663 and EINF-11151, financed by the Dutch Research Council (NWO), for the availability of high-performance computing resources and support.
# References

[1] Ibrahim Alabdulmohsin, Xiaohua Zhai, Alexander Kolesnikov, and Lucas Beyer. Getting ViT in Shape: Scaling Laws for Compute-Optimal Model Design. In NeurIPS, 2023. 1
[2] Jason Ansel, Edward Yang, Horace He, Natalia Gimelshein, Animesh Jain, Michael Voznesensky, Bin Bao, Peter Bell, David Berard, Evgeni Burovski, Geeta Chauhan, Anjali Chourdia, Will Constable, Alban Desmaison, Zachary DeVito, Elias Ellison, Will Feng, Jiong Gong, Michael Gschwind, Brian Hirsh, Sherlock Huang, Kshiteej Kalambarkar, Laurent Kirsch, Michael Lazos, Mario Lezcano, Yanbo Liang, Jason Liang, Yinghai Lu, C. K. Luk, Bert Maher, et al. PyTorch 2: Faster Machine Learning Through Dynamic Python Bytecode Transformation and Graph Compilation. In ASPLOS, 2024. 6, 13
[3] Ali Athar, Alexander Hermans, Jonathon Luiten, Deva Ramanan, and Bastian Leibe. TarViS: A Unified Approach for Target-Based Video Segmentation. In CVPR, 2023. 2
[4] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E. Hinton. Layer Normalization. arXiv preprint arXiv:1607.06450, 2016. 3
[5] Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. BEiT: BERT Pre-Training of Image Transformers. In ICLR, 2022. 1, 5
[6] Lucas Beyer, Pavel Izmailov, Alexander Kolesnikov, Mathilde Caron, Simon Kornblith, Xiaohua Zhai, Matthias Minderer, Michael Tschannen, Ibrahim Alabdulmohsin, and Filip Pavetic. FlexiViT: One Model for All Patch Sizes. In CVPR, 2023. 12
[7] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token Merging: Your ViT But Faster. In ICLR, 2023. 2, 8, 13
[8] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-End Object Detection with Transformers. In ECCV, 2020. 2, 3
[9] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging Properties in Self-Supervised Vision Transformers. In ICCV, 2021. 3
[10] Niccolò Cavagnero, Gabriele Rosi, Claudia Cuttano, Francesca Pistilli, Marco Ciccone, Giuseppe Averta, and Fabio Cermelli. PEM: Prototype-based Efficient MaskFormer for Image Segmentation. In CVPR, 2024. 2
[11] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation. In ECCV, 2018. 2
[12] Wuyang Chen, Xianzhi Du, Fan Yang, Lucas Beyer, Xiaohua Zhai, Tsung-Yi Lin, Huizhong Chen, Jing Li, Xiaodan Song, Zhangyang Wang, et al. A Simple Single-Scale Vision Transformer for Object Localization and Instance Segmentation. In ECCV, 2022. 3
[13] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision Transformer Adapter for Dense Predictions. In ICLR, 2023. 1, 3, 6, 7, 8, 14, 15
[14] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-Pixel Classification is Not All You Need for Semantic Segmentation. In NeurIPS, 2021. 1, 2, 3, 4
[15] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention Mask Transformer for Universal Image Segmentation. In CVPR, 2022. 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15
[16] Jack Choquette. NVIDIA Hopper H100 GPU: Scaling Performance. IEEE Micro, 43(3):9-17, 2023. 2
[17] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The Cityscapes Dataset for Semantic Urban Scene Understanding. In CVPR, 2016. 5, 7, 8, 12, 14
[18] Tri Dao. FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning. In ICLR, 2024. 2, 6
[19] Tri Dao, Daniel Y Fu, Stefano Ermon, Atri Rudra, and Christopher Re. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. In NeurIPS, 2022. 2
[20] Timothee Darcet, Maxime Oquab, Julien Mairal, and Piotr Bojanowski. Vision Transformers Need Registers. In ICLR, 2024. 13
[21] Mostafa Dehghani, Josip Djolonga, Basil Mustafa, Piotr Padlewski, Jonathan Heek, Justin Gilmer, Andreas Peter Steiner, Mathilde Caron, Robert Geirhos, Ibrahim Alabdulmohsin, Rodolphe Jenatton, Lucas Beyer, Michael Tschannen, Anurag Arnab, Xiao Wang, Carlos Riquelme Ruiz, Matthias Minderer, Joan Puigcerver, Utku Evci, Manoj Kumar, Sjoerd Van Steenkiste, Gamaleldin Fathy Elsayed, Aravindh Mahendran, Fisher Yu, Avital Oliver, Fantine Huot, Jasmijn Bastings, Mark Collier, Alexey A. Gritsenko, Vighnesh Birodkar, Cristina Nader Vasconcelos, Yi Tay, Thomas Mensink, Alexander Kolesnikov, Filip Pavetic, Dustin Tran, Thomas Kipf, Mario Lucic, Xiaohua Zhai, Daniel Keysers, Jeremiah J. Harmsen, and Neil Houlsby. Scaling Vision Transformers to 22 Billion Parameters. In ICML, 2023. 1
[22] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In CVPR, 2009. 3, 6, 8, 13
[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In ICLR, 2021. 1, 3, 6, 7, 8, 13, 14
[24] Anne C. Elster and Tor A. Haugdahl. Nvidia Hopper GPU and Grace CPU Highlights. Computing in Science & Engineering, 24(2):95-100, 2022. 2
[25] Bruno B. Englert, Fabrizio J. Piva, Tommie Kerssies, Daan De Geus, and Gijs Dubbelman. Exploring the Benefits of Vision Foundation Models for Unsupervised Domain Adaptation. In CVPR Workshops, 2024. 8
[26] Mark Everingham, Luc Van Gool, Christopher K. I. Williams, John Winn, and Andrew Zisserman. The Pascal Visual Object Classes (VOC) Challenge. IJCV, 2010. 5
[27] Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, and Wenyu Liu. You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection. In NeurIPS, 2021. 3
[28] Yuxin Fang, Quan Sun, Xinggang Wang, Tiejun Huang, Xinlong Wang, and Yue Cao. EVA-02: A Visual Representation for Neon Genesis. Image and Vision Computing, 2024. 1, 2, 3, 6, 13
[29] Golnaz Ghiasi, Yin Cui, Aravind Srinivas, Rui Qian, Tsung-Yi Lin, Ekin D Cubuk, Quoc V Le, and Barret Zoph. Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation. In CVPR, 2021. 12
[30] Ali Hassani and Humphrey Shi. Dilated Neighborhood Attention Transformer. arXiv preprint arXiv:2209.15001, 2022. 3, 7, 8, 13
[31] Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. Neighborhood Attention Transformer. In CVPR, 2023. 3
[32] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep Residual Learning for Image Recognition. In CVPR, 2016. 3
[33] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask R-CNN. In ICCV, 2017. 2
[34] Jie Hu, Linyan Huang, Tianhe Ren, Shengchuan Zhang, Rongrong Ji, and Liujuan Cao. You Only Segment Once: Towards Real-Time Panoptic Segmentation. In CVPR, 2023. 4
[35] Jitesh Jain, Jiachen Li, Mang Tik Chiu, Ali Hassani, Nikita Orlov, and Humphrey Shi. OneFormer: One Transformer To Rule Universal Image Segmentation. In CVPR, 2023. 1, 2, 4, 7, 8, 13
[36] Tommie Kerssies, Daan De Geus, and Gijs Dubbelman. How to Benchmark Vision Foundation Models for Semantic Segmentation? In CVPR Workshops, 2024. 2, 3, 5, 6, 13
[37] Tommie Kerssies, Daan de Geus, and Gijs Dubbelman. First Place Solution to the ECCV 2024 BRAVO Challenge: Evaluating Robustness of Vision Foundation Models for Semantic Segmentation. arXiv preprint arXiv:2409.17208, 2024. 8
[38] Alexander Kirillov, Ross Girshick, Kaiming He, and Piotr Dollár. Panoptic Feature Pyramid Networks. In CVPR, 2019. 2
[39] Alexander Kirillov, Kaiming He, Ross Girshick, Carsten Rother, and Piotr Dollár. Panoptic Segmentation. In CVPR, 2019. 1, 2, 5
[40] Feng Li, Hao Zhang, Huaizhe Xu, Shilong Liu, Lei Zhang, Lionel M. Ni, and Heung-Yeung Shum. Mask DINO: Towards a Unified Transformer-Based Framework for Object Detection and Segmentation. In CVPR, 2023. 2, 4, 7, 8
[41] Xiangtai Li, Haobo Yuan, Wei Li, Henghui Ding, Size Wu, Wenwei Zhang, Yining Li, Kai Chen, and Chen Change Loy. OMG-Seg: Is One Model Good Enough For All Segmentation? In CVPR, 2024. 2
[42] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring Plain Vision Transformer Backbones for Object Detection. In ECCV, 2022. 4
[43] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In ECCV, 2014. 1, 5, 6, 7, 8, 13, 14, 15
[44] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature Pyramid Networks for Object Detection. In CVPR, 2017. 3
[45] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin Transformer: Hierarchical Vision Transformer Using Shifted Windows. In ICCV, 2021. 3, 7, 8, 13, 14
[46] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A ConvNet for the 2020s. In CVPR, 2022. 3, 7, 8
[47] Ilya Loshchilov and Frank Hutter. Decoupled Weight Decay Regularization. In ICLR, 2019. 5
[48] Chenyang Lu, Daan de Geus, and Gijs Dubbelman. Content-Aware Token Sharing for Efficient Semantic Segmentation With Vision Transformers. In CVPR, 2023. 2, 8
[49] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation. In 3DV, 2016. 12
[50] Narges Norouzi, Svetlana Orlova, Daan de Geus, and Gijs Dubbelman. ALGM: Adaptive Local-then-Global Token Merging for Efficient Semantic Segmentation with Plain Vision Transformers. In CVPR, 2024. 2, 8, 13
[51] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. DINOv2: Learning Robust Visual Features without Supervision. TMLR, 2024. 1, 2, 3, 5, 6, 7, 8, 13, 15
[52] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning Transferable Visual Models From Natural Language Supervision. In ICML, 2021. 3
[53] Meta Research. fvcore, 2023. 6
[54] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. ImageNet Large Scale Visual Recognition Challenge. IJCV, 2015. 6
[55] Robin Strudel, Ricardo Garcia, Ivan Laptev, and Cordelia Schmid. Segmenter: Transformer for Semantic Segmentation. In ICCV, 2021. 2
[56] Hugo Touvron, Matthieu Cord, and Hervé Jégou. DeiT III: Revenge of the ViT. In ECCV, 2022. 6, 13
[57] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is All you Need. In NeurIPS, 2017. 2, 3
[58] Tuan-Hung Vu, Eduardo Valle, Andrei Bursuc, Tommie Kerssies, Daan de Geus, Gijs Dubbelman, Long Qian, Bingke Zhu, Yingying Chen, Ming Tang, Jinqiao Wang, Tomáš Vojíř, Jan Šochman, Jiří Matas, Michael Smith, Frank Ferrie, Shamik Basu, Christos Sakaridis, and Luc Van Gool. The BRAVO Semantic Segmentation Challenge Results in UNCV2024. In ECCV Workshops, 2024. 8, 14
[59] Huiyu Wang, Yukun Zhu, Hartwig Adam, Alan Yuille, and Liang-Chieh Chen. MaX-DeepLab: End-to-End Panoptic Segmentation with Mask Transformers. In CVPR, 2021. 2, 3
[60] Ross Wightman. PyTorch Image Models, 2019. 12, 13
[61] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-Art Natural Language Processing. In EMNLP Demos, 2020. 12
[62] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detectron2, 2019. 5
[63] Chunlong Xia, Xinliang Wang, Feng Lv, Xin Hao, and Yifeng Shi. ViT-CoMer: Vision Transformer with Convolutional Multi-scale Feature Interaction for Dense Predictions. In CVPR, 2024. 1, 3
[64] Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, and Ping Luo. SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers. In NeurIPS, 2021. 3
[65] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data. In CVPR, 2024. 7
[66] Qihang Yu, Huiyu Wang, Siyuan Qiao, Maxwell Collins, Yukun Zhu, Hartwig Adam, Alan Yuille, and Liang-Chieh Chen. k-means Mask Transformer. In ECCV, 2022. 1, 2, 4, 7
[67] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling Vision Transformers. In CVPR, 2022. 1
[68] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid Loss for Language Image Pre-Training. In ICCV, 2023. 1
[69] Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia. Pyramid Scene Parsing Network. In CVPR, 2017. 2
[70] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene Parsing through ADE20K Dataset. In CVPR, 2017. 5, 7, 12, 13
[71] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: Deformable Transformers for End-to-End Object Detection. In ICLR, 2021. 3
# Appendix
# Table of contents:

§A: Implementation Details
§B: Detailed Experimental Analysis
§C: Out-of-distribution Confidence Estimation
§D: Qualitative Examples

# A. Implementation Details
# A.1. Models
Visualizations of model configurations. In Sec. 3.2, we explain how we gradually remove task-specific components. We visualize the architectures of the resulting intermediate configurations in Fig. A. Here, the subscript $i$ in $F_{i}$ indicates that the features have a resolution of $\frac{1}{i}$ of the input image. The visualized model numbers correspond to those reported in Tab. 1.
Libraries. For Mask2Former [15], we use the implementation of Hugging Face Transformers [61]. For pre-trained models, we use timm [60].
Pre-trained models. In Tab. A, we specify the timm model weights that we use for the experiments in this work. To support a patch size of $16 \times 16$ and different input sizes, we resize the patch embedding kernel and positional embeddings of pre-trained models following the FlexiViT [6] implementation of timm. Specifically, the patch embedding kernel is resized to a $16 \times 16$ patch size by approximately inverting the effect of patch resizing. The positional embeddings are resized to the required token grid size by using bicubic interpolation. The patch embedding kernel and positional embeddings are resized prior to fine-tuning, and keep the same size during fine-tuning.
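
As an illustration of the positional-embedding step, here is a minimal sketch; `resize_pos_embed` is our own helper name, and timm's FlexiViT-based code additionally resizes the patch embedding kernel via an approximate inverse of patch resizing, which is omitted here.

```python
import torch
import torch.nn.functional as F

def resize_pos_embed(pos_embed: torch.Tensor, old_grid: int, new_grid: int) -> torch.Tensor:
    """Bicubically resize ViT positional embeddings to a new token grid.
    pos_embed: (1, old_grid**2, dim); class/register token embeddings are
    assumed to be handled separately."""
    dim = pos_embed.shape[-1]
    # (1, N, dim) -> (1, dim, H, W) for spatial interpolation
    grid = pos_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
    return grid.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)

# Example: DINOv2 is pre-trained at 518x518 with 14x14 patches (37x37 grid);
# fine-tuning at 640x640 with 16x16 patches needs a 40x40 grid.
pe = resize_pos_embed(torch.randn(1, 37 * 37, 1024), old_grid=37, new_grid=40)
print(pe.shape)  # torch.Size([1, 1600, 1024])
```
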
Queries. In accordance with Mask2Former [15], the models for panoptic and instance segmentation use $K = 200$ queries, while the models for semantic segmentation use $K = 100$ queries. For ViT-S and ViT-B we use $L_{2} = 3$, for ViT-L we use $L_{2} = 4$, and for ViT-g we use $L_{2} = 5$. For EoMT, adding 200 tokens to a model that processes $640 \times 640$ images with a $16 \times 16$ patch size results in a $12.5\%$ increase in the number of tokens processed per ViT block, but only for the last $L_{2}$ ViT blocks. As $L_{1} = 20$ and $L_{2} = 4$ for ViT-L, the total number of tokens processed across the entire ViT increases by only $2.1\%$.
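
To make the arithmetic explicit (plain Python, using only the figures quoted above):

```python
patch_tokens = (640 // 16) ** 2       # 1600 patch tokens per 640x640 image
queries = 200                         # K = 200 for panoptic/instance models
print(queries / patch_tokens)         # 0.125 -> +12.5% tokens, last L2 blocks only

L1, L2 = 20, 4                        # ViT-L has L1 + L2 = 24 blocks
extra = L2 * queries                  # query tokens processed in the last L2 blocks
total = (L1 + L2) * patch_tokens      # patch tokens processed across all blocks
print(round(extra / total, 3))        # 0.021 -> +2.1% over the entire ViT
```
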
# A.2. Training
Augmentation. During training, we apply the same data augmentation techniques as used by Mask2Former [15]. Specifically, training images undergo random horizontal flipping, random scale jittering, padding if necessary, and random cropping. Random color jittering is additionally applied for ADE20K [70] and Cityscapes [17]. For panoptic and instance segmentation, we use large-scale jitter [29]
(between $0.1\times$ and $2.0\times$), and for semantic segmentation we use normal-scale jitter (between $0.5\times$ and $2.0\times$).

Figure A. Removing task-specific components. We visualize the architectures of the resulting intermediate configurations: (1) w/o ViT-Adapter, (2) w/o Pixel Decoder, (3) w/o Multi-scale.
Loss function. To supervise our models, we adopt the same loss function as Mask2Former [15]. Specifically, across all tasks and datasets, we use the cross-entropy (CE) loss for the class logits, and the binary cross-entropy (BCE) and the Dice loss [49] for the mask logits. The individual losses are weighted using scalars, resulting in the total loss function:
$$
\mathcal{L}_{\mathrm{tot}} = \lambda_{\mathrm{bce}} \mathcal{L}_{\mathrm{bce}} + \lambda_{\mathrm{dice}} \mathcal{L}_{\mathrm{dice}} + \lambda_{\mathrm{ce}} \mathcal{L}_{\mathrm{ce}}, \tag{2}
$$
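
A minimal PyTorch sketch of Eq. (2), assuming queries have already been matched to targets (the Hungarian matching and Mask2Former's point-sampled mask supervision are omitted; function names are ours):

```python
import torch
import torch.nn.functional as F

def dice_loss(mask_logits: torch.Tensor, targets: torch.Tensor, eps: float = 1.0):
    """Dice loss [49] over per-query masks: (Q, H, W) logits vs. binary targets."""
    probs = mask_logits.sigmoid().flatten(1)   # (Q, H*W)
    tgt = targets.flatten(1).float()
    num = 2 * (probs * tgt).sum(-1)
    den = probs.sum(-1) + tgt.sum(-1)
    return (1 - (num + eps) / (den + eps)).mean()

def total_loss(class_logits, class_targets, mask_logits, mask_targets,
               l_bce=5.0, l_dice=5.0, l_ce=2.0):
    # Weighted sum of Eq. (2), with the lambda values stated below.
    loss_bce = F.binary_cross_entropy_with_logits(mask_logits, mask_targets.float())
    loss_dice = dice_loss(mask_logits, mask_targets)
    loss_ce = F.cross_entropy(class_logits, class_targets)
    return l_bce * loss_bce + l_dice * loss_dice + l_ce * loss_ce
```
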
<table><tr><td>Model</td><td>Pre-training</td><td>timm model</td></tr><tr><td>ViT-g</td><td>DINOv2 [20, 51]</td><td>vit_giant_patch14_reg4_dinov2</td></tr><tr><td>ViT-L</td><td>DINOv2 [20, 51]</td><td>vit_large_patch14_reg4_dinov2</td></tr><tr><td>ViT-B</td><td>DINOv2 [20, 51]</td><td>vit_base_patch14_reg4_dinov2</td></tr><tr><td>ViT-S</td><td>DINOv2 [20, 51]</td><td>vit_small_patch14_reg4_dinov2</td></tr><tr><td>ViT-L</td><td>EVA-02 [28]</td><td>eva02_large_patch14_224.mim_m38m</td></tr><tr><td>ViT-L</td><td>DeiT-III (ImageNet-21K) [22, 56]</td><td>deit3_large_patch16_384.fb_in22k_ft_in1k</td></tr><tr><td>ViT-L</td><td>DeiT-III (ImageNet-1K) [22, 56]</td><td>deit3_large_patch16_384.fb_in1k</td></tr></table>
Table A. Model specification. For each ViT backbone [23] used in this work, we specify the timm model [60] that we use.
<table><tr><td rowspan="2">Method</td><td rowspan="2">Params</td><td rowspan="2">GFLOPs</td><td rowspan="2">FPS</td><td colspan="3">Panoptic Quality (PQ)</td><td colspan="4">Average Precision (AP)</td></tr><tr><td>All</td><td>Things</td><td>Stuff</td><td>All</td><td>Large</td><td>Medium</td><td>Small</td></tr><tr><td>(0) ViT-Adapter + Mask2Former</td><td>349M</td><td>830</td><td>29</td><td>57.1</td><td>62.7</td><td>48.7</td><td>47.6</td><td>73.2</td><td>53.4</td><td>23.4</td></tr><tr><td>(1) w/o ViT-Adapter</td><td>342M</td><td>700</td><td>36</td><td>56.7</td><td>62.3</td><td>48.3</td><td>46.9</td><td>72.7</td><td>52.9</td><td>22.7</td></tr><tr><td>(2) w/o Pixel decoder</td><td>337M</td><td>685</td><td>62</td><td>56.9</td><td>62.3</td><td>48.6</td><td>46.8</td><td>73.1</td><td>52.6</td><td>22.1</td></tr><tr><td>(3) w/o Multi-scale</td><td>328M</td><td>673</td><td>64</td><td>56.7</td><td>62.2</td><td>48.4</td><td>46.2</td><td>73.1</td><td>52.3</td><td>21.4</td></tr><tr><td>(4) w/o Transformer decoder</td><td>316M</td><td>828</td><td>61</td><td>56.2</td><td>61.4</td><td>48.4</td><td>45.6</td><td>72.1</td><td>51.4</td><td>20.8</td></tr><tr><td>(5) w/o Masking = EoMT</td><td>316M</td><td>669</td><td>128</td><td>56.0</td><td>61.2</td><td>48.2</td><td>45.2</td><td>72.2</td><td>51.0</td><td>20.3</td></tr></table>
Table B. From ViT-Adapter + Mask2Former to EoMT in detail. Evaluated on COCO val2017.
where $\lambda_{\mathrm{bce}}$ , $\lambda_{\mathrm{dice}}$ , and $\lambda_{\mathrm{ce}}$ are set to 5.0, 5.0, and 2.0, respectively, following Mask2Former [15].
Learning rate warm-up. We use a two-stage linear learning rate warm-up for all models. In practice, we first warm up the randomly initialized parameters for 500 iterations, while keeping the pre-trained parameters frozen. After 500 iterations, we warm up the pre-trained parameters for 1000 iterations. In both cases, the initial learning rate is set to 0.
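
A minimal sketch of this schedule as learning-rate multipliers for two optimizer parameter groups (function and group names are ours, not from the official implementation):

```python
import torch

def warmup_multiplier(step: int, pretrained: bool, w1: int = 500, w2: int = 1000) -> float:
    """Stage 1 (steps 0..w1): only randomly initialized params warm up, while
    pre-trained params stay frozen. Stage 2: pre-trained params warm up over w2 steps."""
    if pretrained:
        return 0.0 if step < w1 else min((step - w1) / w2, 1.0)
    return min(step / w1, 1.0)

# Usage with two param groups, [pre-trained, randomly initialized]:
# scheduler = torch.optim.lr_scheduler.LambdaLR(
#     optimizer,
#     lr_lambda=[lambda s: warmup_multiplier(s, True),
#                lambda s: warmup_multiplier(s, False)])
```
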
# A.3. Evaluation
Image processing. For panoptic and instance segmentation, we use padded inference, resizing the longer side of the image to the input size, and padding the shorter side with zeros to create a square image. For semantic segmentation, we apply windowed inference, resizing the shorter side of the image to the input size, and processing the image through the model in several proportionally spaced square crops, in a sliding-window manner [36].
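
A minimal sketch of the padded-inference preprocessing (our own helper; the windowed semantic-segmentation inference of [36] is not shown):

```python
import torch
import torch.nn.functional as F

def pad_to_square(image: torch.Tensor, size: int) -> torch.Tensor:
    """Resize the longer side of a (C, H, W) image to `size`, then zero-pad
    the shorter side on the right/bottom to obtain a square input."""
    _, h, w = image.shape
    scale = size / max(h, w)
    nh, nw = round(h * scale), round(w * scale)
    image = F.interpolate(image[None], size=(nh, nw), mode="bilinear",
                          align_corners=False)[0]
    return F.pad(image, (0, size - nw, 0, size - nh), value=0.0)

x = torch.rand(3, 480, 640)
print(pad_to_square(x, 640).shape)  # torch.Size([3, 640, 640])
```
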
Efficiency measurements. For existing works, we report FLOPs from the respective papers but measure FPS the same way that we measure it for our models, on the same hardware. For ViT-Adapter + M2F and our models, we calculate the FLOPs ourselves. When measuring FPS, torch.compile [2] is disabled for Mask2Former [15] with Swin-L [45] on ADE20K [70] due to compilation errors. On COCO [43], torch.compile only yields a small speedup for this model ($<10\%$). Additionally, mixed precision is not supported for OneFormer [35] with DiNAT-L [30], thus we use full precision here.
Token merging. For our token merging experiment in Sec. 4.3 and Tab. 9, we evaluate the throughput of the model in images per second, following existing work for token merging [7, 50]. This means that we use a batch size of 32, apply ALGM [50] for token merging, and report the
number of images that are processed per second, averaged over the entire validation set. ALGM adaptively determines the number of tokens that should be merged per image, based on image complexity. To allow batch processing, we identify the lowest number of mergeable tokens per image across the batch according to the ALGM token merging criterion, and use that number of merged tokens for all images in the batch.
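
The batching workaround reduces to taking the minimum merge count over the batch. In the sketch below, `algm_num_mergeable` is a hypothetical stand-in for ALGM's actual per-image criterion [50], which is not reproduced here:

```python
import torch
import torch.nn.functional as F

def algm_num_mergeable(tokens: torch.Tensor) -> int:
    """Hypothetical stand-in for ALGM's criterion [50]: count tokens whose
    cosine similarity to the next token exceeds a threshold."""
    sim = F.cosine_similarity(tokens[:-1], tokens[1:], dim=-1)
    return int((sim > 0.9).sum())

def batch_merge_count(batch_tokens: torch.Tensor) -> int:
    """batch_tokens: (B, N, D). Use the minimum per-image count, so no image
    in the batch merges more tokens than the criterion allows."""
    return min(algm_num_mergeable(tokens) for tokens in batch_tokens)

print(batch_merge_count(torch.randn(4, 1600, 1024)))  # small for random tokens
```
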
Importantly, ALGM is applied only during inference. Thus, the throughput improvement in Tab. 9 is achieved simply by applying ALGM to EoMT and processing batches of images, with no additional training required.
# B. Detailed Experimental Analysis
From ViT-Adapter + M2F to EoMT in detail. In Tab. B, we provide more detailed results on the impact of the removal of task-specific components on both panoptic and instance segmentation on COCO [43]. For panoptic segmentation, we not only report the overall Panoptic Quality (PQ), but also separately the PQ for countable thing classes $(\mathrm{PQ}^{\mathrm{th}})$ and uncountable stuff classes $(\mathrm{PQ}^{\mathrm{st}})$ . Similarly, for instance segmentation, we separately report AP for large $(\mathrm{AP}^{\mathrm{L}})$ , medium $(\mathrm{AP}^{\mathrm{M}})$ , and small objects $(\mathrm{AP}^{\mathrm{S}})$ .
General applicability of mask annealing. In Tab. C, we assess the effect of our mask annealing strategy for both EoMT and the ViT-Adapter + M2F baseline. The results demonstrate the general applicability of mask annealing, as it is also effective for ViT-Adapter + M2F.
Number of blocks that process queries. In Tab. D, we examine the impact of varying $L_{2}$ , i.e., the number of ViT blocks in EoMT that process queries as well as patch tokens. EoMT demonstrates stable performance across different configurations, with the highest PQ for ViT-L observed around $L_{2} = 4$ , while the prediction speed in FPS is not

Figure B. Qualitative comparison of out-of-distribution (OOD) confidence estimation: (1) input image, (2) ViT-Adapter + M2F [13, 15] confidence, (3) EoMT (Ours) confidence. EoMT reliably assigns low confidence to the full OOD object, while ViT-Adapter + M2F only does so partially. Darker colors indicate lower confidence. Trained on Cityscapes train [17], evaluated on BRAVO [58].

<table><tr><td rowspan="2" colspan="2">Training</td><td rowspan="2">Inference</td><td colspan="2">Panoptic Quality (PQ)</td></tr><tr><td>EoMT</td><td>ViT-Ad. + M2F</td></tr><tr><td>✓</td><td>Masking</td><td>✓ Masking</td><td>56.2</td><td>57.1</td></tr><tr><td>×</td><td>w/o Masking</td><td>× w/o Masking</td><td>53.2↓3.0</td><td>54.0↓3.1</td></tr><tr><td>✓→</td><td>× Mask annealing</td><td>× w/o Masking</td><td>56.0↓0.2</td><td>56.8↓0.3</td></tr></table>
Table C. Mask annealing. Effective for both EoMT and ViT-Adapter + M2F [13, 15]. When never masking, intermediate masks are not predicted or supervised. Evaluated on COCO val2017 [43].
<table><tr><td># Blocks ($L_{2}$)</td><td>Params (M)</td><td>GFLOPs</td><td>FPS</td><td>PQ</td></tr><tr><td>9</td><td>316</td><td>688</td><td>126</td><td>55.7</td></tr><tr><td>6</td><td>316</td><td>676</td><td>127</td><td>55.7</td></tr><tr><td>4</td><td>316</td><td>669</td><td>128</td><td>56.0</td></tr><tr><td>2</td><td>316</td><td>660</td><td>128</td><td>55.4</td></tr></table>
Table D. Number of blocks that process queries. The model with $L_{2} = 4$ achieves the best PQ, while FPS is not significantly affected by changing $L_{2}$ . Evaluated on COCO val2017 [43].
<table><tr><td>Method</td><td>Backbone</td><td>Pre-training</td><td>AUPRC<sub>OOD</sub></td></tr><tr><td>M2F [15]</td><td>Swin-L [45]</td><td>IN21K</td><td>56.8</td></tr><tr><td>M2F‡ [15]</td><td>ViT-Adapter-L‡ [13]</td><td>DINOv2</td><td>68.7</td></tr><tr><td>EoMT (Ours)</td><td>ViT-L [23]</td><td>DINOv2</td><td>89.7</td></tr></table>
Table E. Quantitative comparison of out-of-distribution (OOD) confidence estimation. EoMT achieves the highest $\mathrm{AUPRC}_{\mathrm{OOD}}$, demonstrating its superior confidence estimation. Trained on Cityscapes train [17], evaluated on BRAVO [58]. $^{\ddagger}$ Our re-implementation.
significantly affected by changing $L_{2}$ . Consequently, we set $L_{2} = 4$ as the default configuration for ViT-L.
# C. Out-of-distribution Confidence Estimation
In Sec. 4.3, we discuss the out-of-distribution (OOD) generalization capabilities of EoMT. There, we show that DINOv2-based models, such as EoMT, significantly outperform non-ViT-based models such as Swin [45] in OOD generalization despite similar in-distribution (ID) performance.
Next, we also assess how well different models distinguish OOD regions from ID regions with their confidence scores. OOD regions, as defined in the BRAVO [58] benchmark, refer to novel object classes that were not present in
the training data. We report the $\mathrm{AUPRC_{OOD}}$ metric, which quantifies the model's ability to assign lower confidence to these unseen objects, ensuring they can be correctly identified as OOD.
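
One common way to compute such a score, sketched here with scikit-learn (the exact BRAVO evaluation protocol may differ in details): treat OOD pixels as the positive class and rank pixels by negated confidence.

```python
import numpy as np
from sklearn.metrics import average_precision_score

def auprc_ood(confidence: np.ndarray, ood_mask: np.ndarray) -> float:
    """confidence: per-pixel confidences in [0, 1]; ood_mask: 1 for OOD pixels.
    Low-confidence pixels should rank first, so the score is -confidence."""
    return average_precision_score(ood_mask.ravel(), -confidence.ravel())

# Toy check: a reliable model gives low confidence exactly on OOD pixels.
conf = np.array([0.95, 0.90, 0.20, 0.10])
ood = np.array([0, 0, 1, 1])
print(auprc_ood(conf, ood))  # 1.0
```
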
As shown in Tab. E, EoMT achieves an $\mathrm{AUPRC_{OOD}}$ of 89.7, significantly outperforming ViT-Adapter + M2F [13, 15] with a score of 68.7 and Swin [45] + M2F with a score of 56.8. The visualization in Fig. B further highlights that EoMT consistently assigns low confidence to the OOD object while maintaining high confidence for ID regions. In contrast, ViT-Adapter + M2F [13, 15] fails to reliably assign low confidence to all OOD pixels.
# D. Qualitative Examples
In Fig. C, we visualize predictions of ViT-Adapter + M2F [13, 15] and EoMT for panoptic segmentation on COCO [43].

Figure C. Qualitative examples for panoptic segmentation on COCO [43], showing (1) input images, (2) ground-truth annotations, (3) ViT-Adapter + M2F [13, 15] predictions, and (4) EoMT (Ours) predictions. Using DINOv2-g [51] and a $1280 \times 1280$ input size.

data/2025/2503_19xxx/2503.19108/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a44d67e2f24a83be2f6bf1e4ec872a9da123b58652fd2740ef2a4cfb67d552e
size 1011110
data/2025/2503_19xxx/2503.19108/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_content_list.json
ADDED
@@ -0,0 +1,1462 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Open-Vocabulary Functional 3D Scene Graphs for Real-World Indoor Spaces",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
107,
|
| 8 |
+
130,
|
| 9 |
+
888,
|
| 10 |
+
152
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Chenyangguang Zhang $^{1,2}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
109,
|
| 19 |
+
179,
|
| 20 |
+
316,
|
| 21 |
+
200
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Alexandros Delitzas $^{2,3}$",
|
| 28 |
+
"bbox": [
|
| 29 |
+
357,
|
| 30 |
+
180,
|
| 31 |
+
535,
|
| 32 |
+
198
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Fangjinhua Wang²",
|
| 39 |
+
"bbox": [
|
| 40 |
+
576,
|
| 41 |
+
180,
|
| 42 |
+
727,
|
| 43 |
+
199
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Ruida Zhang<sup>1</sup>",
|
| 50 |
+
"bbox": [
|
| 51 |
+
767,
|
| 52 |
+
180,
|
| 53 |
+
877,
|
| 54 |
+
199
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Xiangyang Ji",
|
| 61 |
+
"bbox": [
|
| 62 |
+
240,
|
| 63 |
+
200,
|
| 64 |
+
354,
|
| 65 |
+
220
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Marc Pollefeys $^{2,4}$",
|
| 72 |
+
"bbox": [
|
| 73 |
+
397,
|
| 74 |
+
202,
|
| 75 |
+
535,
|
| 76 |
+
220
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "Francis Engelmann2,5",
|
| 83 |
+
"bbox": [
|
| 84 |
+
576,
|
| 85 |
+
202,
|
| 86 |
+
750,
|
| 87 |
+
220
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "$^{1}$ Tsinghua University",
|
| 94 |
+
"bbox": [
|
| 95 |
+
192,
|
| 96 |
+
226,
|
| 97 |
+
323,
|
| 98 |
+
241
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "$^{2}$ ETH Zürich",
|
| 105 |
+
"bbox": [
|
| 106 |
+
336,
|
| 107 |
+
226,
|
| 108 |
+
419,
|
| 109 |
+
241
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "$^{3}$ MPI for Informatics",
|
| 116 |
+
"bbox": [
|
| 117 |
+
434,
|
| 118 |
+
226,
|
| 119 |
+
562,
|
| 120 |
+
241
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "4 Microsoft",
|
| 127 |
+
"bbox": [
|
| 128 |
+
576,
|
| 129 |
+
226,
|
| 130 |
+
645,
|
| 131 |
+
241
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "5Stanford University",
|
| 138 |
+
"bbox": [
|
| 139 |
+
658,
|
| 140 |
+
226,
|
| 141 |
+
785,
|
| 142 |
+
242
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"text": "Abstract",
|
| 149 |
+
"text_level": 1,
|
| 150 |
+
"bbox": [
|
| 151 |
+
246,
|
| 152 |
+
277,
|
| 153 |
+
326,
|
| 154 |
+
292
|
| 155 |
+
],
|
| 156 |
+
"page_idx": 0
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"type": "text",
|
| 160 |
+
"text": "We introduce the task of predicting functional 3D scene graphs for real-world indoor environments from posed RGB-D images. Unlike traditional 3D scene graphs that focus on spatial relationships of objects, functional 3D scene graphs capture objects, interactive elements, and their functional relationships. Due to the lack of training data, we leverage foundation models, including visual language models (VLMs) and large language models (LLMs), to encode functional knowledge. We evaluate our approach on an extended SceneFun3D dataset and a newly collected dataset, FunGraph3D, both annotated with functional 3D scene graphs. Our method significantly outperforms adapted baselines, including Open3DSG and ConceptGraph, demonstrating its effectiveness in modeling complex scene functionalities. We also demonstrate downstream applications such as 3D question answering and robotic manipulation using functional 3D scene graphs. See our project page at https://openfungraph.github.io.",
|
| 161 |
+
"bbox": [
|
| 162 |
+
88,
|
| 163 |
+
304,
|
| 164 |
+
483,
|
| 165 |
+
578
|
| 166 |
+
],
|
| 167 |
+
"page_idx": 0
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"type": "text",
|
| 171 |
+
"text": "1. Introduction",
|
| 172 |
+
"text_level": 1,
|
| 173 |
+
"bbox": [
|
| 174 |
+
89,
|
| 175 |
+
590,
|
| 176 |
+
220,
|
| 177 |
+
606
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 0
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "This paper introduces functional 3D scene graphs for real-world indoor spaces from posed RGB-D images. 3D scene graphs offer a lightweight, abstract representation for capturing the comprehensive semantic structure of an environment [4]. They support a variety of applications, including 3D scene alignment [66], image localization [51], graph-conditioned 3D scene generation [21, 97], as well as robotics navigation [83] and task planning [2, 61].",
|
| 184 |
+
"bbox": [
|
| 185 |
+
88,
|
| 186 |
+
613,
|
| 187 |
+
482,
|
| 188 |
+
733
|
| 189 |
+
],
|
| 190 |
+
"page_idx": 0
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"text": "Recent advances in 3D scene graph prediction [4, 11, 27, 40, 41, 63, 64, 78, 84], have enabled exciting developments across multiple areas, including scene graph inference from 3D reconstructions [11, 78], applications for robotic interactions [27, 84], online scene graph generation [84], open-vocabulary 3D scene graphs [40, 41] and large-scale, hierarchical scene graphs [4, 63, 64]. The performance of recent scene graph methods also benefits from advancements in 3D scene understanding techniques [14, 57, 70], which they rely on to extract objects and their semantics for modeling inter-object relationships. However, existing 3D",
|
| 195 |
+
"bbox": [
|
| 196 |
+
88,
|
| 197 |
+
734,
|
| 198 |
+
482,
|
| 199 |
+
901
|
| 200 |
+
],
|
| 201 |
+
"page_idx": 0
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "image",
|
| 205 |
+
"img_path": "images/5dda4c2d398668b77f0e1225877dc556c9505419c5c79137f40097c448cfb47f.jpg",
|
| 206 |
+
"image_caption": [
|
| 207 |
+
"Posed RGB-D Frames"
|
| 208 |
+
],
|
| 209 |
+
"image_footnote": [],
|
| 210 |
+
"bbox": [
|
| 211 |
+
516,
|
| 212 |
+
291,
|
| 213 |
+
651,
|
| 214 |
+
375
|
| 215 |
+
],
|
| 216 |
+
"page_idx": 0
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"type": "image",
|
| 220 |
+
"img_path": "images/88786f19f817f2762a16b67e9321b6366be95ee6499b05213d9b4352a523cdd3.jpg",
|
| 221 |
+
"image_caption": [],
|
| 222 |
+
"image_footnote": [],
|
| 223 |
+
"bbox": [
|
| 224 |
+
516,
|
| 225 |
+
375,
|
| 226 |
+
651,
|
| 227 |
+
452
|
| 228 |
+
],
|
| 229 |
+
"page_idx": 0
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"type": "image",
|
| 233 |
+
"img_path": "images/c8b0f6e3b5b3a1c6f45bf15051966b39c2c45048f338a5d3ada2f0bda0b443c0.jpg",
|
| 234 |
+
"image_caption": [
|
| 235 |
+
"Fig. 1. Functional 3D Scene Graphs. Given an input sequence of posed RGB-D frames of an indoor environment, our method predicts a functional 3D scene graph by detecting objects, identifying interactive elements, and inferring functional relationships. This enables the representation of interactions, functions, and scene dynamics, going beyond existing 3D scene graph methods that are constrained to spatial relationships between static objects."
|
| 236 |
+
],
|
| 237 |
+
"image_footnote": [],
|
| 238 |
+
"bbox": [
|
| 239 |
+
516,
|
| 240 |
+
452,
|
| 241 |
+
651,
|
| 242 |
+
534
|
| 243 |
+
],
|
| 244 |
+
"page_idx": 0
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"type": "image",
|
| 248 |
+
"img_path": "images/69cfc2d7a08b4cc566fa34dc8dba48c6335a5035d45c0c300b5901e31e880ee9.jpg",
|
| 249 |
+
"image_caption": [],
|
| 250 |
+
"image_footnote": [],
|
| 251 |
+
"bbox": [
|
| 252 |
+
651,
|
| 253 |
+
279,
|
| 254 |
+
903,
|
| 255 |
+
452
|
| 256 |
+
],
|
| 257 |
+
"page_idx": 0
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"type": "image",
|
| 261 |
+
"img_path": "images/a000e64b49e64721208506a4b3e3b8ea42fc6084dcd4e35204a80046d9f4505f.jpg",
|
| 262 |
+
"image_caption": [],
|
| 263 |
+
"image_footnote": [],
|
| 264 |
+
"bbox": [
|
| 265 |
+
651,
|
| 266 |
+
452,
|
| 267 |
+
903,
|
| 268 |
+
537
|
| 269 |
+
],
|
| 270 |
+
"page_idx": 0
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"type": "text",
|
| 274 |
+
"text": "scene graph estimation methods [27, 40, 78, 84] face important limitations: graph nodes are typically restricted to objects, and edges represent only spatial relationships. For instance, edges primarily capture relative positions, such as 'the TV is mounted on the wall' or 'the flower is placed on the table'—information already implicitly encoded by object positions. Crucially, these methods lack representations of small interactive elements [17] and their functional relationships with other scene objects, which are essential for finer-grained interactions (e.g., flipping a switch to turn on a light), making them less suitable for higher-level functional reasoning. The key idea of this paper is to enhance 3D scene graphs with the capability to represent functional relationships between objects and their interactive elements. A 3D scene graph that captures both functionalities and interactions opens up significant opportunities for future research.",
|
| 275 |
+
"bbox": [
|
| 276 |
+
509,
|
| 277 |
+
657,
|
| 278 |
+
906,
|
| 279 |
+
902
|
| 280 |
+
],
|
| 281 |
+
"page_idx": 0
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"type": "aside_text",
|
| 285 |
+
"text": "arXiv:2503.19199v1 [cs.CV] 24 Mar 2025",
|
| 286 |
+
"bbox": [
|
| 287 |
+
22,
|
| 288 |
+
258,
|
| 289 |
+
57,
|
| 290 |
+
705
|
| 291 |
+
],
|
| 292 |
+
"page_idx": 0
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"type": "text",
|
| 296 |
+
"text": "ties. For example, robotic agents can identify interactive elements and their functional relationships with objects to perform effective manipulation tasks, or graph-guided 3D scene generation methods [21, 97] can, with this enriched representation, generate more dynamic and realistic environments by incorporating interactive elements and their effects. However, creating functional 3D scene graphs is challenging. Most importantly, there is a lack of training data to learn the complex functional relationships between objects and their interactive elements. Unlike existing 3D scene graphs, functional 3D scene graphs require a more nuanced understanding of interactions and object affordances. To address this, our approach implements an open-vocabulary pipeline for functional 3D scene graph inference, termed OpenFunGraph, leveraging the extensive knowledge encoded within foundation models, including visual language models (VLM) and large language models (LLM). These models, pre-trained on vast amounts of multimodal data, include rich semantic information that can potentially be adapted for functional understanding. This leads us to the central question of this work: \"Can we harness foundation models to construct functional 3D scene graphs?\"",
|
| 297 |
+
"bbox": [
|
| 298 |
+
89,
|
| 299 |
+
90,
|
| 300 |
+
480,
|
| 301 |
+
422
|
| 302 |
+
],
|
| 303 |
+
"page_idx": 1
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"type": "text",
|
| 307 |
+
"text": "We evaluate our approach on two challenging datasets: an extended version of SceneFun3D [17] with newly added functional relationship annotations, and FunGraph3D, a freshly collected real-world dataset featuring high-precision 3D laser scans, accurately registered To address these limitations, we introduce functional 3D scene graphs, which model objects, interactive elements, and their functional relationships within a unified structure (formally defined in Section 3). This representation extends traditional 3D scene graphs by incorporating interactive sub-parts alongside objects and representing functional relationships beyond simple spatial ones. We argue that functional 3D scene graphs should possess the following characteristics. First, the representation should operate in an open-vocabulary manner to enhance generalization and applicability. Second, it should be flexible, allowing various attributes to be attached to nodes (e.g., sensor data, natural language captions, semantic features) and edges (e.g., relationship descriptions), thus ensuring adaptability for downstream applications.",
|
| 308 |
+
"bbox": [
|
| 309 |
+
91,
|
| 310 |
+
425,
|
| 311 |
+
482,
|
| 312 |
+
712
|
| 313 |
+
],
|
| 314 |
+
"page_idx": 1
|
| 315 |
+
},
|
| 316 |
+
{
|
| 317 |
+
"type": "text",
|
| 318 |
+
"text": "In summary, our key contributions are:",
|
| 319 |
+
"bbox": [
|
| 320 |
+
109,
|
| 321 |
+
715,
|
| 322 |
+
367,
|
| 323 |
+
729
|
| 324 |
+
],
|
| 325 |
+
"page_idx": 1
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"type": "list",
|
| 329 |
+
"sub_type": "text",
|
| 330 |
+
"list_items": [
|
| 331 |
+
"- We introduce functional 3D scene graphs that extend traditional 3D scene graphs by capturing functional relationships between objects and interactive elements.",
|
| 332 |
+
"- We propose a novel approach that leverages the knowledge embedded in foundation models, specifically VLMs and LLMs, to construct functional 3D scene graphs without task-specific training.",
|
| 333 |
+
"- We present a new real-world dataset, FunGraph3D, with ground-truth functional annotations, and demonstrate that our method outperforms adapted baselines, including Open3DSG and ConceptGraph."
|
| 334 |
+
],
|
| 335 |
+
"bbox": [
|
| 336 |
+
91,
|
| 337 |
+
732,
|
| 338 |
+
482,
|
| 339 |
+
896
|
| 340 |
+
],
|
| 341 |
+
"page_idx": 1
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"type": "text",
|
| 345 |
+
"text": "2. Related Work",
|
| 346 |
+
"text_level": 1,
|
| 347 |
+
"bbox": [
|
| 348 |
+
513,
|
| 349 |
+
89,
|
| 350 |
+
653,
|
| 351 |
+
104
|
| 352 |
+
],
|
| 353 |
+
"page_idx": 1
|
| 354 |
+
},
{
"type": "text",
"text": "3D indoor scene understanding. Many works concentrate on closed-set 3D semantic segmentation [5, 14, 31-33, 42, 45, 57, 58, 76, 80, 81] or instance segmentation [23, 28, 29, 38, 70, 74, 77, 96] on existing 3D indoor scene understanding benchmarks [3, 7, 10, 15, 37, 65, 72, 93]. With the development of foundation models, subsequent research explores open-vocabulary 3D semantic segmentation [24, 36, 39, 56, 59, 73, 75, 94, 105, 107] and complex 3D visual language grounding tasks [8, 16, 30, 34, 55, 62, 90, 103]. However, current studies mainly focus on object-level perception in indoor scenes and seldom consider part-level interactive elements. Recently, SceneFun3D [17] proposed a benchmark for functionality and affordance understanding, with exhaustive annotations of indoor interactive elements. However, it provides neither object annotations nor the relationships between the elements and objects. This work extends SceneFun3D by exploiting such relationships with functional 3D scene graphs.",
"bbox": [511, 117, 903, 390],
"page_idx": 1
},
{
"type": "text",
"text": "Affordance understanding. Understanding affordances, i.e., the properties of an environment that enable interaction, is a vital task in computer vision and robotics. Existing learning-based methods usually take inputs such as images [22, 98], videos [26, 54, 95] or 3D representations [18, 52, 53, 86], and then predict affordance maps. Some works learn affordances from human-scene interaction demonstrations [6, 12, 13, 25, 91, 92, 100, 101]. Nevertheless, existing works are often limited to object-level predictions and model affordances located on the corresponding objects. In contrast, OpenFunGraph discovers all interactive elements at the scene level, handling all kinds of functional relationships, especially those for remote operations.",
"bbox": [511, 396, 903, 592],
"page_idx": 1
},
{
"type": "text",
"text": "3D scene graphs. A 3D scene graph combines indoor entities into a unified structure and models inter-object relationships by building a graph of objects [4, 40, 63, 64, 75, 78, 79, 84, 85, 99, 102]. A functional 3D scene graph differs from the traditional 3D scene graph by adding interactive elements as nodes and modeling the functional relationships between objects and elements. Similarly, IFR-Explore [44] tries to discover inter-object functional relationships based on reinforcement learning in synthetic scenarios. However, it is hard to apply in complex real-world scenes due to its closed-set setting, requirement of ground-truth instances, and lack of consideration of part-level elements. In this paper, we propose an open-vocabulary framework for functional scene graph inference in complex real-world scenes. While there have been related efforts on open-vocabulary 3D scene graph generation, they are not well-suited for functional scene graph inference, particularly for interactive element recognition and functional relationship prediction. For example, Open3DSG [41] relies on object-level CLIP features [60]. It struggles with part-level interactive",
"bbox": [511, 598, 903, 900],
"page_idx": 1
},
{
"type": "image",
"img_path": "images/d0713da2ea263956245fdd24f217f2d5bb0e03dde770ae66f0c5f37e1087b085.jpg",
"image_caption": [
"Fig. 2. Illustration of the OpenFunGraph architecture. Given a sequence of posed RGB-D frames $\\{(\\mathcal{I}_i, \\mathcal{D}_i)\\}_{i=1}^n$, we use RAM++ [104] and GroundingDINO [49] to detect and segment objects $\\mathcal{O}$ and interactive elements $\\mathcal{I}$, forming the node candidates of the functional 3D scene graph. Next, a mechanism using the large language model (LLM) GPT [1] and the visual language model (VLM) LLAVA [48] generates natural language descriptions $\\mathcal{L}$ for each node. Finally, we infer functional relationships $\\mathcal{R}$ between objects $\\mathcal{O}$ and interactive elements $\\mathcal{I}$, represented as the edges in the functional 3D scene graph $\\mathcal{G}$."
],
"image_footnote": [],
"bbox": [91, 88, 923, 220],
"page_idx": 2
},
{
"type": "text",
"text": "element recognition and is limited to inferring spatial relationships due to its design based on spatial-proximity edge feature distillation. ConceptGraph [27] uses a direct inference pipeline but focuses solely on object nodes and a narrow set of spatial relationships (e.g., on, in). In contrast, our approach introduces adaptive detection and description stages for both objects and interactive elements, alongside a sequential reasoning strategy for accurately modeling a wide range of functional relationships.",
"bbox": [88, 309, 485, 446],
"page_idx": 2
},
{
"type": "text",
"text": "3. Problem Formulation",
"text_level": 1,
"bbox": [89, 477, 297, 494],
"page_idx": 2
},
{
"type": "text",
"text": "Functional 3D Scene Graphs. We extend traditional 3D scene graphs [27, 41, 78] to facilitate their use in real-world scene interaction scenarios. Specifically, we introduce Functional 3D Scene Graphs, a representation designed to enable functional reasoning by jointly modeling objects, interactive elements and their functional relationships. We define a functional 3D scene graph as a directed graph $\\mathcal{G} = (\\mathcal{O},\\mathcal{I},\\mathcal{R})$ where $\\mathcal{O}$ are the objects in the scene, $\\mathcal{I}$ are the interactive elements and $\\mathcal{R}$ are the functional relationships which point from the interactive element nodes $\\mathcal{I}$ to object nodes $\\mathcal{O}$. Following the definition in [17], we define interactive elements as components that agents interact with (e.g., handles, knobs and buttons) to trigger specific functions within the environment such as opening a cabinet or turning off a light. Additionally, functional relationships fall into two categories: local, where the interactive element is part of the object (e.g., door-handle), or remote, where the interactive element operates the object from a distance (e.g., TV-remote control).",
"bbox": [91, 513, 483, 801],
"page_idx": 2
},
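For concreteness, here is a minimal sketch of how the graph $\mathcal{G} = (\mathcal{O}, \mathcal{I}, \mathcal{R})$ defined above could be represented in code. All field and class names are illustrative assumptions by the editor, not the authors' actual schema.

```python
# A minimal sketch of the functional 3D scene graph G = (O, I, R).
# Field names are illustrative, not taken from the paper's code.
from dataclasses import dataclass, field

@dataclass
class Node:
    node_id: int
    label: str          # open-vocabulary tag, e.g. "cabinet" or "handle"
    description: str    # natural-language caption attached to the node
    bbox_3d: tuple      # (xmin, ymin, zmin, xmax, ymax, zmax)

@dataclass
class FunctionalEdge:
    element_id: int     # edges point from interactive elements ...
    object_id: int      # ... to the objects they operate
    relation: str       # relationship description, e.g. "opens", "turns on"
    kind: str           # "local" (rigidly attached) or "remote"
    confidence: float = 1.0  # used by the remote-reasoning stage (Sec. 4.3)

@dataclass
class FunctionalSceneGraph:
    objects: dict = field(default_factory=dict)    # O: node_id -> Node
    elements: dict = field(default_factory=dict)   # I: node_id -> Node
    relations: list = field(default_factory=list)  # R: element -> object edges
```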
{
"type": "text",
"text": "Task definition. We formulate the following novel 3D scene understanding task: Given an input sequence of posed RGB-D frames $\\{(\\mathcal{I}_i,\\mathcal{D}_i)\\}_{i = 1}^n$ of an unseen indoor environment, the task is to construct the functional 3D scene graph $\\mathcal{G}$ by inferring the functional relationships $\\mathcal{R}$ among the objects $\\mathcal{O}$ and interactive elements $\\mathcal{I}$ in the scene.",
"bbox": [88, 815, 485, 907],
"page_idx": 2
},
{
"type": "text",
"text": "4. Method",
"text_level": 1,
"bbox": [513, 308, 604, 324],
"page_idx": 2
},
{
"type": "text",
"text": "The goal of our method, OpenFunGraph, is to predict the functional 3D scene graph of a 3D environment, by accurately detecting objects and interactive elements, and inferring the functional relationships among them in an open-vocabulary manner (Figure 2). To overcome the challenge of limited training data, we harness the knowledge of foundation models [9] to detect objects and interactive elements within the scene, describe them in natural language, and reason about their functional relationships. In the detection stage (Section 4.1), we follow a progressive strategy where we prompt the foundation model to systematically first identify objects and then transition to finer-grained interactive elements given the input image sequence. The 2D detection results are then fused across multiple viewpoints in 3D space, constructing an initial set of node candidates. Next, we utilize a VLM and an LLM to collaboratively generate multi-view aware natural language descriptions of the candidate nodes (Section 4.2). To construct the graph, we proceed with inferring the functional relationships, i.e., edges, among the object and interactive element nodes (Section 4.3). Specifically, we follow a sequential reasoning strategy, starting with local functional relationships (e.g., door - handle) and extending to remote functional relationships (e.g., TV - remote control), by leveraging the common sense knowledge of VLMs and LLMs. This allows us to progressively build the scene's functional graph by incrementally establishing connections between nodes.",
"bbox": [511, 330, 908, 739],
"page_idx": 2
},
{
"type": "text",
"text": "4.1. Node Candidate Detection",
"text_level": 1,
"bbox": [511, 753, 751, 768],
"page_idx": 2
},
{
"type": "text",
"text": "In the first stage, we detect objects and interactive elements in the scene to construct a set of node candidates. We start by detecting 2D candidates on the input frames with a progressive foundation-model-based strategy that transitions from objects to finer-grained part-level interactive elements. Then, we associate and fuse the 2D detection results from multiple frames using geometric consistency, yielding the initial set of 3D node candidates.",
"bbox": [511, 779, 908, 902],
"page_idx": 2
},
{
"type": "text",
"text": "Object candidates. To identify object candidates $\\mathcal{C}_o^{\\mathcal{I}_i}$, we utilize RAM++ [35, 104] to recognize objects in each input image $\\mathcal{I}_i$, producing object tags $\\mathcal{T}_{obj}^{\\mathcal{I}_i}$, such as 'cabinet' or 'door'. These object tags then serve as prompts for GroundingDINO [49], which detects 2D bounding boxes $\\mathcal{B}^{\\mathcal{I}_i}$, segmentation masks $\\mathcal{M}^{\\mathcal{I}_i}$, and confidence scores $\\mathcal{S}^{\\mathcal{I}_i}$.",
"bbox": [89, 90, 480, 181],
"page_idx": 3
},
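A hedged sketch of this per-frame object candidate step follows. `tag_image` and `detect` are editor stand-ins for the RAM++ and GroundingDINO inference calls, whose real signatures depend on the release used; only the control flow mirrors the text.

```python
# Sketch of the per-frame object candidate detection (assumed wrappers).
def detect_object_candidates(image, ram_plus_plus, grounding_dino):
    # 1) Open-vocabulary tagging: produces tags such as "cabinet" or "door".
    tags = ram_plus_plus.tag_image(image)          # T_obj for this frame

    # 2) Grounded detection: the tags serve as the text prompt.
    prompt = ". ".join(tags)
    boxes, masks, scores, phrases = grounding_dino.detect(image, prompt)

    # One candidate record per detection: (box B, mask M, score S, tag).
    return [
        {"bbox": b, "mask": m, "score": s, "tag": p}
        for b, m, s, p in zip(boxes, masks, scores, phrases)
    ]
```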
{
"type": "text",
"text": "Interactive element candidates. Despite the increasing success of foundation models in detecting object instances within scenes, the development of prompting strategies for identifying smaller elements, including interactive object parts (e.g., knobs, handles), remains largely unexplored. Here, we propose a simple yet effective strategy to generate suitable text prompts for GroundingDINO to improve the detection of small interactive parts. We ask the LLM GPT-4 to provide a list of potential interactive element tags corresponding to each object candidate tag $\\mathcal{T}_{obj}^{\\mathcal{I}_i}$. We keep the valid object tags $\\mathcal{T}_{val}^{\\mathcal{I}_i}$ by filtering out the cases where the LLM judges the object to be non-interactable (e.g., wall, bed). To create prompts for GroundingDINO, we concatenate $\\mathcal{T}_{val}^{\\mathcal{I}_i}$ (e.g., door) as assistive tags with the functional element tags (e.g., handle), forming prompts such as \"door. handle\". Finally, we obtain the interactive element candidates $\\mathcal{C}_{ie}^{\\mathcal{I}_i}$ in each input image $\\mathcal{I}_i$ by retaining the detections corresponding to the functional element tags. Empirically, we observe that this approach leads to more accurate detection of small interactive parts. We support this observation with an ablation study in Section 6.3.",
"bbox": [91, 188, 483, 507],
"page_idx": 3
},
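The element-prompting strategy lends itself to a short sketch. `propose_interactive_elements` is a hypothetical wrapper around the GPT-4 query described above; the "door. handle" prompt format follows the text.

```python
# Sketch of the assistive-tag prompt construction for element detection.
def build_element_prompts(object_tags, llm):
    prompts = []
    for obj_tag in object_tags:
        # Hypothetical LLM helper: element tags plausible for this object.
        element_tags = llm.propose_interactive_elements(obj_tag)
        if not element_tags:
            continue  # non-interactable objects (e.g. wall, bed) filtered out
        for el_tag in element_tags:
            # Assistive object tag + element tag, e.g. "door. handle".
            prompts.append((f"{obj_tag}. {el_tag}", el_tag))
    return prompts

def detect_element_candidates(image, prompts, grounding_dino):
    candidates = []
    for prompt, element_tag in prompts:
        boxes, masks, scores, phrases = grounding_dino.detect(image, prompt)
        # Keep only detections matched to the element tag, not the
        # assistive object tag that merely provides context.
        for b, m, s, p in zip(boxes, masks, scores, phrases):
            if p == element_tag:
                candidates.append({"bbox": b, "mask": m, "score": s, "tag": p})
    return candidates
```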
{
"type": "text",
"text": "3D candidate fusion. After identifying the object and functional element candidates $\\mathcal{C}_o^{\\mathcal{I}_i}$ and $\\mathcal{C}_{ie}^{\\mathcal{I}_i}$ in each image $\\mathcal{I}_i$, we fuse their 2D segmentation masks using multi-view information to obtain the 3D node candidates of the graph. Following [27], we utilize the corresponding depth map $\\mathcal{D}_i$ and camera projection matrix $\\Pi_i$ to backproject the 2D masks to 3D space, and merge them to obtain the 3D object candidates $\\mathcal{C}_o$ and interactive element candidates $\\mathcal{C}_{ie}$. For each node candidate, we store the backprojected 3D point cloud $\\mathcal{P}$ and 3D bounding box $\\mathcal{B}$ along with the associated 2D image assets, i.e., images, masks, 2D bounding boxes and confidence scores.",
"bbox": [89, 513, 483, 695],
"page_idx": 3
},
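The backprojection underlying this fusion step can be written out directly, assuming a pinhole intrinsic matrix `K` and a camera-to-world pose; the multi-view merging heuristics themselves follow [27] and are omitted here.

```python
import numpy as np

# Backprojection of a 2D mask into world space, as used for 3D candidate
# fusion. Assumes a pinhole camera model; merging follows ConceptGraph [27].
def backproject_mask(depth, mask, K, cam_to_world):
    v, u = np.nonzero(mask)            # pixel coordinates inside the mask
    z = depth[v, u]
    valid = z > 0                      # drop pixels without a depth reading
    u, v, z = u[valid], v[valid], z[valid]
    x = (u - K[0, 2]) * z / K[0, 0]    # X = (u - cx) * Z / fx
    y = (v - K[1, 2]) * z / K[1, 1]    # Y = (v - cy) * Z / fy
    pts_cam = np.stack([x, y, z, np.ones_like(z)], axis=1)
    pts_world = (cam_to_world @ pts_cam.T).T[:, :3]
    return pts_world                   # this view's contribution to P
```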
{
"type": "text",
"text": "4.2. Node Candidate Description",
"text_level": 1,
"bbox": [89, 704, 344, 722],
"page_idx": 3
},
{
"type": "text",
"text": "We next outline the process of generating natural language descriptions $\\mathcal{L}$ for each node by leveraging a combination of VLMs and LLMs. Precise language descriptions are critical for establishing functional relationships in the final phase.",
"bbox": [89, 728, 482, 789],
"page_idx": 3
},
{
"type": "text",
"text": "Object candidates. To generate natural language descriptions for each object candidate node, we first select the top $N_v$ views of each object, ranked by $S^{\\mathcal{I}_i} \\times \\frac{n_{\\mathcal{P}}^{\\mathcal{I}_i}}{n_{\\mathcal{P}}}$, where $S^{\\mathcal{I}_i}$ is the 2D confidence score indicating the semantic confidence, while $n_{\\mathcal{P}}^{\\mathcal{I}_i}$ refers to the number of 3D points the view $\\mathcal{I}_i$ contributes to the fused 3D point cloud $\\mathcal{P}$, representing the geometric contribution of the view. Each object is",
"bbox": [89, 794, 483, 902],
"page_idx": 3
},
{
"type": "text",
"text": "then cropped based on its bounding box $\\mathcal{B}$, and a caption describing the object crop is obtained using LLAVA v1.6 [46-48]. Finally, to derive a unified language description for each object candidate, we employ GPT-4 [1] to summarize the multi-view LLAVA captions.",
"bbox": [511, 90, 903, 167],
"page_idx": 3
},
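The view-ranking rule is simple enough to state in code; `views` is an assumed per-view record holding the 2D confidence score and the view's point contribution to the fused cloud.

```python
# Sketch of the view-selection rule: each view is ranked by its 2D
# confidence score S times the fraction of the fused point cloud P it
# contributed; the top N_v views are passed to LLAVA for captioning.
def select_top_views(views, n_total_points, n_views=5):
    # `views`: list of dicts with keys
    #   "score"    - 2D detection confidence S for this view
    #   "n_points" - number of 3D points this view contributed to P
    ranked = sorted(
        views,
        key=lambda v: v["score"] * v["n_points"] / n_total_points,
        reverse=True,
    )
    return ranked[:n_views]
```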
{
"type": "text",
"text": "Interactive element candidates. Captioning small interactive elements poses additional challenges: the bounding box crops are considerably smaller, often containing only a few pixels, which hinders LLAVA's ability to generate accurate captions. To address this, we enlarge the bounding boxes by multiple scales to incorporate richer contextual visual information. Similar multi-scale approaches have been shown to be effective in [39, 73]. To direct the VLM's attention to the interactive element within the expanded crop, we highlight the element with a red outline before passing it to LLAVA, as demonstrated in [71]. Finally, the multi-scale, multi-view captions are summarized into a single natural language description using GPT-4.",
"bbox": [511, 172, 906, 369],
"page_idx": 3
},
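A sketch of the multi-scale context crops with the red-outline highlight, using Pillow; the scale factors and outline width are illustrative, not values from the paper.

```python
from PIL import Image, ImageDraw

# Multi-scale context crops around an element's 2D box, with the element
# highlighted in red before the crops are passed to the VLM.
def element_context_crops(image_path, bbox, scales=(1.0, 2.0, 4.0)):
    img = Image.open(image_path).convert("RGB")
    x0, y0, x1, y1 = bbox
    cx, cy = (x0 + x1) / 2, (y0 + y1) / 2
    w, h = x1 - x0, y1 - y0
    crops = []
    for s in scales:
        # Enlarge the box around its center to include more visual context.
        half_w, half_h = s * w / 2, s * h / 2
        crop_box = (max(0, cx - half_w), max(0, cy - half_h),
                    min(img.width, cx + half_w), min(img.height, cy + half_h))
        crop = img.crop(crop_box)
        # Draw a red outline around the element inside the enlarged crop.
        draw = ImageDraw.Draw(crop)
        draw.rectangle([x0 - crop_box[0], y0 - crop_box[1],
                        x1 - crop_box[0], y1 - crop_box[1]],
                       outline="red", width=3)
        crops.append(crop)
    return crops
```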
{
"type": "text",
"text": "4.3. Functional Relationships",
"text_level": 1,
"bbox": [511, 378, 741, 395],
"page_idx": 3
},
{
"type": "text",
"text": "To model functional relationships between objects and interactive elements, we employ a sequential reasoning approach. Drawing on the concept of Chain-of-Thought reasoning [82], we decompose the task into a series of simpler steps rather than prompting the LLM to infer all possible element-object connections simultaneously. Initially, we concentrate on identifying direct, local relationships between objects and elements that are rigidly connected (e.g., door - handle). Once these relationships are established, we extend the search to remote relationships, where object-element pairs are functionally related but physically separated (e.g., TV - remote control).",
"bbox": [511, 402, 905, 583],
"page_idx": 3
},
{
"type": "text",
"text": "Local relationship reasoning. First, we aim to construct the edges of the graph with local functional relationships, e.g., the key panel of a microwave or the knob of a cabinet. A common characteristic of these cases is that objects and interactive elements are rigidly connected. To identify such cases efficiently, we first perform a spatial filtering process: For each object node $\\mathcal{C}_o^j$, we assess whether an element node $\\mathcal{C}_{ie}^k$ has a significant spatial overlap. Subsequently, we leverage the LLM's common sense knowledge to reason whether a local functional relationship between these two nodes is feasible. To do this, we prompt the LLM with the language descriptions $\\mathcal{L}^j$, $\\mathcal{L}^k$ and 3D bounding boxes $\\mathcal{B}^j$, $\\mathcal{B}^k$ of $\\mathcal{C}_o^j$ and $\\mathcal{C}_{ie}^k$ respectively. It is tasked with reasoning whether a local rigid connection between the interactive element (e.g., handle) and object (e.g., fridge) is feasible, and then with generating a language description $\\mathcal{L}^{k\\rightarrow j}$ of the functional relationship (e.g., \"opens\"). This step produces the subgraph of local connections $\\hat{\\mathcal{G}}^L = (\\mathcal{O}^L,\\mathcal{I}^L,\\mathcal{R}^L)$.",
"bbox": [511, 590, 906, 864],
"page_idx": 3
},
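The spatial pre-filter can be sketched as a 3D-box coverage test; the 0.5 coverage threshold and the array layout of the boxes are editorial assumptions, not values from the paper.

```python
import numpy as np

# Spatial pre-filter for local relationships: an element is only passed to
# the LLM if its 3D box substantially overlaps an object's 3D box.
# Boxes are np.arrays [xmin, ymin, zmin, xmax, ymax, zmax].
def overlaps(obj_bbox, el_bbox, min_coverage=0.5):
    lo = np.maximum(obj_bbox[:3], el_bbox[:3])
    hi = np.minimum(obj_bbox[3:], el_bbox[3:])
    inter = np.prod(np.clip(hi - lo, 0, None))       # intersection volume
    el_vol = np.prod(el_bbox[3:] - el_bbox[:3])      # element box volume
    return el_vol > 0 and inter / el_vol >= min_coverage

def local_relation_pairs(objects, elements):
    # Yields (object, element) pairs whose feasibility the LLM then judges
    # from their language descriptions L and boxes B.
    for obj in objects:
        for el in elements:
            if overlaps(obj["bbox_3d"], el["bbox_3d"]):
                yield obj, el
```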
{
"type": "text",
"text": "Confidence-aware remote relationship reasoning. In this step, we construct graph edges representing remote func",
"bbox": [511, 869, 903, 901],
"page_idx": 3
},
{
"type": "text",
"text": "tional relationships, such as those between a ceiling light and its switch. Determining these remote relationships is challenging, as visual cues alone often do not fully clarify which interactive element controls which specific object. To address this, we introduce a confidence-aware reasoning strategy that assigns a confidence score to each inferred remote relationship. This approach enhances decision-making in real-world scenarios by enabling the agent to prioritize interactions with higher confidence scores.",
"bbox": [89, 90, 480, 224],
"page_idx": 4
},
{
"type": "text",
"text": "First, we form an initial set of potential candidates for remote connections, by considering the interactive element nodes that remained unassigned from the previous stage. To construct potential remote connections among the interactive elements and objects in the scene, we utilize the common sense knowledge of the LLM. Specifically, we provide the LLM with natural language descriptions $\\mathcal{L}$ of the interactive element and object nodes, so that it can output a list of likely target objects that each interactive element could be functionally linked to. Next, for each element-object pair, we employ the VLM to assess the feasibility of a functional connection. The visual input for this step is prepared from the top-1 views of the interactive element and object. The VLM can exploit useful information in the images of the element and object to generate descriptions for the feasibility assessment. For example, it describes whether the appliance is physically plugged into the electric outlet, or whether the switch is mounted on the wall under the ceiling light. The descriptions from all pairs are then provided to the LLM to form a global context, assisting it to assign a relative confidence score to each proposed connection and describe the nature of each relationship. This step outputs the subgraph of remote relations: $\\hat{\\mathcal{G}}^R = (\\mathcal{O}^R,\\mathcal{I}^R,\\mathcal{R}^R)$.",
"bbox": [91, 227, 483, 578],
"page_idx": 4
},
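One plausible way to assemble the global context for this confidence-aware step is sketched below. The prompt wording is invented for illustration; the paper's actual prompts are not reproduced here.

```python
# Sketch of assembling the remote-reasoning context for the LLM.
# All prompt text is an editorial assumption, not the authors' prompt.
def remote_reasoning_prompt(unassigned_elements, objects, pair_observations):
    lines = ["Unassigned interactive elements:"]
    lines += [f"  [{e['node_id']}] {e['description']}" for e in unassigned_elements]
    lines.append("Candidate target objects:")
    lines += [f"  [{o['node_id']}] {o['description']}" for o in objects]
    lines.append("Per-pair visual observations from the VLM:")
    for (el_id, obj_id), obs in pair_observations.items():
        lines.append(f"  element {el_id} -> object {obj_id}: {obs}")
    lines.append(
        "For each element, rank the feasible target objects, assign a "
        "relative confidence in [0, 1] to every proposed connection, and "
        "describe the nature of each relationship (e.g. 'turns on')."
    )
    return "\n".join(lines)
```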
{
"type": "text",
"text": "4.4. Final Graph Formation",
"text_level": 1,
"bbox": [89, 583, 308, 599],
"page_idx": 4
},
{
"type": "text",
"text": "To construct the final graph, we combine the nodes and relationships identified in both the local and remote functional reasoning stages. The resulting predicted graph is formulated as $\\mathcal{G} = (\\mathcal{O}^L\\cup \\mathcal{O}^R,\\mathcal{I}^L\\cup \\mathcal{I}^R,\\mathcal{R}^L\\cup \\mathcal{R}^R)$.",
"bbox": [89, 606, 482, 670],
"page_idx": 4
},
{
"type": "text",
"text": "5. Data Collection",
"text_level": 1,
"bbox": [89, 678, 246, 694],
"page_idx": 4
},
{
"type": "text",
"text": "Existing datasets of high-fidelity 3D indoor spaces focus primarily on understanding either 3D objects [7, 93] or 3D interactive elements [17]. However, they lack ground-truth annotations of the functional relationships. In many cases, these relationships cannot be inferred from static visual observations alone but instead require video captures of physical interactions with the scene to determine which actions trigger specific responses. For example, a static 3D reconstruction cannot indicate which switch controls a particular light in a room with multiple switches and lights. To systematically evaluate our method, we construct a novel dataset of 3D real-world indoor environments along with multi-sensor data (i.e., high-fidelity 3D reconstructions,",
"bbox": [89, 703, 482, 900],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/b51001db26fca9b6d7527c620372d4cd05600e8f5b2717742478856d5002cdc3.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [516, 89, 903, 205],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/8e30779dad82861ed2e02b97f6a9bbad3338b50cd325f3fd05dd854ec6e32009.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [516, 205, 903, 323],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/82a52d5656fef91398b9319936bba0e6874fc48cae24a519f455a3b8d6972b93.jpg",
"image_caption": [
"Fig. 3. Modalities of our FunGraph3D dataset. Top: 3D scans from a Faro laser scanner, annotated with 3D object and interactive element masks. Middle: Ground truth functional 3D scene graphs. Bottom: Egocentric video capturing human-scene interactions."
],
"image_footnote": [],
"bbox": [516, 323, 903, 401],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/03b105cecb2d30ce5b77fd58d66a212eee9bf78360d06577ba0fada118c0fd34.jpg",
"image_caption": [
"Fig. 4. Example scenes from our FunGraph3D dataset. The dataset includes typical indoor environments such as living rooms, bedrooms, bathrooms, and kitchens."
],
"image_footnote": [],
"bbox": [517, 465, 901, 580],
"page_idx": 4
},
{
"type": "text",
"text": "consumer-device video captures, egocentric human-scene interaction videos) and functional 3D scene graph annotations. We outline the steps towards building this dataset, which we refer to as FunGraph3D (Figure 4).",
"bbox": [511, 643, 905, 705],
"page_idx": 4
},
{
"type": "text",
"text": "Laser scans. As illustrated in [17], laser scans can capture a higher level of 3D geometric detail, such as small interactive elements (e.g., knobs, buttons), which is necessary for fine-grained scene understanding applications. To this end, we use a Leica RTC360 laser scanner to capture a high-resolution (5mm) 3D scan of the scene. To ensure high scene coverage during the capture, we place the scanner in multiple positions in the scene. We subsequently use the supporting software by Leica to fuse the multiple scans into a single one for the scene.",
"bbox": [511, 710, 905, 862],
"page_idx": 4
},
{
"type": "text",
"text": "iPad video sequences. To enable scene understanding through multiple sensor data, we accompany the high",
"bbox": [511, 869, 906, 902],
"page_idx": 4
},
{
"type": "text",
"text": "fidelity 3D reconstruction with RGB-D image information from a commodity device. Specifically, we capture multiple videos of the static scene with the camera of an iPad 15 Pro.",
"bbox": [89, 90, 480, 136],
"page_idx": 5
},
{
"type": "text",
"text": "Registration and alignment. To register the iPad video frames to the laser scan coordinate system, we build upon the COLMAP-based pipeline in [93]. Specifically, we run the COLMAP SfM pipeline [68, 69] by augmenting the collection of real iPad frames with rendered pseudo images of the laser scan. However, we notice that this pipeline leads to a large number of unregistered frames. To address this limitation, we incorporate the deep learning-based methods SuperPoint [19] and SuperGlue [67] for feature extraction and matching, leading to a more accurate registration result. Afterwards, we utilize the optimized pose for each camera frame to render high-resolution depth maps for accurate back-projection from the iPad frames to the 3D space.",
"bbox": [89, 142, 482, 339],
"page_idx": 5
},
{
"type": "text",
"text": "Egocentric videos. We include egocentric videos of property owners interacting with the environment using an Apple Vision Pro headset in our dataset. These videos facilitate accurate relationship labeling as they help clarify ambiguous connections among objects and interactive elements (e.g., which light switch controls the ceiling light).",
"bbox": [89, 345, 482, 436],
"page_idx": 5
},
{
"type": "text",
"text": "Annotation. For the annotation process, we extend the SceneFun3D annotation tool [17] to construct the ground-truth functional 3D scene graphs. Annotators can navigate the 3D scene and annotate the instances of objects and interactive elements along with a free-form label. Annotators are also asked to connect the interactive element to the corresponding object that it controls and provide a description of their relationship. An example of the collected annotations is displayed in Figure 3.",
"bbox": [89, 441, 482, 578],
"page_idx": 5
},
{
"type": "text",
"text": "Statistics. FunGraph3D contains 14 in-the-wild scenes of various types (6 kitchens, 2 living rooms, 3 bedrooms and 3 bathrooms). In total, the dataset includes 201 interactive elements, 228 functional relationships and 146 objects of interest, along with open-vocabulary labels and relationships.",
"bbox": [89, 584, 482, 660],
"page_idx": 5
},
{
"type": "text",
"text": "6. Experiments",
"text_level": 1,
"bbox": [89, 674, 223, 690],
"page_idx": 5
},
{
"type": "text",
"text": "6.1. Experimental Setup",
"text_level": 1,
"bbox": [89, 698, 279, 714],
"page_idx": 5
},
{
"type": "text",
"text": "Datasets. To evaluate our method, we utilize the developed FunGraph3D dataset, described in Section 5. Additionally, we use the SceneFun3D dataset [17], which provides high-resolution $5\\mathrm{mm}$ laser scans of real-world environments along with iPad video sequences. Specifically, we randomly select 20 scenes (8 from the validation and 12 from the test split) and apply our annotation pipeline to annotate the functional 3D scene graph in each scene. Since we do not have physical access to the 3D environments, we restrict our evaluation to functional relationships that are visually unambiguous. In total, 212 interactive elements, 195 functional relationships, and 105 corresponding objects are",
"bbox": [89, 719, 482, 900],
"page_idx": 5
},
{
"type": "image",
"img_path": "images/37f1ac36e2aa541deab62dc9baf13bbc75f9517bbd414f1d827aca83478015f0.jpg",
"image_caption": [
"Fig. 5. Qualitative results. Top: input images. Bottom: predicted functional 3D scene graph. Best seen zoomed in on a color screen."
],
"image_footnote": [],
"bbox": [516, 88, 738, 265],
"page_idx": 5
},
{
"type": "image",
"img_path": "images/160727c87f1d00e22b57fa517217be390f4c3dc9fdf1463d602948496e5d429d.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [745, 88, 903, 265],
"page_idx": 5
},
{
"type": "text",
"text": "annotated for these scenes.",
"bbox": [513, 316, 692, 330],
"page_idx": 5
},
{
"type": "text",
"text": "Metrics. To evaluate open-vocabulary functional 3D scene graphs effectively, a new quantitative metric is essential. Existing approaches, such as ConceptGraph [27], rely on subjective human assessments, while Open3DSG [41] approaches evaluation as a label retrieval task, assuming all ground-truth nodes are known, an assumption that diverges from our real-world setting. To address this, we extend the Open3DSG Recall@K metric [41] with a node detection component, using spatial overlap between predicted and ground-truth nodes, inspired by evaluation techniques for 2D scene graph generation [50, 87-89, 106]. More specifically, our evaluation metric comprises two Recall@K scores: one for nodes, i.e., $\\mathcal{O}$ and $\\mathcal{I}$, and one for triplets, i.e., $(\\mathcal{O},\\mathcal{I},\\mathcal{R})$. For node evaluation, we preprocess all ground-truth labels to enable top-K retrieval, following Open3DSG [41]. A retrieval is considered successful if a ground-truth node has a non-zero 3D IoU with a predicted node and the ground-truth label ranks within the top-K retrievals based on cosine similarity of CLIP embeddings [60] with the predicted label. We calculate overall node recall as $R_{no} = \\frac{n_{no}^{re}}{n_{no}}$, where $n_{no}^{re}$ is the number of successfully retrieved ground-truth nodes, and $n_{no}$ is the total count of ground-truth nodes. Additionally, we assess recall for object and interactive element nodes separately, denoted as $R_{o} = \\frac{n_{o}^{re}}{n_{o}}$ and $R_{ie} = \\frac{n_{ie}^{re}}{n_{ie}}$, where $n_{o}^{re}$ and $n_{ie}^{re}$ are the counts of correctly retrieved objects and interactive elements and $n_{o}$ and $n_{ie}$ are their respective totals. For triplet $(\\mathcal{O},\\mathcal{I},\\mathcal{R})$ evaluation, we apply stricter criteria: a ground-truth triplet is successfully retrieved in the top-K only when all its components $\\mathcal{O},\\mathcal{I}$ and $\\mathcal{R}$ are individually retrieved within the top-K. The retrieval process for $\\mathcal{O}$ and $\\mathcal{I}$ follows the same approach as above. To handle $\\mathcal{R}$, we preprocess all relationship annotations by generating BERT embeddings [20], an approach effective for open-vocabulary predicates [41]. Successful retrieval is based on cosine similarity between ground-truth and predicted BERT embeddings. Triplet recall is defined as $R_{tr} = \\frac{n_{re}}{n_{tr}}$, where $n_{re}$ is",
"bbox": [511, 338, 906, 904],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/0de70a358429082a7730ac24b0c03fd1cda1f818d928bc00cbe7aa06e06b1419.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"3\">Methods</td><td colspan=\"6\">SceneFun3D [17]</td><td colspan=\"6\">FunGraph3D (Ours)</td></tr><tr><td colspan=\"2\">Objects</td><td colspan=\"2\">Inter. Elements</td><td colspan=\"2\">Overall Nodes</td><td colspan=\"2\">Objects</td><td colspan=\"2\">Inter. Elements</td><td colspan=\"2\">Overall Nodes</td></tr><tr><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td></tr><tr><td>Open3DSG* [41]</td><td>61.2</td><td>70.7</td><td>54.4</td><td>61.8</td><td>56.7</td><td>64.7</td><td>50.9</td><td>58.1</td><td>21.8</td><td>33.9</td><td>33.4</td><td>43.6</td></tr><tr><td>Open3DSG*† [41]</td><td>42.9</td><td>50.0</td><td>33.8</td><td>38.3</td><td>37.4</td><td>43.0</td><td>30.9</td><td>44.1</td><td>13.0</td><td>19.6</td><td>20.2</td><td>29.4</td></tr><tr><td>ConceptGraph* [27]</td><td>71.3</td><td>77.1</td><td>6.6</td><td>8.6</td><td>28.3</td><td>31.4</td><td>58.0</td><td>66.3</td><td>2.5</td><td>4.1</td><td>20.1</td><td>25.2</td></tr><tr><td>ConceptGraph* [27] + IED</td><td>71.3</td><td>77.1</td><td>53.1</td><td>59.5</td><td>60.1</td><td>66.0</td><td>58.0</td><td>66.3</td><td>20.5</td><td>33.4</td><td>38.9</td><td>45.0</td></tr><tr><td>OpenFunGraph (Ours)</td><td>81.8</td><td>87.8</td><td>71.0</td><td>79.5</td><td>73.0</td><td>82.8</td><td>70.7</td><td>79.1</td><td>44.4</td><td>57.6</td><td>55.5</td><td>65.8</td></tr></table>",
"bbox": [94, 88, 903, 229],
"page_idx": 6
},
{
"type": "text",
"text": "Tab. 1. Node evaluation on the SceneFun3D [17] and FunGraph3D datasets. * denotes adapting the LLM prompts for functional relationship inference. IED refers to the interactive element candidate detection in Section 4.1. † refers to the usage of OpenFunGraph's fused 3D nodes rather than the ground truth for fair comparison.",
"bbox": [89, 234, 906, 277],
"page_idx": 6
},
{
"type": "text",
"text": "the count of retrieved triplets, and $n_{tr}$ is the total count of ground-truth triplets. We decompose triplet evaluation into node association ($R_{na} = \\frac{n_{na}}{n_{tr}}$, with $n_{na}$ being the number of triplets retrieved considering only $\\mathcal{O}, \\mathcal{I}$), indicating node recognition, and edge prediction ($R_{ep} = \\frac{n_{re}}{n_{na}}$), showing relationship inference given correct node associations.",
"bbox": [88, 290, 480, 381],
"page_idx": 6
},
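The node Recall@K described above can be sketched as follows; `clip_embed` stands in for a real CLIP text encoder returning L2-normalized embeddings, and `iou_fn` for a 3D IoU routine, both assumptions of this sketch.

```python
import numpy as np

# Sketch of node Recall@K: a ground-truth node counts as retrieved if some
# predicted node overlaps it in 3D (non-zero IoU) and the ground-truth label
# is among that prediction's top-K labels by CLIP cosine similarity.
def node_recall_at_k(gt_nodes, pred_nodes, label_bank, clip_embed, iou_fn, k=3):
    bank_emb = clip_embed(label_bank)          # (L, D), assumed L2-normalized
    retrieved = 0
    for gt in gt_nodes:
        hit = False
        for pred in pred_nodes:
            if iou_fn(gt["bbox_3d"], pred["bbox_3d"]) <= 0:
                continue                        # require non-zero 3D IoU
            sim = bank_emb @ clip_embed([pred["label"]])[0]  # cosine similarity
            topk = [label_bank[i] for i in np.argsort(-sim)[:k]]
            if gt["label"] in topk:
                hit = True
                break
        retrieved += hit
    return retrieved / len(gt_nodes)            # R_no = n_no^re / n_no
```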
{
"type": "text",
"text": "State-of-the-art comparisons. We compare our approach against ConceptGraph [27] and Open3DSG [41]-based baselines. Two ConceptGraph-based baselines are implemented: ConceptGraph* modifies the original LLM prompts to infer functional relationships, rather than focusing on spatial relationships such as in or on. ConceptGraph* + IED further incorporates the proposed interactive element candidate detection (IED) from Section 4.1, addressing ConceptGraph's initial limitation in detecting small parts. Both baselines use LLAVA v1.6 and GPT-4 for fair comparison with OpenFunGraph. We also reimplement two Open3DSG-based baselines. Open3DSG* modifies the LLM prompts to output functional relationships instead of spatial relationships. Since Open3DSG baselines rely on ground-truth node instance segmentation for graph neural network inference, we additionally implement Open3DSG*†, which uses OpenFunGraph's fused 3D nodes for fair comparison. We report Recall@3 and Recall@10 for node metrics, and Recall@5 and Recall@10 for triplet metrics.",
"bbox": [91, 386, 483, 674],
"page_idx": 6
},
{
"type": "text",
"text": "6.2. Results",
"text_level": 1,
"bbox": [89, 688, 184, 702],
"page_idx": 6
},
{
"type": "text",
"text": "Quantitative results are presented in Tables 1 and 2. Overall, the FunGraph3D dataset poses a greater challenge than SceneFun3D [17] due to its more complex scenes, which contain a higher number of objects and interactive elements.",
"bbox": [89, 713, 482, 773],
"page_idx": 6
},
{
"type": "text",
"text": "Node evaluation. As shown in Table 1, OpenFunGraph surpasses ConceptGraph* [27] in R@3 by $160\\%$ on SceneFun3D and by $176\\%$ on FunGraph3D. ConceptGraph* primarily focuses on object perception, resulting in poor recall scores for interactive elements. With the added interactive element candidate detection (IED), ConceptGraph* + IED improves node recognition, but still falls short of OpenFunGraph by $22\\%$ in R@3 on SceneFun3D, and $43\\%$ in R@3",
"bbox": [88, 779, 482, 901],
"page_idx": 6
},
{
"type": "text",
"text": "on FunGraph3D, thanks to the dedicated node description stage proposed in OpenFunGraph. Our approach also outperforms Open3DSG-based baselines, achieving $95\\%$ and $29\\%$ higher scores than Open3DSG*† and Open3DSG* in R@3 on SceneFun3D, and $174\\%$ and $66\\%$ higher on FunGraph3D. The limited ability of Open3DSG-based methods to identify interactive elements arises from their focus on object-level features during training, whereas our approach employs a more practical open-vocabulary inference pipeline, free from these training constraints.",
"bbox": [511, 290, 906, 441],
"page_idx": 6
},
{
"type": "text",
"text": "Triplet evaluation. Table 2 shows triplet prediction results. On SceneFun3D and FunGraph3D, benefiting from accurate node recognition and the sequential reasoning strategy for functional inference, OpenFunGraph outperforms ConceptGraph* + IED by $76\\%$ and $189\\%$ in R@5, and Open3DSG*† by $179\\%$ and $308\\%$. Notably, Open3DSG-based baselines struggle with functional relationships, as they rely on spatial edge features from adjacent instances. ConceptGraph-based methods, which prompt the LLM to predict all possible connections, also perform worse when compared to our sequential reasoning strategy due to the increased interpretive complexity imposed on the LLM. Figure 5 visualizes qualitative results for OpenFunGraph. In the left scene, our confidence-aware remote relationship reasoning successfully infers that the light switch is more likely to control the ceiling light rather than the two table light bulbs. In the right scene, the local functional relationship between the handle and the door is accurately identified. Additionally, the fan is most confidently inferred to be powered by the nearby electric outlet.",
"bbox": [511, 444, 908, 748],
"page_idx": 6
},
{
"type": "text",
"text": "6.3. Ablation studies",
"text_level": 1,
"bbox": [511, 756, 671, 770],
"page_idx": 6
},
{
"type": "text",
"text": "We ablate three key modules in our pipeline, i.e., the GroundingDINO prompts for interactive element candidate detection, sequential reasoning, and confidence-aware remote relationship reasoning, presented in Table 3. The prompting strategy for GroundingDINO, which combines assistive object and element tags, proves effective. Using only element tags reduces node R@3 by $19\\%$ and $10\\%$, as well as triplet R@5 by $20\\%$ and $22\\%$ on the two",
"bbox": [511, 779, 908, 902],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/8e6d79b6ec3c876cf77abe515e9ef48596192f717f5249264be1ddacb3c47981.jpg",
"table_caption": [
"Tab. 2. Triplet evaluation on the SceneFun3D [17] and FunGraph3D datasets. All marks keep the same meaning as in Table 1. Node Assoc. refers to the node association metric while Edge Pred. means the edge prediction metric."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"3\">Methods</td><td colspan=\"6\">SceneFun3D [17]</td><td colspan=\"6\">FunGraph3D (Ours)</td></tr><tr><td colspan=\"2\">Node Assoc.</td><td colspan=\"2\">Edge Pred.</td><td colspan=\"2\">Overall Triplets</td><td colspan=\"2\">Node Assoc.</td><td colspan=\"2\">Edge Pred.</td><td colspan=\"2\">Overall Triplets</td></tr><tr><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td></tr><tr><td>Open3DSG* [41]</td><td>47.2</td><td>58.0</td><td>69.2</td><td>78.8</td><td>32.7</td><td>45.7</td><td>22.8</td><td>36.7</td><td>47.9</td><td>55.9</td><td>10.5</td><td>20.0</td></tr><tr><td>Open3DSG*† [41]</td><td>33.6</td><td>38.8</td><td>64.4</td><td>72.3</td><td>21.6</td><td>28.1</td><td>15.7</td><td>24.2</td><td>46.6</td><td>55.7</td><td>7.3</td><td>13.5</td></tr><tr><td>ConceptGraph* [27]</td><td>5.6</td><td>6.8</td><td>80.2</td><td>95.0</td><td>4.7</td><td>6.4</td><td>1.9</td><td>2.8</td><td>51.5</td><td>84.6</td><td>1.1</td><td>2.5</td></tr><tr><td>ConceptGraph* [27] + IED</td><td>45.4</td><td>49.3</td><td>75.6</td><td>90.9</td><td>34.3</td><td>44.5</td><td>18.8</td><td>22.8</td><td>46.1</td><td>79.7</td><td>10.3</td><td>18.9</td></tr><tr><td>OpenFunGraph (Ours)</td><td>68.3</td><td>73.0</td><td>88.1</td><td>96.2</td><td>60.4</td><td>70.3</td><td>45.8</td><td>49.3</td><td>65.1</td><td>91.4</td><td>29.8</td><td>45.0</td></tr></table>",
"bbox": [94, 88, 903, 232],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/9013e9bd5a461892dcf82cf0bb9acda8e96b4d478a975784354a48a14719121d.jpg",
"image_caption": [
"Fig. 6. Functional 3D Scene Graphs for Robotic Manipulation. Left: 3D scene and functional graph generated after querying 'turning on the light.' Right: Robot interacting with scene elements as guided by the functional scene graph."
],
"image_footnote": [],
"bbox": [93, 273, 256, 404],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/098dfd7bbece67545b245a30b7a4074163550ddc868acef1fdf4928ca88667a6.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [259, 275, 480, 404],
"page_idx": 7
},
{
"type": "text",
"text": "datasets respectively, due to incomplete detections. Replacing sequential reasoning with a direct approach, where the LLM infers functional relationships across all nodes, significantly reduces triplet reasoning performance (42% and 32% in triplet R@5 on SceneFun3D and FunGraph3D respectively). Sequential reasoning decomposes complex relationships into distinct types, making LLM processing easier. Ablating confidence-aware remote relationship reasoning by randomly selecting connections, instead of using the highest-confidence edge (e.g., choosing a random light for the switch instead of the most confident ceiling light), leads to a decrease in triplet R@5 by 7% and 11% on the two datasets respectively. This illustrates that our mechanism selects more reasonable edges by incorporating the common sense understanding of the foundation models.",
"bbox": [88, 479, 482, 705],
"page_idx": 7
},
{
"type": "text",
"text": "6.4. Downstream Applications",
"text_level": 1,
"bbox": [89, 718, 326, 734],
"page_idx": 7
},
{
"type": "text",
"text": "We showcase the versatility of the proposed functional 3D scene graph representation in downstream applications that require complex reasoning about indoor functionalities and task-oriented interactions.",
"bbox": [89, 741, 482, 801],
"page_idx": 7
},
{
"type": "text",
"text": "3D inventory question answering. To enable functional reasoning, we convert the graph structure into a JSON list that the LLM can easily query. With this list, the LLM can answer questions such as \"How can I turn on the ceiling light?\" Using the functional 3D scene graph's nodes (objects, interactive elements) and edges (functional relation",
"bbox": [88, 809, 482, 901],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/42db5fb8d73481b70e595cfbf684bfe7ec3c5aee232864fd732845b0e0b8da10.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Experiments</td><td colspan=\"2\">Overall Nodes</td><td colspan=\"2\">Overall Triplets</td></tr><tr><td>R@3</td><td>R@10</td><td>R@5</td><td>R@10</td></tr><tr><td>w/o prompts for element detection</td><td>59.3</td><td>68.7</td><td>48.3</td><td>59.9</td></tr><tr><td>w/o sequential edge reasoning*</td><td>73.0</td><td>82.8</td><td>34.8</td><td>48.9</td></tr><tr><td>w/o confidence-aware edge reasoning*</td><td>73.0</td><td>82.8</td><td>56.0</td><td>65.1</td></tr><tr><td>Ours</td><td>73.0</td><td>82.8</td><td>60.4</td><td>70.3</td></tr><tr><td>w/o prompts for element detection</td><td>49.9</td><td>59.1</td><td>23.1</td><td>37.6</td></tr><tr><td>w/o sequential edge reasoning*</td><td>55.5</td><td>65.8</td><td>20.2</td><td>33.8</td></tr><tr><td>w/o confidence-aware edge reasoning*</td><td>55.5</td><td>65.8</td><td>26.8</td><td>40.1</td></tr><tr><td>Ours</td><td>55.5</td><td>65.8</td><td>29.8</td><td>45.0</td></tr></table>",
"bbox": [517, 273, 903, 388],
"page_idx": 7
},
{
"type": "text",
"text": "Tab. 3. Ablation study on SceneFun3D [17] (Top) and our FunGraph3D (Bottom). Note that edge reasoning $(^{*})$ impacts only the triplet metric and does not affect node recognition performance.",
"bbox": [511, 391, 903, 434],
"page_idx": 7
},
{
"type": "text",
"text": "ships), the LLM can provide responses such as \"You can turn on the ceiling light using the light switch plate located at position [0.611, 0.113, 0.732]. From the provided JSON list, we can see the light switch plate with id 0 has the highest confidence level of 0.8 with the ceiling light fixture.\"",
"bbox": [511, 448, 903, 523],
"page_idx": 7
},
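A sketch of the graph-to-JSON flattening used for querying, reusing the dataclasses sketched after Section 3 above; the key names in the entries are illustrative, not the paper's actual schema.

```python
import json

# Flatten the functional 3D scene graph into the JSON list the LLM queries
# for inventory question answering; key names are editorial assumptions.
def graph_to_json(graph):
    entries = []
    for edge in graph.relations:
        el = graph.elements[edge.element_id]
        obj = graph.objects[edge.object_id]
        entries.append({
            "id": el.node_id,
            "interactive_element": el.description,
            "position": list(el.bbox_3d[:3]),  # illustrative 3D anchor point
            "controls": obj.description,
            "relationship": edge.relation,     # e.g. "turns on"
            "type": edge.kind,                 # "local" or "remote"
            "confidence": edge.confidence,
        })
    return json.dumps(entries, indent=2)

# The serialized list is then placed in the LLM context together with the
# user query, e.g. "How can I turn on the ceiling light?".
```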
{
"type": "text",
"text": "Robotic manipulation. The functional 3D scene graph also supports robotic manipulation [43, 108] for user queries that involve functional reasoning, as illustrated in Figure 6. Similar to inventory question answering, the LLM queries the JSON list to locate the interactive element referenced in the query. The robot then navigates to and interacts with the element using the methods described in [43].",
"bbox": [511, 530, 905, 636],
"page_idx": 7
},
{
"type": "text",
"text": "7. Conclusion",
"text_level": 1,
"bbox": [511, 648, 633, 666],
"page_idx": 7
},
{
"type": "text",
"text": "We introduce Functional 3D Scene Graphs, a novel representation that jointly models objects, interactive elements, and their functional relationships in 3D indoor environments. Our open-vocabulary pipeline leverages the common-sense knowledge of foundation models to infer functional 3D scene graphs and enable flexible querying. To support systematic benchmarking, we develop a high-fidelity dataset of real-world 3D indoor environments with multi-modal data and functional annotations. Experiments on this and existing datasets show that our method significantly outperforms baselines. We further demonstrate the versatility of our representation for downstream tasks such as 3D question answering and robotic manipulation.",
"bbox": [511, 674, 906, 871],
"page_idx": 7
},
{
"type": "text",
"text": "Acknowledgments. We would like to thank colleagues and friends who helped us capture the data of FunGraph3D: Christine Engelmann, Dominik Faerber, Elisabetta Fedele, Xudong Jiang, Xin Kong, Aoxue Liu and Houssam Naous. This work was supported by the Swiss National Science Foundation Advanced Grant 216260: \"Beyond Frozen Worlds: Capturing Functional 3D Digital Twins from the Real World\". AD is supported by the Max Planck ETH Center for Learning Systems (CLS) and FE by an SNSF PostDoc.Mobility Fellowship.",
"bbox": [89, 90, 485, 233],
"page_idx": 8
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [91, 257, 187, 273],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 3, 4",
"[2] Christopher Agia, Krishna Murthy Jatavallabhula, Mohamed Khodeir, Ondrej Miksik, Vibhav Vineet, Mustafa Mukadam, Liam Paull, and Florian Shkurti. Taskography: Evaluating robot task planning over large 3d scene graphs. In Conference on Robot Learning (CoRL), 2022. 1",
"[3] Iro Armeni, Ozan Sener, Amir R Zamir, Helen Jiang, Ioannis Brilakis, Martin Fischer, and Silvio Savarese. 3d semantic parsing of large-scale indoor spaces. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2",
"[4] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In International Conference on Computer Vision (ICCV), 2019. 1, 2",
"[5] Matan Atzmon, Haggai Maron, and Yaron Lipman. Point convolutional neural networks by extension operators. ACM Transactions On Graphics (TOG), 2018. 2",
"[6] Prithviraj Banerjee, Sindi Shkodrani, Pierre Moulon, Shreyas Hampali, Fan Zhang, Jade Fountain, Edward Miller, Selen Basol, Richard Newcombe, Robert Wang, et al. Introducing hot3d: An egocentric dataset for 3d hand and object tracking. arXiv preprint arXiv:2406.09598, 2024. 2",
"[7] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, et al. ARKitScenes: A diverse real-world dataset for 3d indoor scene understanding using mobile RGB-D data. In International Conference on Neural Information Processing Systems (NeurIPS), 2021. 2, 5",
"[8] Valentin Bieri, Marco Zamboni, Nicolas S. Blumer, Qingxuan Chen, and Francis Engelmann. OpenCity3D: 3D Urban Scene Understanding with Vision-Language Models. In IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2025. 2",
"[9] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al."
],
"bbox": [107, 282, 480, 898],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021. 3",
"[10] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3d: Learning from rgb-d data in indoor environments. International Conference on 3D Vision (3DV), 2017. 2",
"[11] Lianggangxu Chen, Xuejiao Wang, Jiale Lu, Shaohui Lin, Changbo Wang, and Gaoqi He. Clip-driven open-vocabulary 3d scene graph generation via cross-modality contrastive learning. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 1",
"[12] Zerui Chen, Yana Hasson, Cordelia Schmid, and Ivan Laptev. Alignsdf: Pose-aligned signed distance fields for hand-object reconstruction. In European Conference on Computer Vision (ECCV), 2022. 2",
"[13] Woojin Cho, Jihyun Lee, Minjae Yi, Minje Kim, Taeyun Woo, Donghwan Kim, Taewook Ha, Hyokeun Lee, Je-Hwan Ryu, Woontack Woo, et al. Dense hand-object (HO) GraspNet with full grasping taxonomy and dynamics. European Conference on Computer Vision (ECCV), 2024. 2",
"[14] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2",
"[15] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2",
"[16] Alexandros Delitzas, Maria Parelli, Nikolas Hars, Georgios Vlassis, Sotirios-Konstantinos Anagnostidis, Gregor Bachmann, and Thomas Hofmann. Multi-clip: Contrastive vision-language pre-training for question answering tasks in 3d scenes. In British Machine Vision Conference (BMVC), 2023. 2",
"[17] Alexandros Delitzas, Ayca Takmaz, Federico Tombari, Robert Sumner, Marc Pollefeys, and Francis Engelmann. Scenefun3d: Fine-grained functionality and affordance understanding in 3d scenes. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 1, 2, 3, 5, 6, 7, 8",
"[18] Shengheng Deng, Xun Xu, Chaozheng Wu, Ke Chen, and Kui Jia. 3d affordancenet: A benchmark for visual object affordance understanding. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2",
"[19] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In International Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2018. 6",
"[20] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT, 2019. 6",
"[21] Helisa Dhamo, Fabian Manhardt, Nassir Navab, and Federico Tombari. Graph-to-3d: End-to-end generation and"
],
"bbox": [522, 92, 903, 898],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"manipulation of 3d scenes using scene graphs. In International Conference on Computer Vision (ICCV), 2021. 1, 2",
"[22] Thanh-Toan Do, Anh Nguyen, and Ian Reid. Affordancenet: An end-to-end deep learning approach for object affordance detection. In International Conference on Robotics and Automation (ICRA), 2018. 2",
"[23] Francis Engelmann, Martin Bokeloh, Alireza Fathi, Bastian Leibe, and Matthias Nießner. 3d-mpa: Multi-proposal aggregation for 3d semantic instance segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2",
|
| 1288 |
+
"[24] Francis Engelmann, Fabian Manhardt, Michael Niemeyer, Keisuke Tateno, Marc Pollefeys, and Federico Tombari. Opennerf: Open set 3d neural scene segmentation with pixel-wise features and rendered novel views. International Conference on Learning Representations (ICLR), 2024. 2",
|
| 1289 |
+
"[25] Zicong Fan, Maria Parelli, Maria Eleni Kadoglou, Xu Chen, Muhammed Kocabas, Michael J Black, and Otmar Hilliges. Hold: Category-agnostic 3d reconstruction of interacting hands and objects from video. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2",
|
| 1290 |
+
"[26] Kuan Fang, Te-Lin Wu, Daniel Yang, Silvio Savarese, and Joseph J Lim. Demo2vec: Reasoning object affordances from online videos. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2",
|
| 1291 |
+
"[27] Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. ConceptGraphs: Open-vocabulary 3d scene graphs for perception and planning. In International Conference on Robotics and Automation (ICRA), 2024. 1, 3, 4, 6, 7, 8",
|
| 1292 |
+
"[28] Lei Han, Tian Zheng, Lan Xu, and Lu Fang. Occuseg: Occupancy-aware 3d instance segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2",
|
| 1293 |
+
"[29] Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2",
|
| 1294 |
+
"[30] Joy Hsu, Jiayuan Mao, and Jiajun Wu. Ns3d: Neurosymbolic grounding of 3d objects and relations. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2",
|
| 1295 |
+
"[31] Zeyu Hu, Xuyang Bai, Jiaxiang Shang, Runze Zhang, Jiayu Dong, Xin Wang, Guangyuan Sun, Hongbo Fu, and Chiew-Lan Tai. Vmnet: Voxel-mesh network for geodesic-aware 3d semantic segmentation. In International Conference on Computer Vision (ICCV), 2021. 2",
|
| 1296 |
+
"[32] Binh-Son Hua, Minh-Khoi Tran, and Sai-Kit Yeung. Pointwise convolutional neural networks. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2018.",
|
| 1297 |
+
"[33] Rui Huang, Songyou Peng, Ayca Takmaz, Federico Tombari, Marc Pollefeys, Shiji Song, Gao Huang, and Francis Engelmann. Segment3d: Learning fine-grained class-agnostic 3d segmentation without manual labels. European Conference on Computer Vision (ECCV), 2024. 2"
|
| 1298 |
+
],
|
| 1299 |
+
"bbox": [
|
| 1300 |
+
99,
|
| 1301 |
+
92,
|
| 1302 |
+
482,
|
| 1303 |
+
898
|
| 1304 |
+
],
|
| 1305 |
+
"page_idx": 9
|
| 1306 |
+
},
|
| 1307 |
+
{
|
| 1308 |
+
"type": "list",
|
| 1309 |
+
"sub_type": "ref_text",
|
| 1310 |
+
"list_items": [
|
| 1311 |
+
"[34] Shijia Huang, Yilun Chen, Jiaya Jia, and Liwei Wang. Multi-view transformer for 3d visual grounding. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2",
|
| 1312 |
+
"[35] Xinyu Huang, Yi-Jie Huang, Youcai Zhang, Weiwei Tian, Rui Feng, Yuejie Zhang, Yanchun Xie, Yaqian Li, and Lei Zhang. Open-set image tagging with multi-grained text supervision. arXiv e-prints, 2023. 4",
|
| 1313 |
+
"[36] Krishna Murthy Jatavallabhula, Alihusein Kuwajerwala, Qiao Gu, Mohd Omama, Tao Chen, Alaa Maalouf, Shuang Li, Ganesh Iyer, Soroush Saryazdi, Nikhil Keetha, et al. Conceptfusion: Open-set multimodal 3d mapping. ICRA2023 Workshop on Pretraining for Robotics (PT4R), 2023. 2",
|
| 1314 |
+
"[37] Guangda Ji, Silvan Weder, Francis Engelmann, Marc Pollefeys, and Hermann Blum. Arkit labelmaker: A new scale for indoor 3d scene understanding. International Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 2",
|
| 1315 |
+
"[38] Li Jiang, Hengshuang Zhao, Shaoshuai Shi, Shu Liu, ChiWing Fu, and Jiaya Jia. Pointgroup: Dual-set point grouping for 3d instance segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2",
|
| 1316 |
+
"[39] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Leref: Language embedded radiance fields. In International Conference on Computer Vision (ICCV), 2023. 2, 4",
|
| 1317 |
+
"[40] Sebastian Koch, Pedro Hermosilla, Narunas Vaskevicius, Mirco Colosi, and Timo Ropinski. Lang3dsg: Language-based contrastive pre-training for 3d scene graph prediction. In International Conference on 3d Vision (3dV), 2024. 1, 2",
|
| 1318 |
+
"[41] Sebastian Koch, Narunas Vaskevicius, Mirco Colosi, Pedro Hermosilla, and Timo Ropinski. Open3design: Open-vocabulary 3d scene graphs from point clouds with queryable objects and open-set relationships. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 1, 2, 3, 6, 7, 8",
|
| 1319 |
+
"[42] Loic Landrieu and Martin Simonovsky. Large-scale point cloud semantic segmentation with superpoint graphs. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2",
|
| 1320 |
+
"[43] Oliver Lemke, Zuria Bauer, René Zurbrugg, Marc Pollefeys, Francis Engelmann, and Hermann Blum. Spot-Compose: A framework for open-vocabulary object retrieval and drawer manipulation in point clouds. In International Conference on Robotics and Automation (ICRA), 2024. 8",
|
| 1321 |
+
"[44] Qi Li, Kaichun Mo, Yanchao Yang, Hang Zhao, and Leonidas Guibas. IFR-Explore: Learning inter-object functional relationships in 3d indoor scenes. International Conference on Learning Representations (ICLR), 2022. 2",
|
| 1322 |
+
"[45] Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. Pointconn: Convolution on x-transformed points. International Conference on Neural Information Processing Systems (NeurIPS), 2018. 2"
|
| 1323 |
+
],
|
| 1324 |
+
"bbox": [
|
| 1325 |
+
522,
|
| 1326 |
+
92,
|
| 1327 |
+
903,
|
| 1328 |
+
900
|
| 1329 |
+
],
|
| 1330 |
+
"page_idx": 9
|
| 1331 |
+
},
|
| 1332 |
+
{
|
| 1333 |
+
"type": "list",
|
| 1334 |
+
"sub_type": "ref_text",
|
| 1335 |
+
"list_items": [
|
| 1336 |
+
"[46] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In International Conference on Neural Information Processing Systems (NeurIPS), 2023. 4",
|
| 1337 |
+
"[47] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024.",
|
| 1338 |
+
"[48] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava next: Improved reasoning,OCR, and world knowledge, 2024.3,4",
|
| 1339 |
+
"[49] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. European Conference on Computer Vision (ECCV), 2024. 3, 4",
|
| 1340 |
+
"[50] Cewu Lu, Ranjay Krishna, Michael Bernstein, and Li Fei-Fei. Visual relationship detection with language priors. In European Conference on Computer Vision (ECCV), 2016. 6",
|
| 1341 |
+
"[51] Yang Miao, Francis Engelmann, Olga Vysotska, Federico Tombari, Marc Pollefeys, and Daniel Béla Baráth. SceneGraphLoc: Cross-modal coarse visual localization on 3d scene graphs. In European Conference on Computer Vision (ECCV), 2024. 1",
|
| 1342 |
+
"[52] Kaichun Mo, Yuzhe Qin, Fanbo Xiang, Hao Su, and Leonidas Guibas. O2o-afford: Annotation-free large-scale object-object affordance learning. In Conference on Robot Learning (CoRL), 2022. 2",
|
| 1343 |
+
"[53] Tushar Nagarajan and Kristen Grauman. Learning affordance landscapes for interaction exploration in 3d environments. International Conference on Neural Information Processing Systems (NeurIPS), 2020. 2",
|
| 1344 |
+
"[54] Tushar Nagarajan, Christoph Feichtenhofer, and Kristen Grauman. Grounded human-object interaction hotspots from video. In International Conference on Computer Vision (ICCV), 2019. 2",
|
| 1345 |
+
"[55] Maria Parelli, Alexandros Delitzas, Nikolas Hars, Georgios Vlassis, Sotirios Anagnostidis, Gregor Bachmann, and Thomas Hofmann. CLIP-Guided Vision-Language Pre-Training for Question Answering in 3D Scenes. In International Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2023. 2",
|
| 1346 |
+
"[56] Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, et al. Openscene: 3d scene understanding with open vocabularies. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2",
|
| 1347 |
+
"[57] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2",
|
| 1348 |
+
"[58] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. International Conference on Neural Information Processing Systems (NeurIPS), 2017. 2"
|
| 1349 |
+
],
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
99,
|
| 1352 |
+
90,
|
| 1353 |
+
485,
|
| 1354 |
+
900
|
| 1355 |
+
],
|
| 1356 |
+
"page_idx": 10
|
| 1357 |
+
},
|
| 1358 |
+
{
|
| 1359 |
+
"type": "list",
|
| 1360 |
+
"sub_type": "ref_text",
|
| 1361 |
+
"list_items": [
|
| 1362 |
+
"[59] Minghan Qin, Wanhua Li, Jiawei Zhou, Haoqian Wang, and Hanspeter Pfister. Langsplat: 3d language gaussian splatting. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2",
|
| 1363 |
+
"[60] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), 2021. 2, 6",
|
| 1364 |
+
"[61] Krishan Rana, Jesse Haviland, Sourav Garg, Jad Abou-Chakra, Ian Reid, and Niko Suenderhauf. SayPlan: Grounding large language models using 3d scene graphs for scalable robot task planning. In Conference on Robot Learning (CoRL), 2023. 1",
|
| 1365 |
+
"[62] Junha Roh, Karthik Desingh, Ali Farhadi, and Dieter Fox. Languagerefer: Spatial-language model for 3d visual grounding. In Conference on Robot Learning (CoRL), 2022. 2",
|
| 1366 |
+
"[63] Antoni Rosinol, Arjun Gupta, Marcus Abate, Jingnan Shi, and Luca Carlone. 3d dynamic scene graphs: Actionable spatial perception with places, objects, and humans. Robotics, Science and Systems, 2020. 1, 2",
|
| 1367 |
+
"[64] Antoni Rosinol, Andrew Violette, Marcus Abate, Nathan Hughes, Yun Chang, Jingnan Shi, Arjun Gupta, and Luca Carlone. Kimera: From slam to spatial perception with 3d dynamic scene graphs. International Journal on Robotics Research (IJRR), 2021. 1, 2",
|
| 1368 |
+
"[65] David Rozenberszki, Or Litany, and Angela Dai. Language-grounded indoor 3d semantic segmentation in the wild. In European Conference on Computer Vision (ECCV), 2022. 2",
|
| 1369 |
+
"[66] Sayan Deb Sarkar, Ondrej Miksik, Marc Pollefeys, Daniel Barath, and Iro Armeni. SGAligner: 3d scene alignment with scene graphs. In International Conference on Computer Vision (ICCV), 2023. 1",
|
| 1370 |
+
"[67] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. SuperGlue: Learning feature matching with graph neural networks. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6",
|
| 1371 |
+
"[68] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-Motion Revisited. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 6",
|
| 1372 |
+
"[69] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. 6",
|
| 1373 |
+
"[70] Jonas Schult, Francis Engelmann, Alexander Hermans, Or Litany, Siyu Tang, and Bastian Leibe. Mask3d: Mask transformer for 3d semantic instance segmentation. In International Conference on Robotics and Automation (ICRA), 2023. 1, 2",
|
| 1374 |
+
"[71] Aleksandar Shtedritski, Christian Rupprecht, and Andrea Vedaldi. What does Clip Know About a Red Circle? Visual Prompt Engineering for VLMs. In International Conference on Computer Vision (ICCV), 2023. 4"
|
| 1375 |
+
],
|
| 1376 |
+
"bbox": [
|
| 1377 |
+
522,
|
| 1378 |
+
92,
|
| 1379 |
+
905,
|
| 1380 |
+
900
|
| 1381 |
+
],
|
| 1382 |
+
"page_idx": 10
|
| 1383 |
+
},
|
| 1384 |
+
{
|
| 1385 |
+
"type": "list",
|
| 1386 |
+
"sub_type": "ref_text",
|
| 1387 |
+
"list_items": [
|
| 1388 |
+
"[72] Tao Sun, Yan Hao, Shengyu Huang, Silvio Savarese, Konrad Schindler, Marc Pollefeys, and Iro Armeni. Nothing Stands Still: A Spatiotemporal Benchmark on 3D Point Cloud Registration Under Large Geometric and Temporal Change. ISPRS Journal of Photogrammetry and Remote Sensing, 2025. 2",
|
| 1389 |
+
"[73] Ayca Takmaz, Elisabetta Fedele, Robert W Sumner, Marc Pollefeys, Federico Tombari, and Francis Engelmann. Openmask3d: Open-vocabulary 3d instance segmentation. International Conference on Neural Information Processing Systems (NeurIPS), 2023. 2, 4",
|
| 1390 |
+
"[74] Ayca Takmaz, Jonas Schult, Irem Kaftan, Mertcan Akçay, Bastian Leibe, Robert Sumner, Francis Engelmann, and Siyu Tang. 3D Segmentation of Humans in Point Clouds with Synthetic Data. In International Conference on Computer Vision (ICCV), 2023. 2",
|
| 1391 |
+
"[75] Ayca Takmaz, Alexandros Delitzas, Robert W. Sumner, Francis Engelmann, Johanna Wald, and Federico Tombari. Search3D: Hierarchical Open-Vocabulary 3D Segmentation. IEEE Robotics and Automation Letters (RA-L), 2025. 2",
|
| 1392 |
+
"[76] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. Kpconv: Flexible and deformable convolution for point clouds. In International Conference on Computer Vision (ICCV), 2019. 2",
|
| 1393 |
+
"[77] Thang Vu, Kookhoi Kim, Tung M Luu, Thanh Nguyen, and Chang D Yoo. Softgroup for 3d instance segmentation on point clouds. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2",
|
| 1394 |
+
"[78] Johanna Wald, Helisa Dhamo, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs from 3d indoor reconstructions. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2, 3",
|
| 1395 |
+
"[79] Ziqin Wang, Bowen Cheng, Lichen Zhao, Dong Xu, Yang Tang, and Lu Sheng. Vl-sat: Visual-linguistic semantics assisted training for 3d semantic scene graph prediction in point cloud. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2",
|
| 1396 |
+
"[80] Silvan Weder, Francis Engelmann, Johannes L Schonberger, Akihito Seki, Marc Pollefeys, and Martin R Oswald. Alster: A Local Spatio-temporal Expert for Online 3D Semantic Reconstruction. IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2023. 2",
|
| 1397 |
+
"[81] Silvan Weder, Hermann Blum, Francis Engelmann, and Marc Pollefeys. Labelmaker: Automatic semantic label generation from rgb-d trajectories. In International Conference on 3d Vision (3dV), 2024. 2",
|
| 1398 |
+
"[82] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. International Conference on Neural Information Processing Systems (NeurIPS), 2022. 4",
|
| 1399 |
+
"[83] Abdelrhman Werby, Chenguang Huang, Martin Büchner, Abhinav Valada, and Wolfram Burgard. Hierarchical open-vocabulary 3d scene graphs for language-grounded robot"
|
| 1400 |
+
],
|
| 1401 |
+
"bbox": [
|
| 1402 |
+
99,
|
| 1403 |
+
92,
|
| 1404 |
+
482,
|
| 1405 |
+
900
|
| 1406 |
+
],
|
| 1407 |
+
"page_idx": 11
|
| 1408 |
+
},
|
| 1409 |
+
{
|
| 1410 |
+
"type": "list",
|
| 1411 |
+
"sub_type": "ref_text",
|
| 1412 |
+
"list_items": [
|
| 1413 |
+
"navigation. In First Workshop on Vision-Language Models for Navigation and Manipulation at ICRA 2024, 2024. 1",
|
| 1414 |
+
"[84] Shun-Cheng Wu, Johanna Wald, Keisuke Tateno, Nassir Navab, and Federico Tombari. SceneGraphFusion: Incremental 3d scene graph prediction from rgb-d sequences. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 2",
|
| 1415 |
+
"[85] Shun-Cheng Wu, Keisuke Tateno, Nassir Navab, and Federico Tombari. Incremental 3d semantic scene graph prediction from rgb sequences. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2",
|
| 1416 |
+
"[86] Chao Xu, Yixin Chen, He Wang, Song-Chun Zhu, Yixin Zhu, and Siyuan Huang. Partafford: Part-level affordance discovery from 3d objects. European Conference on Computer Vision (ECCV) Workshops, 2022. 2",
|
| 1417 |
+
"[87] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 6",
|
| 1418 |
+
"[88] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph r-cnn for scene graph generation. In European Conference on Computer Vision (ECCV), 2018.",
|
| 1419 |
+
"[89] Jingkang Yang, Yi Zhe Ang, Zujin Guo, Kaiyang Zhou, Wayne Zhang, and Ziwei Liu. Panoptic scene graph generation. In European Conference on Computer Vision (ECCV), 2022. 6",
|
| 1420 |
+
"[90] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In International Conference on Computer Vision (ICCV), 2021. 2",
|
| 1421 |
+
"[91] Yufei Ye, Abhinav Gupta, and Shubham Tulsiani. What's in your hands? 3d reconstruction of generic objects in hands. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2",
|
| 1422 |
+
"[92] Yufei Ye, Abhinav Gupta, Kris Kitani, and Shubham Tulsiani. G-hop: Generative hand-object prior for interaction reconstruction and grasp synthesis. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2",
|
| 1423 |
+
"[93] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In International Conference on Computer Vision (ICCV), 2023. 2, 5, 6",
|
| 1424 |
+
"[94] Gonca Yilmaz, Songyou Peng, Marc Pollefeys, Francis Engelmann, and Hermann Blum. OpenDAS: OpenVocabulary Domain Adaptation for 2D and 3D Segmentation. arXiv preprint arXiv:2405.20141, 2024. 2",
|
| 1425 |
+
"[95] Tomoya Yoshida, Shuhei Kurita, Taichi Nishimura, and Shinsuke Mori. Text-driven affordance learning from egocentric vision. arXiv preprint arXiv:2404.02523, 2024. 2",
|
| 1426 |
+
"[96] Yuanwen Yue, Sabarinath Mahadevan, Jonas Schult, Francis Engelmann, Bastian Leibe, Konrad Schindler, and Theodora Kontogianni. Agile3d: Attention guided interactive multi-object 3d segmentation. International Conference on Learning Representations (ICLR), 2024. 2",
|
| 1427 |
+
"[97] Guangyao Zhai, Evin Pinar Örnek, Shun-Cheng Wu, Yan Di, Federico Tombari, Nassir Navab, and Benjamin"
|
| 1428 |
+
],
|
| 1429 |
+
"bbox": [
|
| 1430 |
+
524,
|
| 1431 |
+
92,
|
| 1432 |
+
903,
|
| 1433 |
+
900
|
| 1434 |
+
],
|
| 1435 |
+
"page_idx": 11
|
| 1436 |
+
},
|
| 1437 |
+
{
|
| 1438 |
+
"type": "list",
|
| 1439 |
+
"sub_type": "ref_text",
|
| 1440 |
+
"list_items": [
|
| 1441 |
+
"Busam. Commonsscenes: Generating commonsense 3d indoor scenes with scene graphs. International Conference on Neural Information Processing Systems (NeurIPS), 2023. 1, 2",
|
| 1442 |
+
"[98] Wei Zhai, Hongchen Luo, Jing Zhang, Yang Cao, and Dacheng Tao. One-shot object affordance detection in the wild. International Journal on Computer Vision (IJCV), 2022. 2",
|
| 1443 |
+
"[99] Chaoyi Zhang, Jianhui Yu, Yang Song, and Weidong Cai. Exploiting edge-oriented reasoning for 3d point-based scene graph analysis. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2",
|
| 1444 |
+
"[100] Chenyangguang Zhang, Yan Di, Ruida Zhang, Guangyao Zhai, Fabian Manhardt, Federico Tombari, and Xiangyang Ji. Ddf-ho: Hand-held object reconstruction via conditional directed distance field. International Conference on Neural Information Processing Systems (NeurIPS), 2023. 2",
|
| 1445 |
+
"[101] Chenyangguang Zhang, Guanlong Jiao, Yan Di, Gu Wang, Ziqin Huang, Ruida Zhang, Fabian Manhardt, Bowen Fu, Federico Tombari, and Xiangyang Ji. Moho: Learning single-view hand-held object reconstruction with multiview occlusion-aware supervision. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2",
|
| 1446 |
+
"[102] Shoulong Zhang, Aimin Hao, Hong Qin, et al. Knowledge-inspired 3d scene graph prediction in point cloud. International Conference on Neural Information Processing Systems (NeurIPS), 2021. 2",
|
| 1447 |
+
"[103] Yiming Zhang, ZeMing Gong, and Angel X Chang. Multi3drefer: Grounding text description to multiple 3d objects. In International Conference on Computer Vision (ICCV), 2023. 2",
|
| 1448 |
+
"[104] Youcai Zhang, Xinyu Huang, Jinyu Ma, Zhaoyang Li, Zhaochuan Luo, Yanchun Xie, Yuzhuo Qin, Tong Luo, Yaqian Li, Shilong Liu, et al. Recognize anything: A strong image tagging model. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 3, 4",
|
| 1449 |
+
"[105] Shijie Zhou, Haoran Chang, Sicheng Jiang, Zhiwen Fan, Zehao Zhu, Dejia Xu, Pradyumna Chari, Suya You, Zhangyang Wang, and Achuta Kadambi. Feature 3dgs: Supercharging 3d gaussian splattering to enable distilled feature fields. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2",
|
| 1450 |
+
"[106] Zijian Zhou, Zheng Zhu, Holger Caesar, and Miaojing Shi. Openpsg: Open-set panoptic scene graph generation via large multimodal models. European Conference on Computer Vision (ECCV), 2024. 6",
|
| 1451 |
+
"[107] Xingxing Zuo, Pouya Samangouei, Yunwen Zhou, Yan Di, and Mingyang Li. Fmgs: Foundation model embedded 3d gaussian splatting for holistic 3d scene understanding. International Journal on Computer Vision (IJCV), 2024. 2",
|
| 1452 |
+
"[108] René Zurbrügg, Yifan Liu, Francis Engelmann, Suryansh Kumar, Marco Hutter, Vaishakh Patil, and Fisher Yu. ICGNet: A Unified Approach for Instance-centric Grasping. In International Conference on Robotics and Automation (ICRA), 2024. 8"
|
| 1453 |
+
],
|
| 1454 |
+
"bbox": [
|
| 1455 |
+
93,
|
| 1456 |
+
90,
|
| 1457 |
+
482,
|
| 1458 |
+
878
|
| 1459 |
+
],
|
| 1460 |
+
"page_idx": 12
|
| 1461 |
+
}
|
| 1462 |
+
]
|
data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_model.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2503_19xxx/2503.19199/f854e417-8349-409c-b5ba-42db7341d3fa_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5547f6762e3b6e7ea57f169f3749d7f70b9800b06e7ee77d69ddbf111d3cd07e
+size 3183179
data/2025/2503_19xxx/2503.19199/full.md
ADDED
@@ -0,0 +1,339 @@
# Open-Vocabulary Functional 3D Scene Graphs for Real-World Indoor Spaces

Chenyangguang Zhang$^{1,2}$, Alexandros Delitzas$^{2,3}$, Fangjinhua Wang$^{2}$, Ruida Zhang$^{1}$, Xiangyang Ji$^{1}$, Marc Pollefeys$^{2,4}$, Francis Engelmann$^{2,5}$

$^{1}$Tsinghua University  $^{2}$ETH Zürich  $^{3}$MPI for Informatics  $^{4}$Microsoft  $^{5}$Stanford University

# Abstract

We introduce the task of predicting functional 3D scene graphs for real-world indoor environments from posed RGB-D images. Unlike traditional 3D scene graphs that focus on spatial relationships of objects, functional 3D scene graphs capture objects, interactive elements, and their functional relationships. Due to the lack of training data, we leverage foundation models, including visual language models (VLMs) and large language models (LLMs), to encode functional knowledge. We evaluate our approach on an extended SceneFun3D dataset and a newly collected dataset, FunGraph3D, both annotated with functional 3D scene graphs. Our method significantly outperforms adapted baselines, including Open3DSG and ConceptGraph, demonstrating its effectiveness in modeling complex scene functionalities. We also demonstrate downstream applications such as 3D question answering and robotic manipulation using functional 3D scene graphs. See our project page at https://openfungraph.github.io.

# 1. Introduction

This paper introduces functional 3D scene graphs for real-world indoor spaces from posed RGB-D images. 3D scene graphs offer a lightweight, abstract representation for capturing the comprehensive semantic structure of an environment [4]. They support a variety of applications, including 3D scene alignment [66], image localization [51], graph-conditioned 3D scene generation [21, 97], as well as robotics navigation [83] and task planning [2, 61].

Recent advances in 3D scene graph prediction [4, 11, 27, 40, 41, 63, 64, 78, 84] have enabled exciting developments across multiple areas, including scene graph inference from 3D reconstructions [11, 78], applications for robotic interactions [27, 84], online scene graph generation [84], open-vocabulary 3D scene graphs [40, 41] and large-scale, hierarchical scene graphs [4, 63, 64]. The performance of recent scene graph methods also benefits from advancements in 3D scene understanding techniques [14, 57, 70], which they rely on to extract objects and their semantics for modeling inter-object relationships. However, existing 3D scene graph estimation methods [27, 40, 78, 84] face important limitations: graph nodes are typically restricted to objects, and edges represent only spatial relationships. For instance, edges primarily capture relative positions, such as 'the TV is mounted on the wall' or 'the flower is placed on the table'—information already implicitly encoded by object positions. Crucially, these methods lack representations of small interactive elements [17] and their functional relationships with other scene objects, which are essential for finer-grained interactions (e.g., flipping a switch to turn on a light), making them less suitable for higher-level functional reasoning. The key idea of this paper is to enhance 3D scene graphs with the capability to represent functional relationships between objects and their interactive elements. A 3D scene graph that captures both functionalities and interactions opens up significant opportunities for future research. For example, robotic agents can identify interactive elements and their functional relationships with objects to perform effective manipulation tasks, or graph-guided 3D scene generation methods [21, 97] can, with this enriched representation, generate more dynamic and realistic environments by incorporating interactive elements and their effects. However, creating functional 3D scene graphs is challenging. Most importantly, there is a lack of training data to learn the complex functional relationships between objects and their interactive elements. Unlike existing 3D scene graphs, functional 3D scene graphs require a more nuanced understanding of interactions and object affordances. To address this, our approach implements an open-vocabulary pipeline for functional 3D scene graph inference, termed OpenFunGraph, leveraging the extensive knowledge encoded within foundation models, including visual language models (VLMs) and large language models (LLMs). These models, pre-trained on vast amounts of multimodal data, include rich semantic information that can potentially be adapted for functional understanding. This leads us to the central question of this work: "Can we harness foundation models to construct functional 3D scene graphs?"

Fig. 1. Functional 3D Scene Graphs. Given an input sequence of posed RGB-D frames of an indoor environment, our method predicts a functional 3D scene graph by detecting objects, identifying interactive elements, and inferring functional relationships. This enables the representation of interactions, functions, and scene dynamics, going beyond existing 3D scene graph methods that are constrained to spatial relationships between static objects.

To address these limitations, we introduce functional 3D scene graphs, which model objects, interactive elements, and their functional relationships within a unified structure (formally defined in Section 3). This representation extends traditional 3D scene graphs by incorporating interactive sub-parts alongside objects and representing functional relationships beyond simple spatial ones. We argue that functional 3D scene graphs should possess the following characteristics. First, the representation should operate in an open-vocabulary manner to enhance generalization and applicability. Second, it should be flexible, allowing various attributes to be attached to nodes (e.g., sensor data, natural language captions, semantic features) and edges (e.g., relationship descriptions), thus ensuring adaptability for downstream applications.

We evaluate our approach on two challenging datasets: an extended version of SceneFun3D [17] with newly added functional relationship annotations, and FunGraph3D, a freshly collected real-world dataset featuring high-precision 3D laser scans with accurately registered consumer-device video captures and egocentric human-scene interaction videos.

In summary, our key contributions are:

- We introduce functional 3D scene graphs that extend traditional 3D scene graphs by capturing functional relationships between objects and interactive elements.
- We propose a novel approach that leverages the knowledge embedded in foundation models, specifically VLMs and LLMs, to construct functional 3D scene graphs without task-specific training.
- We present a new real-world dataset, FunGraph3D, with ground-truth functional annotations, and demonstrate that our method outperforms adapted baselines, including Open3DSG and ConceptGraph.

# 2. Related Work

3D indoor scene understanding. Many works concentrate on closed-set 3D semantic segmentation [5, 14, 31-33, 42, 45, 57, 58, 76, 80, 81] or instance segmentation [23, 28, 29, 38, 70, 74, 77, 96] on the existing 3D indoor scene understanding benchmarks [3, 7, 10, 15, 37, 65, 72, 93]. With the development of foundation models, subsequent research explores open-vocabulary 3D semantic segmentation [24, 36, 39, 56, 59, 73, 75, 94, 105, 107] and complex 3D visual language grounding tasks [8, 16, 30, 34, 55, 62, 90, 103]. However, current studies mainly focus on object-level perception in indoor scenes and seldom consider part-level interactive elements. Recently, SceneFun3D [17] proposed a benchmark for functionality and affordance understanding, with exhaustive annotations of indoor interactive elements. However, it provides neither object annotations nor the relationships between elements and objects. This work extends SceneFun3D by exploiting such relationships with functional 3D scene graphs.

Affordance understanding. Understanding affordances, i.e., the interaction possibilities an environment offers, is a vital task in computer vision and robotics. Existing learning-based methods usually take inputs such as images [22, 98], videos [26, 54, 95] or 3D representations [18, 52, 53, 86], and then predict affordance maps. Some works learn affordances from human-scene interaction demonstrations [6, 12, 13, 25, 91, 92, 100, 101]. Nevertheless, existing works are often limited to object-level predictions and model affordances located on the corresponding objects. In contrast, OpenFunGraph discovers all interactive elements at the scene level, handling all kinds of functional relationships, especially those for remote operation.

3D scene graphs. 3D scene graphs combine indoor entities into a unified structure and model inter-object relationships by building a graph of objects [4, 40, 63, 64, 75, 78, 79, 84, 85, 99, 102]. Functional 3D scene graphs differ from traditional 3D scene graphs by adding interactive elements as nodes and modeling the functional relationships between objects and elements. Similarly, IFR-Explore [44] tries to discover inter-object functional relationships based on reinforcement learning in synthetic scenarios. However, it is difficult to apply in complex real-world scenes due to its closed-set setting, its requirement for ground-truth instances, and its lack of part-level elements. In this paper, we propose an open-vocabulary framework for functional scene graph inference in complex real-world scenes. While there have been related efforts on open-vocabulary 3D scene graph generation, they are not well-suited for functional scene graph inference, particularly for interactive element recognition and functional relationship prediction. For example, Open3DSG [41] relies on object-level CLIP features [60]. It struggles with part-level interactive element recognition and is limited to inferring spatial relationships due to its design based on spatial-proximity edge feature distillation. ConceptGraph [27] uses a direct inference pipeline but focuses solely on object nodes and a narrow set of spatial relationships (e.g., on, in). In contrast, our approach introduces adaptive detection and description stages for both objects and interactive elements, alongside a sequential reasoning strategy for accurately modeling a wide range of functional relationships.

Fig. 2. Illustration of the OpenFunGraph architecture. Given a sequence of posed RGB-D frames $\{(\mathcal{I}_i, \mathcal{D}_i)\}_{i=1}^n$, we use RAM++ [104] and GroundingDINO [49] to detect and segment objects $\mathcal{O}$ and interactive elements $\mathcal{I}$, forming the node candidates of the functional 3D scene graph. Next, a mechanism using the large language model (LLM) GPT [1] and the visual language model (VLM) LLAVA [48] generates natural language descriptions $\mathcal{L}$ for each node. Finally, we infer functional relationships $\mathcal{R}$ between objects $\mathcal{O}$ and interactive elements $\mathcal{I}$, represented as the edges in the functional 3D scene graph $\mathcal{G}$.

# 3. Problem Formulation

Functional 3D Scene Graphs. We extend traditional 3D scene graphs [27, 41, 78] to facilitate their use in real-world scene interaction scenarios. Specifically, we introduce Functional 3D Scene Graphs, a representation designed to enable functional reasoning by jointly modeling objects, interactive elements and their functional relationships. We define a functional 3D scene graph as a directed graph $\mathcal{G} = (\mathcal{O},\mathcal{I},\mathcal{R})$, where $\mathcal{O}$ are the objects in the scene, $\mathcal{I}$ are the interactive elements and $\mathcal{R}$ are the functional relationships, which point from the interactive element nodes $\mathcal{I}$ to object nodes $\mathcal{O}$. Following the definition in [17], we define interactive elements as components that agents interact with (e.g., handles, knobs and buttons) to trigger specific functions within the environment, such as opening a cabinet or turning off a light. Additionally, functional relationships fall into two categories: local, where the interactive element is part of the object (e.g., door-handle), or remote, where the interactive element operates the object from a distance (e.g., TV-remote control).

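To make this formulation concrete, the sketch below models $\mathcal{G} = (\mathcal{O}, \mathcal{I}, \mathcal{R})$ as plain Python data structures. The field names, the `RelationKind` enum, and the per-edge confidence slot are illustrative choices layered on the definition above, not an official schema from the paper.

```python
from dataclasses import dataclass, field
from enum import Enum


class RelationKind(Enum):
    LOCAL = "local"    # element is part of the object (door - handle)
    REMOTE = "remote"  # element operates the object from afar (TV - remote)


@dataclass
class Node:
    node_id: int
    label: str             # open-vocabulary label, e.g. "handle"
    description: str = ""  # natural language caption (see Sec. 4.2)
    bbox: tuple = ()       # 3D box, e.g. (xmin, ymin, zmin, xmax, ymax, zmax)


@dataclass
class FunctionalRelationship:
    element_id: int        # edge points from the interactive element ...
    object_id: int         # ... to the object it controls
    kind: RelationKind
    description: str       # e.g. "opens", "turns on"
    confidence: float = 1.0  # used by the remote-reasoning stage (Sec. 4.3)


@dataclass
class FunctionalSceneGraph:
    objects: dict[int, Node] = field(default_factory=dict)   # O
    elements: dict[int, Node] = field(default_factory=dict)  # I
    relations: list[FunctionalRelationship] = field(default_factory=list)  # R
```
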
Task definition. We formulate the following novel 3D scene understanding task: Given an input sequence of posed RGB-D frames $\{(\mathcal{I}_i,\mathcal{D}_i)\}_{i = 1}^n$ of an unseen indoor environment, the task is to construct the functional 3D scene graph $\mathcal{G}$ by inferring the functional relationships $\mathcal{R}$ among the objects $\mathcal{O}$ and interactive elements $\mathcal{I}$ in the scene.

# 4. Method

The goal of our method, OpenFunGraph, is to predict the functional 3D scene graph of a 3D environment, by accurately detecting objects and interactive elements, and inferring the functional relationships among them in an open-vocabulary manner (Figure 2). To overcome the challenge of limited training data, we harness the knowledge of foundation models [9] to detect objects and interactive elements within the scene, describe them in natural language, and reason about their functional relationships. In the detection stage (Section 4.1), we follow a progressive strategy where we prompt the foundation model to systematically first identify objects and then transition to finer-grained interactive elements given the input image sequence. The 2D detection results are then fused across multiple viewpoints in 3D space, constructing an initial set of node candidates. Next, we utilize a VLM and an LLM to collaboratively generate multi-view aware natural language descriptions of the candidate nodes (Section 4.2). To construct the graph, we proceed with inferring the functional relationships, i.e., edges, among the object and interactive element nodes (Section 4.3). Specifically, we follow a sequential reasoning strategy, starting with local functional relationships (e.g., door - handle) and extending to remote functional relationships (e.g., TV - remote control), by leveraging the common sense knowledge of VLMs and LLMs. This allows us to progressively build the scene's functional graph by incrementally establishing connections between nodes.

# 4.1. Node Candidate Detection

In the first stage, we detect objects and interactive elements in the scene to construct a set of node candidates. We start by detecting 2D candidates on the input frames with a progressive foundation-model-based strategy that transitions from objects to finer-grained part-level interactive elements. Then, we associate and fuse the 2D detection results from multiple frames using geometric consistency, yielding the initial set of 3D node candidates.

Object candidates. To identify object candidates $\mathcal{C}_o^{\mathcal{I}_i}$, we utilize RAM++ [35, 104] to recognize objects in each input image $\mathcal{I}_i$, producing object tags $\mathcal{T}_{obj}^{\mathcal{I}_i}$, such as 'cabinet' or 'door'. These object tags then serve as prompts for GroundingDINO [49], which detects 2D bounding boxes $\mathcal{B}^{\mathcal{I}_i}$, segmentation masks $\mathcal{M}^{\mathcal{I}_i}$, and confidence scores $\mathcal{S}^{\mathcal{I}_i}$.

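A minimal sketch of this per-frame loop is shown below; `ram_plus` and `grounding_dino` are hypothetical wrappers standing in for the actual RAM++ and GroundingDINO inference interfaces, whose real APIs vary across releases.

```python
# Sketch of per-frame object candidate detection (Sec. 4.1). The two model
# wrappers are placeholders, not the real RAM++/GroundingDINO APIs.
def detect_object_candidates(frames, ram_plus, grounding_dino):
    candidates = []  # one record per frame
    for image in frames:
        tags = ram_plus.generate_tags(image)  # e.g. ["cabinet", "door"]
        prompt = ". ".join(tags)              # GroundingDINO text prompt
        boxes, masks, scores = grounding_dino.detect(image, prompt)
        candidates.append({"tags": tags, "boxes": boxes,
                           "masks": masks, "scores": scores})
    return candidates
```
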
Interactive element candidates. Despite the increasing success of foundation models in detecting object instances within scenes, the development of prompting strategies for identifying smaller elements, including interactive object parts (e.g., knobs, handles), remains largely unexplored. Here, we propose a simple yet effective strategy to generate suitable text prompts for GroundingDINO to improve the detection of small interactive parts. We ask the LLM GPT-4 to provide a list of potential interactive element tags corresponding to each object candidate tag $\mathcal{T}_{obj}^{\mathcal{I}_i}$. We keep the valid object tags $\mathcal{T}_{val}^{\mathcal{I}_i}$ by filtering out the cases where the LLM deems the object not interactable (e.g., wall, bed). To create prompts for GroundingDINO, we concatenate $\mathcal{T}_{val}^{\mathcal{I}_i}$ (e.g., door) as assistive tags with the functional element tags (e.g., handle), forming prompts such as "door. handle". Finally, we obtain the interactive element candidates $\mathcal{C}_{ie}^{\mathcal{I}_i}$ in each input image $\mathcal{I}_i$ by keeping the detections corresponding to the functional element tags. Empirically, we observe that this approach leads to more accurate detection of small interactive parts. We support this observation with an ablation study in Section 6.3.

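The prompt-construction step can be sketched as follows, assuming a hypothetical `llm_propose_elements` helper that wraps the GPT-4 query; the exact prompt format is our illustration of the "door. handle" scheme described above.

```python
# Illustrative prompt construction for interactive-element detection.
# `llm_propose_elements` is a placeholder for the GPT-4 query that maps an
# object tag to plausible element tags (or an empty list if not interactable).
def build_element_prompts(object_tags, llm_propose_elements):
    prompts = []
    for obj_tag in object_tags:
        element_tags = llm_propose_elements(obj_tag)  # "door" -> ["handle"]
        if not element_tags:  # skip non-interactable objects (wall, bed)
            continue
        for elem_tag in element_tags:
            # assistive object tag + element tag, e.g. "door. handle"
            prompts.append((f"{obj_tag}. {elem_tag}", elem_tag))
    return prompts  # pairs of (GroundingDINO prompt, element tag to keep)
```
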
3D candidate fusion. After identifying the object and interactive element candidates $\mathcal{C}_{o}^{\mathcal{I}_i}$ and $\mathcal{C}_{ie}^{\mathcal{I}_i}$ in each image $\mathcal{I}_i$, we fuse their 2D segmentation masks using multi-view information to obtain the 3D node candidates of the graph. Following [27], we utilize the corresponding depth map $\mathcal{D}_i$ and camera projection matrix $\Pi_i$ to backproject the 2D masks to 3D space, and merge them to obtain the 3D object candidates $\mathcal{C}_o$ and interactive element candidates $\mathcal{C}_{ie}$. For each node candidate, we store the backprojected 3D point cloud $\mathcal{P}$ and 3D bounding box $\mathcal{B}$ along with the associated 2D image assets, i.e., images, masks, 2D bounding boxes and confidence scores.

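The core back-projection of a single 2D mask can be sketched with standard pinhole-camera math; the multi-view merging of candidates is omitted, and the array layout (a binary mask, a metric depth map, intrinsics `K`, and a 4x4 camera-to-world matrix) is an assumption for illustration.

```python
import numpy as np

# Back-project a 2D instance mask into 3D using the depth map and intrinsics.
# A minimal pinhole-camera sketch of the fusion step in Sec. 4.1.
def backproject_mask(mask, depth, K, cam_to_world):
    v, u = np.nonzero(mask)            # pixel coordinates inside the mask
    z = depth[v, u]
    valid = z > 0                      # discard pixels with missing depth
    u, v, z = u[valid], v[valid], z[valid]
    x = (u - K[0, 2]) * z / K[0, 0]    # (u - cx) * z / fx
    y = (v - K[1, 2]) * z / K[1, 1]    # (v - cy) * z / fy
    pts_cam = np.stack([x, y, z, np.ones_like(z)], axis=1)
    pts_world = (cam_to_world @ pts_cam.T).T[:, :3]
    return pts_world                   # (N, 3) points of this candidate
```
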
# 4.2. Node Candidate Description

We next outline the process of generating natural language descriptions $\mathcal{L}$ for each node by leveraging a combination of VLMs and LLMs. Precise language descriptions are critical for establishing functional relationships in the final phase.

Object candidates. To generate natural language descriptions for each object candidate node, we first select the top $N_v$ views of each object, ranked by $S^{\mathcal{I}_i} \times \frac{n_{\mathcal{P}}^{\mathcal{I}_i}}{n_{\mathcal{P}}}$, where $S^{\mathcal{I}_i}$ is the 2D confidence score indicating the semantic confidence, while $n_{\mathcal{P}}^{\mathcal{I}_i}$ refers to the number of 3D points the view $\mathcal{I}_i$ contributes to the fused 3D point cloud $\mathcal{P}$, representing the geometric contribution of the view. Each object is then cropped based on its bounding box $\mathcal{B}$, and a caption describing the object crop is obtained using LLAVA v1.6 [46-48]. Finally, to derive a unified language description for each object candidate, we employ GPT-4 [1] to summarize the multi-view LLAVA captions.

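The view-ranking score admits a direct implementation; the dictionary layout of the per-view records and the default of five views are assumptions for illustration, not values from the paper.

```python
# Rank candidate views for captioning by the score above: the 2D semantic
# confidence times the view's geometric contribution to the fused point cloud.
def select_top_views(views, n_total_points, n_views=5):
    # views: list of dicts, each with keys "score" (S^I_i) and
    # "n_points" (points this view contributed to the fused cloud P)
    scored = [(v["score"] * v["n_points"] / n_total_points, v) for v in views]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [v for _, v in scored[:n_views]]  # top N_v views
```
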
Interactive element candidates. Captioning small interactive elements poses additional challenges: the bounding box crops are considerably smaller, often containing only a few pixels, which hinders LLAVA's ability to generate accurate captions. To address this, we enlarge the bounding boxes by multiple scales to incorporate richer contextual visual information. Similar multi-scale approaches have been shown to be effective in [39, 73]. To direct the VLM's attention to the interactive element within the expanded crop, we highlight the element with a red outline before passing it to LLAVA, as demonstrated in [71]. Finally, the multi-scale, multi-view captions are summarized into a single natural language description using GPT-4.

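A possible implementation of the multi-scale, red-outline crops using PIL is sketched below; the scale factors and outline width are illustrative choices, not the paper's settings.

```python
from PIL import Image, ImageDraw

# Multi-scale context crops with a red outline around the interactive element.
def element_crops(image: Image.Image, box, scales=(1.0, 2.0, 3.0)):
    x0, y0, x1, y1 = box
    cx, cy, w, h = (x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0
    crops = []
    for s in scales:
        left = max(0, cx - s * w / 2)
        top = max(0, cy - s * h / 2)
        right = min(image.width, cx + s * w / 2)
        bottom = min(image.height, cy + s * h / 2)
        crop = image.crop((int(left), int(top), int(right), int(bottom)))
        draw = ImageDraw.Draw(crop)
        # highlight the element itself, shifted into crop coordinates
        draw.rectangle((x0 - int(left), y0 - int(top),
                        x1 - int(left), y1 - int(top)),
                       outline="red", width=3)
        crops.append(crop)
    return crops  # each crop is captioned by the VLM, then summarized
```
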
# 4.3. Functional Relationships

To model functional relationships between objects and interactive elements, we employ a sequential reasoning approach. Drawing on the concept of Chain-of-Thought reasoning [82], we decompose the task into a series of simpler steps rather than prompting the LLM to infer all possible element-object connections simultaneously. Initially, we concentrate on identifying direct, local relationships between objects and elements that are rigidly connected (e.g., door - handle). Once these relationships are established, we extend the search to remote relationships, where object-element pairs are functionally related but physically separated (e.g., TV - remote control).

Local relationship reasoning. First, we aim to construct the edges of the graph with local functional relationships, e.g., the key panel of a microwave or the knob of a cabinet. A common characteristic of these cases is that objects and interactive elements are rigidly connected. To identify such cases efficiently, we first perform a spatial filtering process: for each object node $\mathcal{C}_o^j$, we assess whether an element node $\mathcal{C}_{ie}^k$ has a significant spatial overlap. Subsequently, we leverage the LLM's common sense knowledge to reason whether a local functional relationship between these two nodes is feasible. To do this, we prompt the LLM with the language descriptions $\mathcal{L}^j$, $\mathcal{L}^k$ and 3D bounding boxes $\mathcal{B}^j$, $\mathcal{B}^k$ of $\mathcal{C}_o^j$ and $\mathcal{C}_{ie}^k$, respectively. It is tasked with reasoning whether a local rigid connection between the interactive element (e.g., handle) and object (e.g., fridge) is feasible, and with generating a language description $\mathcal{L}^{k\rightarrow j}$ of the functional relationship (e.g., "opens"). This step produces the subgraph of local connections $\hat{\mathcal{G}}^L = (\mathcal{O}^L,\mathcal{I}^L,\mathcal{R}^L)$.

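The spatial pre-filter can be approximated by the fraction of the element's 3D box contained in the object's box; the 0.5 threshold and the axis-aligned boxes are assumptions, since the paper does not specify the exact overlap test.

```python
import numpy as np

# Spatial pre-filter for local relationships: keep element-object pairs whose
# 3D boxes overlap significantly before querying the LLM.
def containment_ratio(elem_box, obj_box):
    # boxes as numpy arrays (xmin, ymin, zmin, xmax, ymax, zmax)
    lo = np.maximum(elem_box[:3], obj_box[:3])
    hi = np.minimum(elem_box[3:], obj_box[3:])
    inter = np.prod(np.clip(hi - lo, 0, None))
    elem_vol = np.prod(elem_box[3:] - elem_box[:3])
    return inter / elem_vol if elem_vol > 0 else 0.0

def local_pair_candidates(elements, objects, thresh=0.5):
    # thresh is an assumed value, not taken from the paper
    return [(e, o) for e in elements for o in objects
            if containment_ratio(e["bbox"], o["bbox"]) > thresh]
```
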
Confidence-aware remote relationship reasoning. In this step, we construct graph edges representing remote functional relationships, such as those between a ceiling light and its switch. Determining these remote relationships is challenging, as visual cues alone often do not fully clarify which interactive element controls which specific object. To address this, we introduce a confidence-aware reasoning strategy that assigns a confidence score to each inferred remote relationship. This approach enhances decision-making in real-world scenarios by enabling the agent to prioritize interactions with higher confidence scores.

First, we form an initial set of potential candidates for remote connections by considering the interactive element nodes that remained unassigned from the previous stage. To construct potential remote connections among the interactive elements and objects in the scene, we utilize the common sense knowledge of the LLM. Specifically, we provide the LLM with natural language descriptions $\mathcal{L}$ of the interactive element and object nodes, so that it can output a list of likely target objects that each interactive element could be functionally linked to. Next, for each element-object pair, we employ the VLM to assess the feasibility of a functional connection. The visual input for this step is prepared from the top-1 views of the interactive element and object. The VLM can exploit useful information in the images of the element and object to generate descriptions for the feasibility assessment. For example, it describes whether the appliance is physically plugged into the electric outlet, or whether the switch is mounted on the wall under the ceiling light. The descriptions from all pairs are then provided to the LLM to form a global context, assisting it in assigning a relative confidence score to each proposed connection and describing the nature of each relationship. This step outputs the subgraph of remote relations: $\hat{\mathcal{G}}^R = (\mathcal{O}^R,\mathcal{I}^R,\mathcal{R}^R)$.

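One way the global context for the confidence-assignment step might be assembled is sketched below; the prompt wording and the `query_llm` interface are assumptions for illustration, not the paper's actual prompts.

```python
# Sketch of the confidence-aware remote-reasoning prompt assembly (Sec. 4.3).
def score_remote_pairs(pair_feasibility_notes, query_llm):
    # pair_feasibility_notes: list of (element_desc, object_desc, vlm_note),
    # where vlm_note is the VLM's visual feasibility description for the pair.
    context = "\n".join(
        f"- element: {e} | object: {o} | visual evidence: {note}"
        for e, o, note in pair_feasibility_notes
    )
    prompt = (
        "Given the candidate element-object pairs and visual evidence below, "
        "assign each pair a relative confidence in [0, 1] and a short "
        "description of the functional relationship (e.g. 'turns on').\n"
        + context
    )
    return query_llm(prompt)  # expected: (confidence, description) per pair
```
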
# 4.4. Final Graph Formation

To construct the final graph, we combine the nodes and relationships identified in both the local and remote functional reasoning stages. The resulting predicted graph is formulated as $\mathcal{G} = (\mathcal{O}^L\cup \mathcal{O}^R,\mathcal{I}^L\cup \mathcal{I}^R,\mathcal{R}^L\cup \mathcal{R}^R)$.

# 5. Data Collection

Existing datasets of high-fidelity 3D indoor spaces focus primarily on understanding either 3D objects [7, 93] or 3D interactive elements [17]. However, they lack ground-truth annotations of the functional relationships. In many cases, these relationships cannot be inferred from static visual observations alone but instead require video captures of physical interactions with the scene to determine which actions trigger specific responses. For example, a static 3D reconstruction cannot indicate which switch controls a particular light in a room with multiple switches and lights. To systematically evaluate our method, we construct a novel dataset of 3D real-world indoor environments along with multi-sensor data (i.e., high-fidelity 3D reconstructions, consumer-device video captures, egocentric human-scene interaction videos) and functional 3D scene graph annotations. We outline the steps towards building this dataset, which we refer to as FunGraph3D (Figure 4).

Fig. 3. Modalities of our FunGraph3D dataset. Top: 3D scans from a Faro laser scanner, annotated with 3D object and interactive element masks. Middle: Ground truth functional 3D scene graphs. Bottom: Egocentric video capturing human-scene interactions.

Fig. 4. Example scenes from our FunGraph3D dataset. The dataset includes typical indoor environments such as living rooms, bedrooms, bathrooms, and kitchens.

Laser scans. As illustrated in [17], laser scans can capture a higher level of 3D geometric detail, such as small interactive elements (i.e., knobs, buttons), which is necessary for fine-grained scene understanding applications. To this end, we use a Leica RTC360 laser scanner to capture a high-resolution (5 mm) 3D scan of the scene. To ensure high scene coverage during the capture, we place the scanner in multiple positions in the scene. We subsequently use the supporting software by Leica to fuse the multiple scans into a single one for the scene.

iPad video sequences. To enable scene understanding through multiple sensor data, we accompany the high-fidelity 3D reconstruction with RGB-D image information from a commodity device. Specifically, we capture multiple videos of the static scene with the camera of an iPad 15 Pro.

Registration and alignment. To register the iPad video frames to the laser scan coordinate system, we build upon the COLMAP-based pipeline in [93]. Specifically, we run the COLMAP SfM pipeline [68, 69] by augmenting the collection of real iPad frames with rendered pseudo images of the laser scan. However, we notice that this pipeline leads to a large number of unregistered frames. To address this limitation, we incorporate the deep learning-based methods Superpoint [19] and Superglue [67] for feature extraction and matching, leading to a more accurate registration result. Afterwards, we utilize the optimized pose for each camera frame to render high-resolution depth maps for accurate back-projection from the iPad frames to the 3D space.

Egocentric videos. Our dataset also includes egocentric videos of property owners interacting with the environment, captured with an Apple Vision Pro headset. These videos facilitate accurate relationship labeling, as they help clarify ambiguous connections among objects and interactive elements (e.g., which light switch controls the ceiling light).

Annotation. For the annotation process, we extend the SceneFun3D annotation tool [17] to construct the ground-truth functional 3D scene graphs. Annotators can navigate the 3D scene and annotate the instances of objects and interactive elements along with a free-form label. Annotators are also asked to connect each interactive element to the object that it controls and provide a description of their relationship. An example of the collected annotations is displayed in Figure 3.

Statistics. FunGraph3D contains 14 in-the-wild scenes of various types (6 kitchens, 2 living rooms, 3 bedrooms, and 3 bathrooms). In total, the dataset includes 201 interactive elements, 228 functional relationships, and 146 objects of interest, along with open-vocabulary labels and relationships.

# 6. Experiments

# 6.1. Experimental Setup

Datasets. To evaluate our method, we utilize the developed FunGraph3D dataset, described in Section 5. Additionally, we use the SceneFun3D dataset [17], which provides high-resolution $5\mathrm{mm}$ laser scans of real-world environments along with iPad video sequences. Specifically, we randomly select 20 scenes (8 from the validation and 12 from the test split) and apply our annotation pipeline to annotate the functional 3D scene graph in each scene. Since we do not have physical access to the 3D environments, we restrict our evaluation to functional relationships that are visually unambiguous. In total, 212 interactive elements, 195 functional relationships, and 105 corresponding objects are annotated for these scenes.

Fig. 5. Qualitative results. Top: input images. Bottom: predicted functional 3D scene graph. Best seen zoomed in on a color screen.

<table><tr><td rowspan="3">Methods</td><td colspan="6">SceneFun3D [17]</td><td colspan="6">FunGraph3D (Ours)</td></tr><tr><td colspan="2">Objects</td><td colspan="2">Inter. Elements</td><td colspan="2">Overall Nodes</td><td colspan="2">Objects</td><td colspan="2">Inter. Elements</td><td colspan="2">Overall Nodes</td></tr><tr><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td><td>R@3</td><td>R@10</td></tr><tr><td>Open3DSG* [41]</td><td>61.2</td><td>70.7</td><td>54.4</td><td>61.8</td><td>56.7</td><td>64.7</td><td>50.9</td><td>58.1</td><td>21.8</td><td>33.9</td><td>33.4</td><td>43.6</td></tr><tr><td>Open3DSG*† [41]</td><td>42.9</td><td>50.0</td><td>33.8</td><td>38.3</td><td>37.4</td><td>43.0</td><td>30.9</td><td>44.1</td><td>13.0</td><td>19.6</td><td>20.2</td><td>29.4</td></tr><tr><td>ConceptGraph* [27]</td><td>71.3</td><td>77.1</td><td>6.6</td><td>8.6</td><td>28.3</td><td>31.4</td><td>58.0</td><td>66.3</td><td>2.5</td><td>4.1</td><td>20.1</td><td>25.2</td></tr><tr><td>ConceptGraph* [27] + IED</td><td>71.3</td><td>77.1</td><td>53.1</td><td>59.5</td><td>60.1</td><td>66.0</td><td>58.0</td><td>66.3</td><td>20.5</td><td>33.4</td><td>38.9</td><td>45.0</td></tr><tr><td>OpenFunGraph (Ours)</td><td>81.8</td><td>87.8</td><td>71.0</td><td>79.5</td><td>73.0</td><td>82.8</td><td>70.7</td><td>79.1</td><td>44.4</td><td>57.6</td><td>55.5</td><td>65.8</td></tr></table>

Tab. 1. Node evaluation on the SceneFun3D [17] and FunGraph3D datasets. * denotes adapted LLM prompts for functional relationship inference. IED refers to the interactive element candidate detection in Section 4.1. † denotes using OpenFunGraph's fused 3D nodes rather than the ground-truth nodes, for fair comparison.

Metrics. To evaluate open-vocabulary functional 3D scene graphs effectively, a new quantitative metric is essential. Existing approaches, such as ConceptGraph [27], rely on subjective human assessments, while Open3DSG [41] treats evaluation as a label retrieval task, assuming all ground-truth nodes are known, an assumption that diverges from our real-world setting. To address this, we extend the Open3DSG Recall@K metric [41] with a node detection component based on the spatial overlap between predicted and ground-truth nodes, inspired by evaluation techniques for 2D scene graph generation [50, 87-89, 106]. More specifically, our evaluation metric comprises two Recall@K scores: one for nodes, i.e., $\mathcal{O}$ and $\mathcal{I}$, and one for triplets, i.e., $(\mathcal{O},\mathcal{I},\mathcal{R})$. For node evaluation, we preprocess all ground-truth labels to enable top-K retrieval, following Open3DSG [41]. A retrieval is considered successful if a ground-truth node has a non-zero 3D IoU with a predicted node and the ground-truth label ranks within the top-K retrievals based on cosine similarity of CLIP embeddings [60] with the predicted label. We calculate overall node recall as $R_{no} = \frac{n_{no}^{re}}{n_{no}}$, where $n_{no}^{re}$ is the number of successfully retrieved ground-truth nodes, and $n_{no}$ is the total count of ground-truth nodes. Additionally, we assess recall for object and interactive element nodes separately, denoted as $R_{o} = \frac{n_{o}^{re}}{n_{o}}$ and $R_{ie} = \frac{n_{ie}^{re}}{n_{ie}}$, where $n_{o}^{re}$ and $n_{ie}^{re}$ are the counts of correctly retrieved objects and interactive elements, and $n_{o}$ and $n_{ie}$ are their respective totals. For triplet $(\mathcal{O},\mathcal{I},\mathcal{R})$ evaluation, we apply stricter criteria: a ground-truth triplet is successfully retrieved in the top-K only when all its components $\mathcal{O}$, $\mathcal{I}$, and $\mathcal{R}$ are individually retrieved within the top-K. The retrieval process for $\mathcal{O}$ and $\mathcal{I}$ follows the same approach as above. To handle $\mathcal{R}$, we preprocess all relationship annotations by generating BERT embeddings [20], an approach effective for open-vocabulary predicates [41]. Successful retrieval is based on cosine similarity between ground-truth and predicted BERT embeddings. Triplet recall is defined as $R_{tr} = \frac{n_{re}}{n_{tr}}$, where $n_{re}$ is the count of retrieved triplets, and $n_{tr}$ is the total count of ground-truth triplets. We decompose triplet evaluation into node association ($R_{na} = \frac{n_{na}}{n_{tr}}$, with $n_{na}$ being the number of triplets retrieved considering only $\mathcal{O}$ and $\mathcal{I}$), indicating node recognition, and edge prediction ($R_{ep} = \frac{n_{re}}{n_{na}}$), reflecting relationship inference given correct node associations.
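A minimal sketch of the node Recall@K computation described above; `embed` and `iou_3d` are placeholders for the text-embedding model (CLIP in the paper) and the 3D box-overlap routine, and the dict fields are assumptions of this sketch:

```python
# A ground-truth node counts as retrieved if an overlapping predicted node
# (non-zero 3D IoU) ranks it within the top-K ground-truth labels by
# embedding cosine similarity.
import numpy as np

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))

def node_recall_at_k(gt_nodes, pred_nodes, embed, iou_3d, k=3):
    gt_embs = [embed(g["label"]) for g in gt_nodes]
    hit = [False] * len(gt_nodes)
    for p in pred_nodes:
        sims = np.array([cosine(embed(p["label"]), e) for e in gt_embs])
        topk = set(np.argsort(-sims)[:k].tolist())
        for i, g in enumerate(gt_nodes):
            if i in topk and iou_3d(g["box"], p["box"]) > 0:
                hit[i] = True
    return sum(hit) / max(len(gt_nodes), 1)

def triplet_recall(n_tr, n_na, n_re):
    """Decomposition used above: R_tr = R_na * R_ep."""
    return {"R_na": n_na / n_tr,
            "R_ep": n_re / max(n_na, 1),
            "R_tr": n_re / n_tr}
```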

State-of-the-art comparisons. We compare our approach against ConceptGraph [27]- and Open3DSG [41]-based baselines. Two ConceptGraph-based baselines are implemented: ConceptGraph* modifies the original LLM prompts to infer functional relationships, rather than focusing on spatial relationships such as in or on. ConceptGraph* + IED further incorporates the proposed interactive element candidate detection (IED) from Section 4.1, addressing ConceptGraph's limitation in detecting small parts. Both baselines use LLaVA v1.6 and GPT-4 for fair comparison with OpenFunGraph. We also reimplement two Open3DSG-based baselines. Open3DSG* modifies the LLM prompts to output functional relationships instead of spatial relationships. Since Open3DSG baselines rely on ground-truth node instance segmentation for graph neural network inference, we additionally implement Open3DSG*†, which uses OpenFunGraph's fused 3D nodes for fair comparison. We report Recall@3 and Recall@10 for node metrics, and Recall@5 and Recall@10 for triplet metrics.

# 6.2. Results

Quantitative results are presented in Tables 1 and 2. Overall, the FunGraph3D dataset poses a greater challenge than SceneFun3D [17] due to its more complex scenes, which contain a higher number of objects and interactive elements.

Node evaluation. As shown in Table 1, OpenFunGraph surpasses ConceptGraph* [27] in overall node R@3 by $160\%$ on SceneFun3D and by $176\%$ on FunGraph3D. ConceptGraph* primarily focuses on object perception, resulting in poor recall scores for interactive elements. With the added interactive element candidate detection (IED), ConceptGraph* + IED improves node recognition, but still falls short of OpenFunGraph by $22\%$ in R@3 on SceneFun3D and $43\%$ in R@3 on FunGraph3D, thanks to the dedicated node description stage proposed in OpenFunGraph. Our approach also outperforms the Open3DSG-based baselines, achieving $95\%$ and $29\%$ higher scores than Open3DSG*† and Open3DSG* in R@3 on SceneFun3D, and $174\%$ and $66\%$ higher on FunGraph3D. The limited ability of Open3DSG-based methods to identify interactive elements arises from their focus on object-level features during training, whereas our approach employs a more practical open-vocabulary inference pipeline, free from these training constraints.

Triplet evaluation. Table 2 shows triplet prediction results. On SceneFun3D and FunGraph3D, benefiting from accurate node recognition and the sequential reasoning strategy for functional inference, OpenFunGraph outperforms ConceptGraph* + IED by $76\%$ and $189\%$ in R@5, and Open3DSG*† by $179\%$ and $308\%$. Notably, Open3DSG-based baselines struggle with functional relationships, as they rely on spatial edge features from adjacent instances. ConceptGraph-based methods, which prompt the LLM to predict all possible connections, also perform worse than our sequential reasoning strategy due to the increased interpretive complexity imposed on the LLM. Figure 5 visualizes qualitative results for OpenFunGraph. In the left scene, our confidence-aware remote relationship reasoning successfully infers that the light switch is more likely to control the ceiling light rather than the two table light bulbs. In the right scene, the local functional relationship between the handle and the door is accurately identified. Additionally, the fan is most confidently inferred to be powered by the nearby electric outlet.

<table><tr><td rowspan="3">Methods</td><td colspan="6">SceneFun3D [17]</td><td colspan="6">FunGraph3D (Ours)</td></tr><tr><td colspan="2">Node Assoc.</td><td colspan="2">Edge Pred.</td><td colspan="2">Overall Triplets</td><td colspan="2">Node Assoc.</td><td colspan="2">Edge Pred.</td><td colspan="2">Overall Triplets</td></tr><tr><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td><td>R@5</td><td>R@10</td></tr><tr><td>Open3DSG* [41]</td><td>47.2</td><td>58.0</td><td>69.2</td><td>78.8</td><td>32.7</td><td>45.7</td><td>22.8</td><td>36.7</td><td>47.9</td><td>55.9</td><td>10.5</td><td>20.0</td></tr><tr><td>Open3DSG*† [41]</td><td>33.6</td><td>38.8</td><td>64.4</td><td>72.3</td><td>21.6</td><td>28.1</td><td>15.7</td><td>24.2</td><td>46.6</td><td>55.7</td><td>7.3</td><td>13.5</td></tr><tr><td>ConceptGraph* [27]</td><td>5.6</td><td>6.8</td><td>80.2</td><td>95.0</td><td>4.7</td><td>6.4</td><td>1.9</td><td>2.8</td><td>51.5</td><td>84.6</td><td>1.1</td><td>2.5</td></tr><tr><td>ConceptGraph* [27] + IED</td><td>45.4</td><td>49.3</td><td>75.6</td><td>90.9</td><td>34.3</td><td>44.5</td><td>18.8</td><td>22.8</td><td>46.1</td><td>79.7</td><td>10.3</td><td>18.9</td></tr><tr><td>OpenFunGraph (Ours)</td><td>68.3</td><td>73.0</td><td>88.1</td><td>96.2</td><td>60.4</td><td>70.3</td><td>45.8</td><td>49.3</td><td>65.1</td><td>91.4</td><td>29.8</td><td>45.0</td></tr></table>

Tab. 2. Triplet evaluation on the SceneFun3D [17] and FunGraph3D datasets. All marks have the same meaning as in Table 1. Node Assoc. refers to the node association metric, and Edge Pred. to the edge prediction metric.

# 6.3. Ablation Studies

We ablate three key modules in our pipeline, i.e., the GroundingDINO prompts for interactive element candidate detection, sequential reasoning, and confidence-aware remote relationship reasoning, presented in Table 3. The prompting strategy for GroundingDINO, which combines assistive object and element tags, proves effective. Using only element tags reduces node R@3 by $19\%$ and $10\%$, as well as triplet R@5 by $20\%$ and $22\%$, on the two datasets respectively, due to incomplete detections. Replacing sequential reasoning with a direct approach, where the LLM infers functional relationships across all nodes at once, significantly reduces triplet reasoning performance (by 42% and 32% in triplet R@5 on SceneFun3D and FunGraph3D, respectively). Sequential reasoning decomposes complex relationships into distinct types, making LLM processing easier. Ablating confidence-aware remote relationship reasoning by randomly selecting connections, instead of using the highest-confidence edge (e.g., choosing a random light for the switch instead of the most confident ceiling light), decreases triplet R@5 by 7% and 11% on the two datasets respectively. This illustrates that our mechanism selects more reasonable edges by incorporating the common-sense understanding of the foundation models.

<table><tr><td rowspan="2">Experiments</td><td colspan="2">Overall Nodes</td><td colspan="2">Overall Triplets</td></tr><tr><td>R@3</td><td>R@10</td><td>R@5</td><td>R@10</td></tr><tr><td colspan="5">SceneFun3D [17]</td></tr><tr><td>w/o prompts for element detection</td><td>59.3</td><td>68.7</td><td>48.3</td><td>59.9</td></tr><tr><td>w/o sequential edge reasoning*</td><td>73.0</td><td>82.8</td><td>34.8</td><td>48.9</td></tr><tr><td>w/o confidence-aware edge reasoning*</td><td>73.0</td><td>82.8</td><td>56.0</td><td>65.1</td></tr><tr><td>Ours</td><td>73.0</td><td>82.8</td><td>60.4</td><td>70.3</td></tr><tr><td colspan="5">FunGraph3D (Ours)</td></tr><tr><td>w/o prompts for element detection</td><td>49.9</td><td>59.1</td><td>23.1</td><td>37.6</td></tr><tr><td>w/o sequential edge reasoning*</td><td>55.5</td><td>65.8</td><td>20.2</td><td>33.8</td></tr><tr><td>w/o confidence-aware edge reasoning*</td><td>55.5</td><td>65.8</td><td>26.8</td><td>40.1</td></tr><tr><td>Ours</td><td>55.5</td><td>65.8</td><td>29.8</td><td>45.0</td></tr></table>

Tab. 3. Ablation study on SceneFun3D [17] (top) and our FunGraph3D (bottom). Note that edge reasoning (*) impacts only the triplet metric and does not affect node recognition performance.

Fig. 6. Functional 3D Scene Graphs for Robotic Manipulation. Left: 3D scene and functional graph generated after querying 'turning on the light.' Right: Robot interacting with scene elements as guided by the functional scene graph.

# 6.4. Downstream Applications

We showcase the versatility of the proposed functional 3D scene graph representation in downstream applications that require complex reasoning about indoor functionalities and task-oriented interactions.

3D inventory question answering. To enable functional reasoning, we convert the graph structure into a JSON list that the LLM can easily query. With this list, the LLM can answer questions such as "How can I turn on the ceiling light?" Using the functional 3D scene graph's nodes (objects, interactive elements) and edges (functional relationships), the LLM can provide responses such as "You can turn on the ceiling light using the light switch plate located at position [0.611, 0.113, 0.732]. From the provided JSON list, we can see the light switch plate with id 0 has the highest confidence level of 0.8 with the ceiling light fixture."
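A sketch of this graph-to-JSON interface; the schema and the `ask_llm` callable are illustrative assumptions, not the paper's exact prompt:

```python
# Serialize the functional 3D scene graph and prompt an LLM with it.
import json

def graph_to_json(nodes, edges):
    """nodes: dicts with id/label/kind/position; edges: dicts with
    element/object ids, a free-form relation, and a confidence score.
    Both schemas are assumed for this sketch."""
    return json.dumps({"nodes": nodes, "edges": edges}, indent=2)

def answer_query(question, graph_json, ask_llm):
    prompt = (
        "You are given a functional 3D scene graph as JSON. Nodes are "
        "objects and interactive elements; edges state which element "
        "controls which object, with a confidence score.\n\n"
        f"{graph_json}\n\n"
        f"Question: {question}\n"
        "Answer using the most confident relevant edge and include the "
        "interactive element's 3D position."
    )
    return ask_llm(prompt)  # any chat-completion call works here
```

For room-scale scenes the serialized graph is small enough to fit directly in the prompt, which is what makes this flat JSON-list design practical.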

Robotic manipulation. The functional 3D scene graph also supports robotic manipulation [43, 108] for user queries that involve functional reasoning, as illustrated in Figure 6. Similar to inventory question answering, the LLM queries the JSON list to locate the interactive element referenced in the query. The robot then navigates to and interacts with the element using the methods described in [43].

# 7. Conclusion

We introduce Functional 3D Scene Graphs, a novel representation that jointly models objects, interactive elements, and their functional relationships in 3D indoor environments. Our open-vocabulary pipeline leverages the common-sense knowledge of foundation models to infer functional 3D scene graphs and enable flexible querying. To support systematic benchmarking, we develop a high-fidelity dataset of real-world 3D indoor environments with multi-modal data and functional annotations. Experiments on this and existing datasets show that our method significantly outperforms baselines. We further demonstrate the versatility of our representation for downstream tasks such as 3D question answering and robotic manipulation.

Acknowledgments. We would like to thank colleagues and friends who helped us capture the data of FunGraph3D: Christine Engelmann, Dominik Faerber, Elisabetta Fedele, Xudong Jiang, Xin Kong, Aoxue Liu and Houssam Naous. This work was supported by the Swiss National Science Foundation Advanced Grant 216260: "Beyond Frozen Worlds: Capturing Functional 3D Digital Twins from the Real World". AD is supported by the Max Planck ETH Center for Learning Systems (CLS) and FE by an SNSF PostDoc.Mobility Fellowship.

# References

[1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 3, 4

[2] Christopher Agia, Krishna Murthy Jatavallabhula, Mohamed Khodeir, Ondrej Miksik, Vibhav Vineet, Mustafa Mukadam, Liam Paull, and Florian Shkurti. Taskography: Evaluating robot task planning over large 3d scene graphs. In Conference on Robot Learning (CoRL), 2022. 1

[3] Iro Armeni, Ozan Sener, Amir R Zamir, Helen Jiang, Ioannis Brilakis, Martin Fischer, and Silvio Savarese. 3d semantic parsing of large-scale indoor spaces. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 2

[4] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In International Conference on Computer Vision (ICCV), 2019. 1, 2

[5] Matan Atzmon, Haggai Maron, and Yaron Lipman. Point convolutional neural networks by extension operators. ACM Transactions On Graphics (TOG), 2018. 2

[6] Prithviraj Banerjee, Sindi Shkodrani, Pierre Moulon, Shreyas Hampali, Fan Zhang, Jade Fountain, Edward Miller, Selen Basol, Richard Newcombe, Robert Wang, et al. Introducing HOT3D: An egocentric dataset for 3d hand and object tracking. arXiv preprint arXiv:2406.09598, 2024. 2

[7] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, et al. ARKitScenes: A diverse real-world dataset for 3d indoor scene understanding using mobile RGB-D data. In International Conference on Neural Information Processing Systems (NeurIPS), 2021. 2, 5

[8] Valentin Bieri, Marco Zamboni, Nicolas S. Blumer, Qingxuan Chen, and Francis Engelmann. OpenCity3D: 3D Urban Scene Understanding with Vision-Language Models. In IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2025. 2

[9] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021. 3

[10] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3D: Learning from RGB-D data in indoor environments. International Conference on 3D Vision (3DV), 2017. 2

[11] Lianggangxu Chen, Xuejiao Wang, Jiale Lu, Shaohui Lin, Changbo Wang, and Gaoqi He. CLIP-driven open-vocabulary 3d scene graph generation via cross-modality contrastive learning. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 1

[12] Zerui Chen, Yana Hasson, Cordelia Schmid, and Ivan Laptev. AlignSDF: Pose-aligned signed distance fields for hand-object reconstruction. In European Conference on Computer Vision (ECCV), 2022. 2

[13] Woojin Cho, Jihyun Lee, Minjae Yi, Minje Kim, Taeyun Woo, Donghwan Kim, Taewook Ha, Hyokeun Lee, Je-Hwan Ryu, Woontack Woo, et al. Dense hand-object (HO) GraspNet with full grasping taxonomy and dynamics. European Conference on Computer Vision (ECCV), 2024. 2

[14] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2

[15] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. ScanNet: Richly-annotated 3d reconstructions of indoor scenes. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 2

[16] Alexandros Delitzas, Maria Parelli, Nikolas Hars, Georgios Vlassis, Sotirios-Konstantinos Anagnostidis, Gregor Bachmann, and Thomas Hofmann. Multi-CLIP: Contrastive vision-language pre-training for question answering tasks in 3d scenes. In British Machine Vision Conference (BMVC), 2023. 2

[17] Alexandros Delitzas, Ayca Takmaz, Federico Tombari, Robert Sumner, Marc Pollefeys, and Francis Engelmann. SceneFun3D: Fine-grained functionality and affordance understanding in 3d scenes. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 1, 2, 3, 5, 6, 7, 8

[18] Shengheng Deng, Xun Xu, Chaozheng Wu, Ke Chen, and Kui Jia. 3D AffordanceNet: A benchmark for visual object affordance understanding. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2

[19] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. SuperPoint: Self-supervised interest point detection and description. In International Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2018. 6

[20] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT, 2019. 6

[21] Helisa Dhamo, Fabian Manhardt, Nassir Navab, and Federico Tombari. Graph-to-3D: End-to-end generation and manipulation of 3d scenes using scene graphs. In International Conference on Computer Vision (ICCV), 2021. 1, 2

[22] Thanh-Toan Do, Anh Nguyen, and Ian Reid. AffordanceNet: An end-to-end deep learning approach for object affordance detection. In International Conference on Robotics and Automation (ICRA), 2018. 2

[23] Francis Engelmann, Martin Bokeloh, Alireza Fathi, Bastian Leibe, and Matthias Nießner. 3D-MPA: Multi-proposal aggregation for 3d semantic instance segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2

[24] Francis Engelmann, Fabian Manhardt, Michael Niemeyer, Keisuke Tateno, Marc Pollefeys, and Federico Tombari. OpenNeRF: Open set 3d neural scene segmentation with pixel-wise features and rendered novel views. International Conference on Learning Representations (ICLR), 2024. 2

[25] Zicong Fan, Maria Parelli, Maria Eleni Kadoglou, Xu Chen, Muhammed Kocabas, Michael J Black, and Otmar Hilliges. HOLD: Category-agnostic 3d reconstruction of interacting hands and objects from video. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2

[26] Kuan Fang, Te-Lin Wu, Daniel Yang, Silvio Savarese, and Joseph J Lim. Demo2Vec: Reasoning object affordances from online videos. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2

[27] Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. ConceptGraphs: Open-vocabulary 3d scene graphs for perception and planning. In International Conference on Robotics and Automation (ICRA), 2024. 1, 3, 4, 6, 7, 8

[28] Lei Han, Tian Zheng, Lan Xu, and Lu Fang. OccuSeg: Occupancy-aware 3d instance segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2

[29] Ji Hou, Angela Dai, and Matthias Nießner. 3D-SIS: 3d semantic instance segmentation of RGB-D scans. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2

[30] Joy Hsu, Jiayuan Mao, and Jiajun Wu. NS3D: Neuro-symbolic grounding of 3d objects and relations. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2

[31] Zeyu Hu, Xuyang Bai, Jiaxiang Shang, Runze Zhang, Jiayu Dong, Xin Wang, Guangyuan Sun, Hongbo Fu, and Chiew-Lan Tai. VMNet: Voxel-mesh network for geodesic-aware 3d semantic segmentation. In International Conference on Computer Vision (ICCV), 2021. 2

[32] Binh-Son Hua, Minh-Khoi Tran, and Sai-Kit Yeung. Pointwise convolutional neural networks. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2018.

[33] Rui Huang, Songyou Peng, Ayca Takmaz, Federico Tombari, Marc Pollefeys, Shiji Song, Gao Huang, and Francis Engelmann. Segment3D: Learning fine-grained class-agnostic 3d segmentation without manual labels. European Conference on Computer Vision (ECCV), 2024. 2

[34] Shijia Huang, Yilun Chen, Jiaya Jia, and Liwei Wang. Multi-view transformer for 3d visual grounding. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2

[35] Xinyu Huang, Yi-Jie Huang, Youcai Zhang, Weiwei Tian, Rui Feng, Yuejie Zhang, Yanchun Xie, Yaqian Li, and Lei Zhang. Open-set image tagging with multi-grained text supervision. arXiv e-prints, 2023. 4

[36] Krishna Murthy Jatavallabhula, Alihusein Kuwajerwala, Qiao Gu, Mohd Omama, Tao Chen, Alaa Maalouf, Shuang Li, Ganesh Iyer, Soroush Saryazdi, Nikhil Keetha, et al. ConceptFusion: Open-set multimodal 3d mapping. ICRA 2023 Workshop on Pretraining for Robotics (PT4R), 2023. 2

[37] Guangda Ji, Silvan Weder, Francis Engelmann, Marc Pollefeys, and Hermann Blum. ARKit LabelMaker: A new scale for indoor 3d scene understanding. International Conference on Computer Vision and Pattern Recognition (CVPR), 2025. 2

[38] Li Jiang, Hengshuang Zhao, Shaoshuai Shi, Shu Liu, Chi-Wing Fu, and Jiaya Jia. PointGroup: Dual-set point grouping for 3d instance segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2

[39] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. LERF: Language embedded radiance fields. In International Conference on Computer Vision (ICCV), 2023. 2, 4

[40] Sebastian Koch, Pedro Hermosilla, Narunas Vaskevicius, Mirco Colosi, and Timo Ropinski. Lang3DSG: Language-based contrastive pre-training for 3d scene graph prediction. In International Conference on 3D Vision (3DV), 2024. 1, 2

[41] Sebastian Koch, Narunas Vaskevicius, Mirco Colosi, Pedro Hermosilla, and Timo Ropinski. Open3DSG: Open-vocabulary 3d scene graphs from point clouds with queryable objects and open-set relationships. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 1, 2, 3, 6, 7, 8

[42] Loic Landrieu and Martin Simonovsky. Large-scale point cloud semantic segmentation with superpoint graphs. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2

[43] Oliver Lemke, Zuria Bauer, René Zurbrügg, Marc Pollefeys, Francis Engelmann, and Hermann Blum. Spot-Compose: A framework for open-vocabulary object retrieval and drawer manipulation in point clouds. In International Conference on Robotics and Automation (ICRA), 2024. 8

[44] Qi Li, Kaichun Mo, Yanchao Yang, Hang Zhao, and Leonidas Guibas. IFR-Explore: Learning inter-object functional relationships in 3d indoor scenes. International Conference on Learning Representations (ICLR), 2022. 2

[45] Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. PointCNN: Convolution on X-transformed points. International Conference on Neural Information Processing Systems (NeurIPS), 2018. 2

[46] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In International Conference on Neural Information Processing Systems (NeurIPS), 2023. 4

[47] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024.

[48] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning, OCR, and world knowledge, 2024. 3, 4

[49] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding DINO: Marrying DINO with grounded pre-training for open-set object detection. European Conference on Computer Vision (ECCV), 2024. 3, 4

[50] Cewu Lu, Ranjay Krishna, Michael Bernstein, and Li Fei-Fei. Visual relationship detection with language priors. In European Conference on Computer Vision (ECCV), 2016. 6

[51] Yang Miao, Francis Engelmann, Olga Vysotska, Federico Tombari, Marc Pollefeys, and Daniel Béla Baráth. SceneGraphLoc: Cross-modal coarse visual localization on 3d scene graphs. In European Conference on Computer Vision (ECCV), 2024. 1

[52] Kaichun Mo, Yuzhe Qin, Fanbo Xiang, Hao Su, and Leonidas Guibas. O2O-Afford: Annotation-free large-scale object-object affordance learning. In Conference on Robot Learning (CoRL), 2022. 2

[53] Tushar Nagarajan and Kristen Grauman. Learning affordance landscapes for interaction exploration in 3d environments. International Conference on Neural Information Processing Systems (NeurIPS), 2020. 2

[54] Tushar Nagarajan, Christoph Feichtenhofer, and Kristen Grauman. Grounded human-object interaction hotspots from video. In International Conference on Computer Vision (ICCV), 2019. 2

[55] Maria Parelli, Alexandros Delitzas, Nikolas Hars, Georgios Vlassis, Sotirios Anagnostidis, Gregor Bachmann, and Thomas Hofmann. CLIP-Guided Vision-Language Pre-Training for Question Answering in 3D Scenes. In International Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2023. 2

[56] Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, et al. OpenScene: 3d scene understanding with open vocabularies. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2

[57] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. PointNet: Deep learning on point sets for 3d classification and segmentation. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2

[58] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. PointNet++: Deep hierarchical feature learning on point sets in a metric space. International Conference on Neural Information Processing Systems (NeurIPS), 2017. 2

[59] Minghan Qin, Wanhua Li, Jiawei Zhou, Haoqian Wang, and Hanspeter Pfister. LangSplat: 3d language gaussian splatting. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2

[60] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), 2021. 2, 6

[61] Krishan Rana, Jesse Haviland, Sourav Garg, Jad Abou-Chakra, Ian Reid, and Niko Suenderhauf. SayPlan: Grounding large language models using 3d scene graphs for scalable robot task planning. In Conference on Robot Learning (CoRL), 2023. 1

[62] Junha Roh, Karthik Desingh, Ali Farhadi, and Dieter Fox. LanguageRefer: Spatial-language model for 3d visual grounding. In Conference on Robot Learning (CoRL), 2022. 2

[63] Antoni Rosinol, Arjun Gupta, Marcus Abate, Jingnan Shi, and Luca Carlone. 3d dynamic scene graphs: Actionable spatial perception with places, objects, and humans. Robotics: Science and Systems, 2020. 1, 2

[64] Antoni Rosinol, Andrew Violette, Marcus Abate, Nathan Hughes, Yun Chang, Jingnan Shi, Arjun Gupta, and Luca Carlone. Kimera: From SLAM to spatial perception with 3d dynamic scene graphs. International Journal on Robotics Research (IJRR), 2021. 1, 2

[65] David Rozenberszki, Or Litany, and Angela Dai. Language-grounded indoor 3d semantic segmentation in the wild. In European Conference on Computer Vision (ECCV), 2022. 2

[66] Sayan Deb Sarkar, Ondrej Miksik, Marc Pollefeys, Daniel Barath, and Iro Armeni. SGAligner: 3d scene alignment with scene graphs. In International Conference on Computer Vision (ICCV), 2023. 1

[67] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. SuperGlue: Learning feature matching with graph neural networks. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6

[68] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-Motion Revisited. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 6

[69] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. 6

[70] Jonas Schult, Francis Engelmann, Alexander Hermans, Or Litany, Siyu Tang, and Bastian Leibe. Mask3D: Mask transformer for 3d semantic instance segmentation. In International Conference on Robotics and Automation (ICRA), 2023. 1, 2

[71] Aleksandar Shtedritski, Christian Rupprecht, and Andrea Vedaldi. What does CLIP Know About a Red Circle? Visual Prompt Engineering for VLMs. In International Conference on Computer Vision (ICCV), 2023. 4

[72] Tao Sun, Yan Hao, Shengyu Huang, Silvio Savarese, Konrad Schindler, Marc Pollefeys, and Iro Armeni. Nothing Stands Still: A Spatiotemporal Benchmark on 3D Point Cloud Registration Under Large Geometric and Temporal Change. ISPRS Journal of Photogrammetry and Remote Sensing, 2025. 2

[73] Ayca Takmaz, Elisabetta Fedele, Robert W Sumner, Marc Pollefeys, Federico Tombari, and Francis Engelmann. OpenMask3D: Open-vocabulary 3d instance segmentation. International Conference on Neural Information Processing Systems (NeurIPS), 2023. 2, 4

[74] Ayca Takmaz, Jonas Schult, Irem Kaftan, Mertcan Akçay, Bastian Leibe, Robert Sumner, Francis Engelmann, and Siyu Tang. 3D Segmentation of Humans in Point Clouds with Synthetic Data. In International Conference on Computer Vision (ICCV), 2023. 2

[75] Ayca Takmaz, Alexandros Delitzas, Robert W. Sumner, Francis Engelmann, Johanna Wald, and Federico Tombari. Search3D: Hierarchical Open-Vocabulary 3D Segmentation. IEEE Robotics and Automation Letters (RA-L), 2025. 2

[76] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J Guibas. KPConv: Flexible and deformable convolution for point clouds. In International Conference on Computer Vision (ICCV), 2019. 2

[77] Thang Vu, Kookhoi Kim, Tung M Luu, Thanh Nguyen, and Chang D Yoo. SoftGroup for 3d instance segmentation on point clouds. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2

[78] Johanna Wald, Helisa Dhamo, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs from 3d indoor reconstructions. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2, 3

[79] Ziqin Wang, Bowen Cheng, Lichen Zhao, Dong Xu, Yang Tang, and Lu Sheng. VL-SAT: Visual-linguistic semantics assisted training for 3d semantic scene graph prediction in point cloud. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2

[80] Silvan Weder, Francis Engelmann, Johannes L Schonberger, Akihito Seki, Marc Pollefeys, and Martin R Oswald. ALSTER: A Local Spatio-temporal Expert for Online 3D Semantic Reconstruction. IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2023. 2

[81] Silvan Weder, Hermann Blum, Francis Engelmann, and Marc Pollefeys. LabelMaker: Automatic semantic label generation from RGB-D trajectories. In International Conference on 3D Vision (3DV), 2024. 2

[82] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. International Conference on Neural Information Processing Systems (NeurIPS), 2022. 4

[83] Abdelrhman Werby, Chenguang Huang, Martin Büchner, Abhinav Valada, and Wolfram Burgard. Hierarchical open-vocabulary 3d scene graphs for language-grounded robot navigation. In First Workshop on Vision-Language Models for Navigation and Manipulation at ICRA 2024, 2024. 1

[84] Shun-Cheng Wu, Johanna Wald, Keisuke Tateno, Nassir Navab, and Federico Tombari. SceneGraphFusion: Incremental 3d scene graph prediction from RGB-D sequences. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1, 2

[85] Shun-Cheng Wu, Keisuke Tateno, Nassir Navab, and Federico Tombari. Incremental 3d semantic scene graph prediction from RGB sequences. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2023. 2

[86] Chao Xu, Yixin Chen, He Wang, Song-Chun Zhu, Yixin Zhu, and Siyuan Huang. PartAfford: Part-level affordance discovery from 3d objects. European Conference on Computer Vision (ECCV) Workshops, 2022. 2

[87] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 6

[88] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph R-CNN for scene graph generation. In European Conference on Computer Vision (ECCV), 2018.

[89] Jingkang Yang, Yi Zhe Ang, Zujin Guo, Kaiyang Zhou, Wayne Zhang, and Ziwei Liu. Panoptic scene graph generation. In European Conference on Computer Vision (ECCV), 2022. 6

[90] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. SAT: 2d semantics assisted training for 3d visual grounding. In International Conference on Computer Vision (ICCV), 2021. 2

[91] Yufei Ye, Abhinav Gupta, and Shubham Tulsiani. What's in your hands? 3d reconstruction of generic objects in hands. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2

[92] Yufei Ye, Abhinav Gupta, Kris Kitani, and Shubham Tulsiani. G-HOP: Generative hand-object prior for interaction reconstruction and grasp synthesis. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2

[93] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. ScanNet++: A high-fidelity dataset of 3d indoor scenes. In International Conference on Computer Vision (ICCV), 2023. 2, 5, 6

[94] Gonca Yilmaz, Songyou Peng, Marc Pollefeys, Francis Engelmann, and Hermann Blum. OpenDAS: Open-Vocabulary Domain Adaptation for 2D and 3D Segmentation. arXiv preprint arXiv:2405.20141, 2024. 2

[95] Tomoya Yoshida, Shuhei Kurita, Taichi Nishimura, and Shinsuke Mori. Text-driven affordance learning from egocentric vision. arXiv preprint arXiv:2404.02523, 2024. 2

[96] Yuanwen Yue, Sabarinath Mahadevan, Jonas Schult, Francis Engelmann, Bastian Leibe, Konrad Schindler, and Theodora Kontogianni. AGILE3D: Attention guided interactive multi-object 3d segmentation. International Conference on Learning Representations (ICLR), 2024. 2

[97] Guangyao Zhai, Evin Pinar Örnek, Shun-Cheng Wu, Yan Di, Federico Tombari, Nassir Navab, and Benjamin Busam. CommonScenes: Generating commonsense 3d indoor scenes with scene graphs. International Conference on Neural Information Processing Systems (NeurIPS), 2023. 1, 2

[98] Wei Zhai, Hongchen Luo, Jing Zhang, Yang Cao, and Dacheng Tao. One-shot object affordance detection in the wild. International Journal on Computer Vision (IJCV), 2022. 2

[99] Chaoyi Zhang, Jianhui Yu, Yang Song, and Weidong Cai. Exploiting edge-oriented reasoning for 3d point-based scene graph analysis. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2

[100] Chenyangguang Zhang, Yan Di, Ruida Zhang, Guangyao Zhai, Fabian Manhardt, Federico Tombari, and Xiangyang Ji. DDF-HO: Hand-held object reconstruction via conditional directed distance field. International Conference on Neural Information Processing Systems (NeurIPS), 2023. 2

[101] Chenyangguang Zhang, Guanlong Jiao, Yan Di, Gu Wang, Ziqin Huang, Ruida Zhang, Fabian Manhardt, Bowen Fu, Federico Tombari, and Xiangyang Ji. MOHO: Learning single-view hand-held object reconstruction with multi-view occlusion-aware supervision. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2

[102] Shoulong Zhang, Aimin Hao, Hong Qin, et al. Knowledge-inspired 3d scene graph prediction in point cloud. International Conference on Neural Information Processing Systems (NeurIPS), 2021. 2

[103] Yiming Zhang, ZeMing Gong, and Angel X Chang. Multi3DRefer: Grounding text description to multiple 3d objects. In International Conference on Computer Vision (ICCV), 2023. 2

[104] Youcai Zhang, Xinyu Huang, Jinyu Ma, Zhaoyang Li, Zhaochuan Luo, Yanchun Xie, Yuzhuo Qin, Tong Luo, Yaqian Li, Shilong Liu, et al. Recognize anything: A strong image tagging model. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 3, 4

[105] Shijie Zhou, Haoran Chang, Sicheng Jiang, Zhiwen Fan, Zehao Zhu, Dejia Xu, Pradyumna Chari, Suya You, Zhangyang Wang, and Achuta Kadambi. Feature 3DGS: Supercharging 3d gaussian splatting to enable distilled feature fields. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2

[106] Zijian Zhou, Zheng Zhu, Holger Caesar, and Miaojing Shi. OpenPSG: Open-set panoptic scene graph generation via large multimodal models. European Conference on Computer Vision (ECCV), 2024. 6

[107] Xingxing Zuo, Pouya Samangouei, Yunwen Zhou, Yan Di, and Mingyang Li. FMGS: Foundation model embedded 3d gaussian splatting for holistic 3d scene understanding. International Journal on Computer Vision (IJCV), 2024. 2

[108] René Zurbrügg, Yifan Liu, Francis Engelmann, Suryansh Kumar, Marco Hutter, Vaishakh Patil, and Fisher Yu. ICGNet: A Unified Approach for Instance-centric Grasping. In International Conference on Robotics and Automation (ICRA), 2024. 8

data/2025/2503_19xxx/2503.19199/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6bd86032f79f8995439415ce73f1c482d5f2a5bc89a4bd566c361235cf3f70c6
size 507555

data/2025/2503_19xxx/2503.19199/layout.json ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_content_list.json ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_model.json ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19206/f960489b-01ba-4992-95b9-2b84c8e9e359_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8ad3dd4d939178db19c7ad3a00d995265f96596109c98a79af3533a0b91c9d6b
size 4620803

data/2025/2503_19xxx/2503.19206/full.md ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19206/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:071b7dcf87b80ff8c3484d3381ee9bbf506d65e531195f5d3f3f1d3ef60b9619
size 7288522

data/2025/2503_19xxx/2503.19206/layout.json ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_content_list.json ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_model.json ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19213/ec62378f-85c7-445a-9c9f-be2c1c024816_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dd20d57fc83aa8aa5bf078be4d1fb102b7bc71c7fdb985a3eb1f624cc6661ed7
size 337720

data/2025/2503_19xxx/2503.19213/full.md ADDED
@@ -0,0 +1,448 @@

# A Survey of Large Language Model Agents for Question Answering

Murong Yue
Computer Science Department
George Mason University
Fairfax, VA-22030
myue@gmu.edu

Abstract—This paper surveys the development of large language model (LLM)-based agents for question answering (QA). Traditional agents face significant limitations, including substantial data requirements and difficulty in generalizing to new environments. LLM-based agents address these challenges by leveraging LLMs as their core reasoning engine. These agents achieve superior QA results compared to traditional QA pipelines and naive LLM QA systems by enabling interaction with external environments. We systematically review the design of LLM agents in the context of QA tasks, organizing our discussion across key stages: planning, question understanding, information retrieval, and answer generation. Additionally, this paper identifies ongoing challenges and explores future research directions to enhance the performance of LLM agent QA systems.

Index Terms—Question Answering, Large Language Model, Natural Language Processing

# I. INTRODUCTION

The concept of autonomous agents has long been recognized in artificial intelligence research. These agents can perceive their environment and act upon it autonomously, pursuing predetermined goals [1]. The rapid advancement of large language models (LLMs) has led to increased interest in LLM-based agents [2, 3]. LLMs are neural networks comprising billions of parameters. Through training on vast amounts of text data, LLMs acquire a deep understanding of grammar, semantics, context, and world knowledge. This enables them to transform various natural language processing (NLP) tasks into end-to-end text generation problems, resulting in significant performance improvements across multiple domains. For LLM-based agents, we can regard the perception sensors as the "eyes" and textual actions as the "hands". In this scenario, the LLM serves as the "brain" in building sophisticated agents, addressing the limitations of prior agents. Training prior agents requires substantial sample data and incurs high costs in expert reward design. In contrast, LLM agents have broad world knowledge and demonstrate strong generalization capabilities to adapt to new tasks or environments. Moreover, LLMs exhibit powerful reasoning skills due to their broad language understanding and comprehensive world knowledge, handling complex queries even without environment-specific training. Finally, they accept natural language input, offering flexibility, explainability, and user-friendliness.

Question answering (QA) has been a longstanding research focus in NLP and is a widely adopted application for LLM-based agents. QA aims to provide correct answers to questions based on given context or knowledge, addressing human information needs [4]. It is worth noting that many NLP tasks can be framed in a QA format; for instance, a translation task can be posed as "Can you provide the translation of the following sentence?". In this survey, we focus specifically on tasks where users have explicit information needs. While LLMs can directly answer questions, they face certain limitations. First, LLMs may produce hallucinations, generating imprecise or incorrect answers, particularly when nuanced, domain-specific knowledge is required. This is especially problematic in complex fields such as legal, financial, or medical decision-making [5, 6]. Second, an LLM's inference does not interact with external environments, such as databases for retrieving up-to-date information or tools (e.g., calculators, APIs) for obtaining more accurate answers. Moreover, LLMs cannot autonomously verify the correctness of their outputs within the environment.

LLM-based agents are widely used in QA to address these issues. The key distinction between LLM-based agents and standalone LLMs in QA tasks lies in the heuristic design of multiple modules. These modules guide the LLM in performing specific actions, such as planning, and enable interaction with external environments, including databases, tools, other trained models, and humans.

This paper presents a comprehensive survey of LLM agent design for QA tasks. We begin by providing the necessary preliminary knowledge to understand the survey. We then summarize current research on LLM agents in QA, organizing our review based on each stage of the QA process: planning, question understanding, information retrieval, answer generation, and follow-up interaction. For each stage, we discuss the motivation for introducing it and explore how LLM agents are designed to enhance its performance. Additionally, we identify various challenges in this field and discuss potential future research directions.

The research covered in this survey is primarily drawn from top-tier conferences and journals in the NLP field. Key venues include the Annual Meeting of the Association for Computational Linguistics (ACL), the Conference on Empirical Methods in Natural Language Processing (EMNLP), the North American Chapter of the Association for Computational Linguistics (NAACL), the International Conference on Learning Representations (ICLR), and the Conference on Neural Information Processing Systems (NeurIPS), etc.

(B) Traditional QA pipeline

(C) LLM QA system

(D) LLM-based Agent QA system

Fig. 1. Overview of the naive agent, traditional QA pipeline, naive LLM QA system, and LLM-based agent QA system.

# II. PRELIMINARY

# A. Agent

An agent is defined as a computational entity capable of interacting with its environment to achieve specific goals. Figure 1 (A) presents an overview of a conceptual agent. The agent typically consists of:

1) State $S$: refers to a specific configuration or condition of the agent and its environment at a given point in time, such as the agent's current position, the goal of the agent, etc.

2) Observation $O$: the information the agent perceives from its environment through sensors.

3) Action $A$: the specific decision or behavior the agent chooses to execute in the environment.

Let $S_{t}$ denote the agent's state at time $t$. The agent's planning process is represented as:

$$
A_t = \pi_p(S_t) \tag{1}
$$

where $\pi_p$ is the planning policy function and $A_{t}$ is the action at time $t$. The agent's observation $O_{t}$ is obtained by:

$$
O_t = E(A_t) \tag{2}
$$

where $E$ is the sensory processing function. The new agent state is then updated by:

$$
S_{t+1} = T(S_t, A_t, O_t) \tag{3}
$$

where $T$ is the transition function defining how an agent's state transitions from one to another.
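Equations (1)-(3) compose into a simple perceive-plan-act loop. A minimal sketch, with the policy, environment, and transition functions left as placeholders rather than any particular agent implementation:

```python
# Generic agent loop implementing Eqs. (1)-(3).
from typing import Callable, TypeVar

S, A, O = TypeVar("S"), TypeVar("A"), TypeVar("O")

def run_agent(s0: S,
              policy: Callable[[S], A],            # Eq. (1): A_t = pi_p(S_t)
              environment: Callable[[A], O],       # Eq. (2): O_t = E(A_t)
              transition: Callable[[S, A, O], S],  # Eq. (3): S_{t+1} = T(S_t, A_t, O_t)
              steps: int) -> S:
    s = s0
    for _ in range(steps):
        a = policy(s)             # plan an action from the current state
        o = environment(a)        # execute it and observe the environment
        s = transition(s, a, o)   # fold the observation into the next state
    return s
```

Any concrete agent, rule-based, RL-trained, or LLM-driven, then only needs to supply the three callables.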

Naive agents were designed based on heuristic or rule-based approaches, capable of handling only explicitly programmed scenarios and lacking generalization abilities [7]. The advent of Reinforcement Learning (RL) based agents introduced a paradigm shift, enabling agents to learn from interaction with their environment through trial and error and feedback mechanisms in the form of rewards [8, 9].
|
| 73 |
+
|
| 74 |
+
# B. Question Answering
|
| 75 |
+
|
| 76 |
+
Question Answering (QA) is a task wherein a system automatically provides answers to questions posed by users. It serves as a crucial application across multiple domains, including search engines, customer support, and education [10].
|
| 77 |
+
|
| 78 |
+
Let $Q$ be the input question, $C$ be the given context, and $A = (a_{1}, a_{2}, \ldots, a_{T})$ be the answer sequence composed of $T$ tokens. $P(A|Q)$ is the probability of generating the answer given the question.
|
| 79 |
+
|
| 80 |
+
QA systems are generally constructed using three main approaches, categorized by the answer format:
1) Classification-based Methods: Given multiple possible answers $A_{1}, A_{2}, \ldots, A_{n}$, the system is treated as a multi-class classifier:

$$
P\left(A_{i} \mid Q, C\right) = \operatorname{classifier}(Q, C; \theta) \tag{4}
$$

where $A_{i}$ is the predicted answer from the set of possible answers $A_{1}, A_{2}, \ldots, A_{n}$.
2) Span-extraction Methods: These methods involve identifying a span of text in a given document or context that contains the answer [11]:

$$
P_{s} = \operatorname{classifier}\left(C_{s}, Q, C; \theta\right), \quad P_{e} = \operatorname{classifier}\left(C_{e}, Q, C; \theta\right) \tag{5}
$$

where $C_s$ and $C_e$ are the tokens at the start and end positions, and $P_s$ and $P_e$ are the probabilities of the answer start and end positions in the context $C$.
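As a concrete illustration, the short example below uses the Hugging Face `transformers` question-answering pipeline, which wraps exactly this start/end-position prediction; the model name is one public checkpoint chosen for illustration, not a system endorsed by this survey.

```python
# A hedged illustration of span-extraction QA with a public checkpoint.
from transformers import pipeline

qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
result = qa(
    question="Where is the Eiffel Tower located?",
    context="The Eiffel Tower is a wrought-iron lattice tower in Paris, France.",
)
# The pipeline returns the extracted span and a confidence score.
print(result["answer"], result["score"])
```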
3) Text-generation Methods: These models approach QA as a sequence generation task. Given a question, the model generates the answer autoregressively:

$$
A = \arg \max_{A} \prod_{t=1}^{T} P\left(a_{t} \mid Q, C, a_{1}, a_{2}, \ldots, a_{t-1}; \theta\right) \tag{6}
$$

where the model generates each token $a_{t}$ based on the question $Q$, context $C$, and previously generated tokens $a_{1}, a_{2}, \ldots, a_{t-1}$.
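For illustration, the sketch below realizes equation (6) with greedy decoding via the Hugging Face `transformers` API. The gpt2 checkpoint is merely a small illustrative model, and greedy search is a common approximation of the sequence-level argmax.

```python
# A minimal sketch of equation (6) with greedy decoding.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = ("Context: The Nile flows through Egypt.\n"
          "Question: Which country does the Nile flow through?\nAnswer:")
ids = tok(prompt, return_tensors="pt").input_ids
# Each step picks argmax P(a_t | Q, C, a_1..a_{t-1}), a greedy
# approximation of the sequence-level argmax in equation (6).
out = model.generate(ids, max_new_tokens=10, do_sample=False)
print(tok.decode(out[0][ids.shape[1]:], skip_special_tokens=True))
```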
Early efforts in text generation for QA include neural sequence-to-sequence models, where an encoder maps the input question to a latent representation, and a decoder generates the answer [12].

# C. Traditional QA Pipeline

Figure 1 (B) illustrates the traditional QA pipeline. These systems relied on decomposing the problem-solving process into several steps, executed through a fixed pipeline. The process typically begins with query understanding, which involves performing syntactic analysis and semantic understanding of the user's query [13]. This stage includes part-of-speech tagging, dependency parsing, intent classification, and slot extraction. Subsequently, the system employs information retrieval techniques to search for potentially relevant documents or sources of answers from a pre-established database, such as knowledge graphs, databases, or documents [14]. Finally, the system selects the most probable answer from candidate answers, extracts the answer from the context, or generates the answer and presents it to the user as the final output.
The primary limitations of this pipeline approach are threefold. First, each sub-module required training a specialized model, and these models lacked world knowledge, rendering them unable to handle out-of-domain questions. Second, the pipeline was static, incapable of dynamically planning steps based on the question. Third, for open-domain questions, a vast corpus was required for retrieval, and the system relied heavily on the efficacy of the retrievers.

# D. LLM-based QA

As depicted in Figure 1 (C), LLM-based QA utilizes pre-trained LLMs to comprehend and generate answers, often through fine-tuning on specific datasets or by employing few-shot prompting techniques. This approach has redefined QA paradigms.

Mainstream pre-trained LLMs can be broadly categorized into two types: Masked LLMs (e.g., BERT [15]), which are employed in span-extraction QA tasks, and Autoregressive LLMs (e.g., GPT [16]), which are used in text generation and adhere to the formula presented in equation 6. In this survey, we focus more on the latter.

In contrast to traditional QA systems, LLMs possess the capability to generate coherent and contextually relevant answers, even for questions they were not explicitly trained on. Furthermore, they can perform open-domain QA directly, leveraging their extensive knowledge encoded from large-scale pre-training [17].
Algorithm 1 LLM Agent Question Answering
1: Initialize: Memory $M \leftarrow Q$ ▷ Initialize memory with the question $Q$
2: for each time step $t$ do
3:   $A_{t} \leftarrow \pi_{p}(M)$ ▷ Planning module selects an action based on memory
4:   if $A_{t}$ interacts with the external environment then
5:     $O_{t} \leftarrow F_{Env}(A_{t})$ ▷ Environment feedback function returns an observation
6:     $M \leftarrow M \parallel (A_{t}, O_{t})$ ▷ Append the action and observation to memory
7:   else
8:     $T_{t} \leftarrow F_{Think}(A_{t})$ ▷ Thinking module returns a thought
9:     $M \leftarrow M \parallel (A_{t}, T_{t})$ ▷ Append the action and thought to memory
10:  end if
11: end for

Despite their strengths, LLM-based QA models face several limitations. A significant issue is the phenomenon of hallucination, wherein LLMs may generate factually incorrect but plausible-sounding answers [18]. Additionally, LLMs are constrained by their inability to consult external databases, APIs, or other dynamic sources during inference [19]. Once trained, the model's parameters are fixed, necessitating complete reliance on internalized knowledge to generate answers.
# E. LLM-based QA Agent

To address the limitations of traditional QA pipelines and naive LLM QA systems, LLM-based agents have emerged as a superior solution, as illustrated in Figure 1 (D).

The architecture of an LLM-based agent typically comprises three primary components: memory $(M)$, which aggregates all information the agent possesses, including the initial question $Q$, textual results for understanding the question, and retrieved information; a planning module $(\pi_p)$, which determines the next action by prompting the LLM with planning prompts; and an inner-thinking module $(\pi_t)$, which executes inner-thinking actions by prompting the LLM with thinking prompts. The environment can be the physical world, the digital world (e.g., APIs, web browsers), or even human interactions.

LLM agents dynamically answer the question over several steps. At step $t$, $A_{t}$ represents the action. The action space of an agent can be classified into external and internal actions, depending on whether interaction with the environment is involved. External actions enable the agent to engage with the environment, gather an observation $O_{t}$, and store it in its memory $M$. In contrast, internal actions do not involve external interaction; instead, they modify the agent's memory $M$ based purely on its intrinsic reasoning.
The process is presented in Algorithm 1. The memory $M$ is initialized with the question $Q$. At time step $t$, the action $A_{t}$ is determined by the planner $\pi_{p}$:

$$
A_{t} = \pi_{p}(M) \tag{7}
$$

If $A_{t}$ is an action for interacting with the external environment, $O_{t}$ is obtained by:

$$
O_{t} = E\left(A_{t}\right) \tag{8}
$$

where $E$ is the environment feedback function. Subsequently, the memory is updated as:

$$
M = M \parallel (A_{t}, O_{t}) \tag{9}
$$

where $\parallel$ denotes concatenation.
If $A_{t}$ is a thinking action, the thought $T_{t}$ is obtained by:

$$
T_{t} = \pi_{t}\left(A_{t}\right) \tag{10}
$$

After obtaining the thought, the memory is updated as:

$$
M = M \parallel (A_{t}, T_{t}) \tag{11}
$$

The LLM-based QA agent can be considered a specialized instance of the conceptual agent. In this framework, the planning policy for the LLM-based agent is typically the LLM itself. The agent state of the general agent is analogous to the memory $M$ of the LLM-based agent, and the transition function $T$ in equation 3 is equivalent to the concatenation operation. A key distinction lies in the action space of the LLM-based agent, which encompasses not only interactions with the environment but also the activation of inner-thinking processes. Using the LLM as both planner and inner thinker endows the LLM-based agent with superior generalization and reasoning abilities compared to other agent types, such as those based on reinforcement learning.
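A minimal Python sketch of this loop (Algorithm 1) follows. The `llm` and `search` callables, the action string format, and the prompt wording are all our illustrative assumptions rather than a prescribed implementation.

```python
# A hedged sketch of the LLM-agent QA loop in Algorithm 1.
def llm(prompt: str) -> str:
    raise NotImplementedError("plug in any chat/completions API here")

def search(query: str) -> str:
    raise NotImplementedError("plug in any retrieval backend here")

def answer(question: str, max_steps: int = 5) -> str:
    memory = f"Question: {question}"              # M <- Q
    for _ in range(max_steps):
        action = llm(f"{memory}\nDecide the next action "
                     "(SEARCH:<query>, THINK:<topic>, or ANSWER:<final answer>).")
        if action.startswith("ANSWER:"):
            return action[len("ANSWER:"):].strip()
        if action.startswith("SEARCH:"):          # external action
            obs = search(action[len("SEARCH:"):].strip())  # O_t = E(A_t)
            memory += f"\n{action}\nObservation: {obs}"    # M <- M || (A_t, O_t)
        else:                                     # internal (thinking) action
            thought = llm(f"{memory}\nThink step by step about: {action}")
            memory += f"\n{action}\nThought: {thought}"    # M <- M || (A_t, T_t)
    return llm(f"{memory}\nGive the final answer.")
```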
# III. AREA TAXONOMY

As discussed in Section II-C, the question-answering process can be broken down into question understanding, information retrieval, and answer generation. In addition, planning is a feature unique to the LLM agent: it is the agent's inner ability to determine the best strategy for answering the question, such as how to understand the query and whether to retrieve information directly or infer an answer through reasoning. Therefore, we follow these stages to organize our survey. Moreover, a QA system may include follow-up interaction, which allows users to clarify their queries or ask related follow-up questions, making the interaction more dynamic and user-centric [20]. We follow the taxonomy shown in Figure 2 to present the cutting-edge techniques used in question answering in the LLM era.
# IV. TAXONOMY-BASED SURVEY

# A. Datasets

The rapid advancement of LLMs has significantly impacted QA tasks, prompting the creation of diverse and challenging datasets. These datasets can be broadly categorized into two main types: closed-domain/context-based QA and open-domain QA. The distinction between the two lies in whether the scope of knowledge is constrained to specific documents. In closed-domain QA, the model's reference is strictly limited to a predefined context, such as a passage, document, or domain-specific corpus, provided alongside the question. The model is expected to derive the answer solely from this context. External knowledge or information beyond the provided context is neither required nor considered. In contrast, open-domain QA involves no such constraints on the source of knowledge. The model is tasked with answering questions by retrieving relevant information from a vast, open-ended corpus, such as the internet, or from the inner knowledge acquired during the pre-training stage. This approach demands the ability to handle queries with no specific document or passage supplied.
a) Closed-domain datasets: Closed-domain QA datasets are typically designed to assess a system's ability to answer questions within specific documents. SQuAD [11] and DROP [21] assess a system's ability to perform comprehension over text passages, requiring models to handle complex documents and extract relevant information to answer questions. DROP additionally involves performing arithmetic operations or comparisons on the extracted information. HotpotQA [22] is also designed for reading comprehension but places more emphasis on multi-hop reasoning: models must answer questions by analyzing information from multiple paragraphs, often performing intermediate reasoning steps to connect evidence across different contexts. This encourages not only factual recall but also the ability to reason over the extracted facts. FinQA [6] is a dataset specifically crafted for financial reasoning. It contains questions based on financial reports and documents, requiring models not only to extract information but also to understand and reason about financial concepts, often involving numeric and arithmetic reasoning.
b) Open-domain QA datasets: In contrast, open-domain QA datasets do not provide explicit references to specific documents or passages from which the answer must be derived. Instead, these datasets cover a broader range of topics and test a wide variety of reasoning skills, often requiring the model to retrieve or infer information from vast, unstructured knowledge sources or from the knowledge stored in its parameters. In the LLM era, open-domain QA datasets have become more important, as they reflect a more natural way for humans to interact with machines.

Factual question answering in open-domain QA is exemplified by datasets such as StrategyQA [23], which requires models to recall the required information and answer yes/no questions by employing multi-step reasoning to determine whether the claim is correct.


Fig. 2. Area Taxonomy of LLM-based QA agent.

Another dataset, ASQA [24], focuses on answering ambiguous questions by generating multiple possible interpretations and corresponding answers. Meanwhile, ELI5 [25] is designed for answering long-form, open-ended questions posed by non-experts, often requiring models to provide detailed explanations on a wide array of topics in a manner that is both informative and accessible to lay audiences.
When it comes to evaluating mathematical reasoning, several challenging datasets are widely used. GSM8K [26] tests the ability to solve grade-school-level math problems, emphasizing arithmetic and problem-solving skills. For more advanced mathematical reasoning, the MATH [27] and TheoremQA [28] datasets present high-school-level and university-level math problems, respectively, covering topics like algebra, calculus, and geometry. Additionally, OlympiadBench [29] introduces a suite of competition-level problems that challenge models to solve complex and creative mathematical puzzles, often requiring deep mathematical insight.
Symbolic reasoning capabilities are tested using datasets like BBH (BIG-Bench Hard) [30], which includes a variety of difficult reasoning tasks such as pattern recognition, logic puzzles, and algorithmic reasoning. Another dataset, FOLIO [31], is specifically designed to assess the symbolic reasoning abilities of models through a range of formal logic and symbolic manipulation tasks.
For evaluating knowledge-intensive reasoning across multiple domains, datasets such as MMLU [32] are utilized. MMLU consists of questions from over 50 different fields, including history, physics, and law, and requires models to demonstrate broad knowledge and reasoning across both the humanities and sciences. Similarly, GPQA [33] assesses the model's ability to answer open-ended graduate-level questions across various domains, often requiring retrieval of specific scientific facts. WikiQA [34] focuses on open-domain question answering based on Wikipedia data, where models are tasked with retrieving relevant information from Wikipedia articles to answer a diverse set of questions.
Lastly, conditional reasoning is explored through datasets like IFQA [35], where models must reason over conditions that may be counterfactual. The model is required to understand hypothetical or counterfactual scenarios and derive correct answers based on given conditions or changes in context.
# B. Planning

Planning is a crucial component in autonomous systems and is key to enabling agents to take deliberate actions. It refers to the process by which an agent formulates a sequence of intermediate steps or actions that lead to the achievement of a final goal or answer. There are two primary paradigms:
a) Prompting-based Planning: In this approach, the LLM is guided by well-formulated instructions, leveraging its latent knowledge to make decisions. By prompting the LLM to consider intermediate actions and reasoning steps, the model can effectively answer the question. ReAct [36] prompts the LLM to decide not only the next action it should take but also the content of that action. For instance, if the next step involves retrieving information, the model is asked both what action to take (e.g., searching) and what specific content it should search for. This approach demonstrated the potential of using prompting as a mechanism to guide the LLM's decision-making and planning capabilities. ReAct was one of the first works to show that prompting LLMs can effectively produce a plan based on the current situation and take a series of actions to better answer the question. Think-on-Graph [37] focuses on planning for knowledge-graph-based QA. In this approach, the LLM is prompted to decide whether it should continue exploring nodes in a knowledge graph to gather additional information, or whether it has enough data to answer the question at hand. This strategy enables the LLM to iteratively plan its search through the graph, making dynamic decisions about when to stop gathering information and proceed with answering the question, thus improving its reasoning capabilities over structured data. Similarly, Active Retriever [38] highlights the importance of continually gathering information across multiple reasoning steps. This approach emphasizes that a single round of retrieval might not be sufficient for answering complex questions, and therefore prompts the LLM to plan multiple retrievals when necessary. The model is prompted to assess the completeness of its current information and to decide whether further retrieval is needed before attempting to answer the question. By prompting the LLM to actively plan repeated rounds of information gathering, Active Retriever ensures that the model remains flexible and can adjust its strategy as it progresses through the task. Agentverse [39] extends this idea by prompting LLMs to decide which expert models or agents to involve in answering the question during the decision-making process. In this approach, the LLM is tasked with choosing between various specialized models or retrieval systems based on the nature of the question. For example, the LLM might decide that a specific expert model is better suited to retrieve legal or financial information.
whether it has enough data to answer the question at hand. This strategy enables the LLM to iteratively plan its search through the graph, making dynamic decisions about when to stop gathering information and proceed with answering the question, thus improving its reasoning capabilities over structured data. Similarly, Active Retriever [38] highlights the importance of continually gathering information across multiple reasoning steps. This approach emphasizes that a single round of retrieval might not be sufficient for answering complex questions, and therefore prompts the LLM to plan multiple retrievals when necessary. The model is prompted to assess the completeness of its current information and to decide whether further retrieval is needed before attempting to answer the question. By prompting the LLM to actively plan repeated rounds of information gathering, Active Retriever ensures that the model remains flexible and can adjust its strategy as it progresses through the task. Agentverse [39] extends this idea of planning by prompting LLMs to decide which expert models or agents to involve in answering the question during the decision-making process. In this approach, the LLM is tasked with choosing between various specialized models or retrieval systems, based on the nature of the question. For example, the LLM might decide that a specific expert model is better suited to retrieve legal or financial information.
|
| 218 |
+
|
| 219 |
+
Collectively, these works demonstrate the effectiveness of prompting LLMs to make the plan. However, despite the success of these methods, they heavily rely on the careful design of heuristic prompts. The effectiveness of the LLM's planning ability depends significantly on how well the instructions are formulated and how similar the in-context demonstrations, i.e., the examples provided within the prompt, are provided. This dependence on prompt design poses a challenge for generalization. Because each prompt and set of demonstrations is often tailored to a specific task or domain, the LLM may struggle to transfer its planning abilities to new, unseen contexts.
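To illustrate the prompting style these methods rely on, below is a hedged sketch of a ReAct-style planning prompt. The exact wording and the toy demonstration are ours, not the prompt used in [36].

```python
# A ReAct-style planning prompt template (illustrative wording only).
# The few-shot demonstration shows the interleaved Thought/Action/
# Observation format the LLM is asked to follow.
REACT_PROMPT = """Answer the question by interleaving Thought, Action, and Observation steps.
Available actions: Search[query], Finish[answer].

Question: What is the capital of the country where the Rhine ends?
Thought: The Rhine ends in the Netherlands. I should confirm its capital.
Action: Search[capital of the Netherlands]
Observation: Amsterdam is the capital of the Netherlands.
Thought: I now know the answer.
Action: Finish[Amsterdam]

Question: {question}
Thought:"""

print(REACT_PROMPT.format(
    question="Who wrote the novel adapted into the film Blade Runner?"))
```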
b) Tuning-based Planning: In these approaches, the LLM can learn from incorrect action trajectories and refine its strategies through trial and error, improving its ability to solve tasks autonomously. One approach is FireAct [40], which fine-tunes LLMs using action trajectories generated from multi-hop QA tasks. This method capitalizes on the ReAct-style framework, creating multiple potential action trajectories for solving a task. During training, GPT-4 is prompted to generate these action trajectories. The correct action trajectories are then collected and used to fine-tune the planner, allowing it to learn from past attempts and gradually improve its decision-making processes. The Learning from Failure approach [41] argues that using only the correct trajectories, while discarding failed attempts, wastes valuable data. This approach recognizes that the comparison between successful and failed trajectories can offer crucial insights for fine-tuning the planner. Instead of ignoring failed trajectories, Learning from Failure proposes incorporating them into the training process. Specifically, the method includes positive examples with a standard prompt and negative examples with a special prompt that indicates the case was incorrect. By learning from both success and failure, the model can better understand why certain actions lead to failure and how they can be avoided in future tasks. During inference, only the normal prompt is provided, but the planner has already been fine-tuned to understand the difference between successful and unsuccessful trajectories. This method enhances the model's ability to generalize from both types of experiences, offering a more holistic approach to learning from trial and error. While many prior works are limited to a narrow range of tasks, AgentGen [42] aims to synthesize planning paths for a much more diverse set of tasks, each conditioned on specific environments. AgentGen first constructs multiple seed environments. The LLM is then prompted to alter these environments, for example by "adding more constraints". Once the modified environment is generated, the LLM proceeds to collect trajectories based on the new environment. This allows the model to explore various ways of handling different types of environments, significantly expanding its planning capabilities beyond the narrow focus of prior methods.
The primary advantage of these tuning-based planning approaches is their ability to learn from vast amounts of training data, including both successful and failed attempts. However, these methods also have notable limitations. One major challenge is that they rely heavily on searching through multiple potential trajectories to identify the best planning path. This reliance on search-based methods can limit the scalability of the approach. Additionally, the fine-tuning process itself, while effective at improving task-specific performance, can negatively impact the model's ability to generalize to new, unseen tasks, posing a trade-off between task-specific optimization and broader generalization.
# C. Question Understanding

The question-understanding process requires extracting and comprehending information from the user's query to make it easier for the machine to process. Users' questions can be ambiguous or complex, so multiple techniques are used to help the machine process the question and make answering easier. Traditionally, separate models were trained specifically for tasks like slot tagging and intent understanding [43]. As LLMs have shown an inherent ability to handle complex linguistic structures, research has leveraged them to perform question understanding without the need for task-specific models.
a) Identifying Slots: Slot identification focuses on recognizing specific entities, variables, or attributes within a query and categorizing them based on predefined types, such as names, dates, locations, or specialized terms. This process serves as a bridge between unstructured natural language input and structured data representations, enabling systems to map user queries into a structured format that can be processed. An example of slot identification by an LLM agent is ChatLaw [5], a legal consultation system that utilizes LLMs to identify and cluster legal entities from consultation questions. For example, when a user asks a legal question such as "What are the penalties for breach of contract in California?", the system extracts key entities like "California" as a location. This serves as a basic step of question understanding.
b) Query Expansion: Query expansion enhances the retrieval of relevant information by augmenting the user's original query with additional terms or phrases. These additional terms can include synonyms, related concepts, or more specific details that are inferred from the context of the search or question [44]. In many cases, users' initial queries may lack the precision needed to return the most relevant results. By expanding the query with terms that are semantically related to the original input, query expansion helps to mitigate these issues. For example, if a user queries "car insurance claims," a query expansion process might add terms like "vehicle insurance," "auto claims," or "accident report" to improve the retrieval of documents that might not explicitly match the user's initial input but are still relevant to the topic [45].
An approach utilizing LLMs for query expansion is HyQE [46], which prompts LLMs to generate multiple hypothetical documents that act as expansions for the original query. The key insight behind HyQE is that hypothetical documents, generated by LLMs, have a higher probability of containing the necessary keywords that are likely to be present in relevant answer documents. Another notable approach is Query2CoT [47]. It first breaks down complex queries into step-by-step sub-questions. The LLM is instructed to identify the key keywords associated with each sub-question. By decomposing the query in this way, Query2CoT enables the retrieval system to focus on different components of the query in isolation, improving precision in identifying relevant documents. Another innovative query expansion method is Step-back reformulation [48], which reformulates complex reasoning questions into higher-level concept questions. This method focuses on simplifying reasoning-intensive queries by stepping back from the detailed question and focusing on a broader conceptual understanding. For example, given the specific question "Estella Leopold went to which school between August 1954 and November 1954?", a step-back reformulation might simplify this into the higher-level question "What was Estella Leopold's education history?". By expanding the query in this way, the system broadens its scope of inquiry, allowing for the retrieval of more general information that might still provide the answer.
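As a concrete illustration of hypothetical-document-style expansion, the sketch below generates passages that might answer the query and appends them to the retrieval input. The prompt wording and helper names are ours, not the exact implementation of any of the methods above.

```python
# A hedged sketch of query expansion with LLM-generated hypothetical
# documents; `llm` is an assumed completion function.
from typing import Callable

def expand_query(query: str, llm: Callable[[str], str], n_docs: int = 3) -> str:
    hypothetical_docs = [
        llm(f"Write a short passage that would answer the question: {query}")
        for _ in range(n_docs)
    ]
    # Retrieval then runs on the original query plus the hypothetical
    # passages, which tend to share vocabulary with true answer documents.
    return " ".join([query, *hypothetical_docs])
```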
c) Query Reformulation: Another effective approach to handling ambiguity or vagueness in user queries is query reformulation, which involves rephrasing or simplifying the query. A prominent method is Rephrase and Respond [49], which designs specific prompts to instruct the LLM to rewrite questions for improved clarity. In this approach, the LLM is prompted to rephrase the original query in a more structured or precise manner, helping to clarify ambiguities and improve the alignment between the query and the retrieved information. In addition to prompt-based methods, other approaches have explored fine-tuning LLMs specifically for the task of query rewriting [50, 51]. The authors introduce methods where LLMs are fine-tuned to rewrite queries into multiple alternative versions. The system first generates multiple reformulations of the original query and then evaluates which of these reformulated queries leads to the most accurate answer in a downstream pipeline. In these methods, the correctness of the final answer acts as a reward signal to guide the selection of the best query rewrite. Once the best-performing query is identified, the system uses techniques like Direct Preference Optimization (DPO) [52] to fine-tune the LLM, training it to generate improved query rewrites in future interactions.
# D. Information Retrieval

The information retrieval component in LLM-based QA agents is pivotal for answering knowledge-intensive questions, extracting relevant knowledge from a vast corpus or external knowledge sources. Information retrieval refers to the process of identifying and ranking documents, passages, or snippets that may contain the necessary information to answer a given question. This process typically follows a retrieving-and-ranking paradigm:

a) Retrieval: Retrieval involves fetching candidate documents or passages using sparse or dense retrieval techniques. Sparse methods, like BM25 [4], rely on term frequency-inverse document frequency (TF-IDF) style term weighting. However, sparse methods often struggle with semantic mismatches, where the query and document may have similar meanings but use different terms.
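For reference, here is a minimal sparse-retrieval example using the open-source `rank_bm25` package (one of several BM25 implementations); the toy corpus and whitespace tokenization are illustrative only.

```python
# A minimal BM25 sparse-retrieval example with the `rank_bm25` package.
from rank_bm25 import BM25Okapi

corpus = [
    "vehicle insurance claims must be filed within thirty days",
    "the Eiffel Tower is located in Paris",
    "auto accident reports are required for insurance claims",
]
bm25 = BM25Okapi([doc.split() for doc in corpus])

query = "car insurance claims".split()
scores = bm25.get_scores(query)  # one relevance score per document
print(sorted(zip(scores, corpus), reverse=True)[0])
```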
Dense retrieval methods learn vector representations for queries and documents, embedding them in a shared semantic space. Once the query $q$ and document $d$ are embedded into vectors, we compute the similarity between the two using a distance metric and retrieve the top-$K$ documents. The objective of dense retrieval can be formulated as a contrastive learning objective:

$$
\mathcal{L} = -\log \frac{e^{\mathrm{sim}\left(q_{i}, d_{i}^{+}\right)}}{e^{\mathrm{sim}\left(q_{i}, d_{i}^{+}\right)} + \sum_{j=1}^{N} e^{\mathrm{sim}\left(q_{i}, d_{j}^{-}\right)}} \tag{12}
$$

where $d_i^+$ is the positive sample, $d_j^-$ are negative samples, and $N$ is the number of negative samples [53].
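A hedged PyTorch sketch of this objective follows, using dot-product similarity; the tensor shapes are our assumption, and cross-entropy with the positive in class 0 is mathematically the same negative log-softmax as equation (12).

```python
# A PyTorch sketch of the contrastive objective in equation (12).
import torch
import torch.nn.functional as F

def contrastive_loss(q: torch.Tensor,      # (B, d) query embeddings
                     d_pos: torch.Tensor,  # (B, d) positive doc embeddings
                     d_neg: torch.Tensor   # (B, N, d) negative doc embeddings
                     ) -> torch.Tensor:
    sim_pos = (q * d_pos).sum(-1, keepdim=True)     # (B, 1): sim(q_i, d_i^+)
    sim_neg = torch.einsum("bd,bnd->bn", q, d_neg)  # (B, N): sim(q_i, d_j^-)
    logits = torch.cat([sim_pos, sim_neg], dim=-1)  # positive is class 0
    labels = torch.zeros(q.size(0), dtype=torch.long)
    # cross_entropy == -log softmax(positive), i.e. equation (12)
    return F.cross_entropy(logits, labels)

loss = contrastive_loss(torch.randn(4, 8), torch.randn(4, 8), torch.randn(4, 7, 8))
```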
b) Ranking: Although retrieved documents can help the LLM agent answer questions with external knowledge, irrelevant documents can sometimes hurt performance [54]. Moreover, the number of input tokens an LLM can process is limited. It is therefore important to keep only the most relevant information after retrieval. Ranking typically follows retrieval, where an additional model, such as the LLM itself or a tuned LLM, assigns scores to the retrieved candidates, refining their relevance. For a given query $q$ and retrieved documents $d_{1}, d_{2}, \ldots, d_{k}$, the ranking model produces a relevance score $r(q, d_{i})$ for each document. The documents are then ordered by their scores:

$$
r(q, d_{i}) = \mathrm{RankModel}(q, d_{i}) \tag{13}
$$

Recent research, including studies by Ma et al. [55] and Zhuang et al. [56], highlights that while LLMs may not excel as document retrievers, they demonstrate remarkable capabilities when employed as re-rankers of previously retrieved documents. These evaluations compared LLM-based rankers against traditional rankers and found significant strengths in re-ranking: because LLMs possess a powerful ability to comprehend and analyze the deeper semantic meanings within documents, they can assign relevance scores that align more closely with the user's intent or query.
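The sketch below illustrates pointwise LLM re-ranking of the kind these systems use: the LLM is prompted to score each retrieved document's relevance. The `llm` callable, the prompt wording, and the 0-10 scale are our illustrative assumptions.

```python
# A hedged sketch of pointwise LLM-based re-ranking (equation 13).
from typing import Callable

def rerank(query: str, docs: list[str], llm: Callable[[str], str]) -> list[str]:
    def score(doc: str) -> float:
        reply = llm(
            f"Query: {query}\nDocument: {doc}\n"
            "On a scale of 0 to 10, how relevant is the document to the query? "
            "Reply with a single number."
        )
        try:
            return float(reply.strip())
        except ValueError:
            return 0.0  # treat unparseable replies as irrelevant
    return sorted(docs, key=score, reverse=True)
```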
An example of an LLM-based system that leverages this re-ranking ability is Haystack [57], in which LLMs are prompted to assess the relevance of a set of documents to a given query. These scores are then used to re-rank the retrieved documents, ensuring that the most semantically relevant documents appear higher in the list. Another approach is Self-RAG [58], in which a critic LLM is fine-tuned to evaluate not only the relevance of retrieved documents but also their usefulness in contributing to the final answer. One of the key challenges in retrieval-augmented generation systems is determining when to trust the LLM's internal knowledge versus relying on external documents. The critic LLM in Self-RAG addresses this issue by distinguishing between documents that provide novel information and those that simply repeat knowledge the LLM already possesses; for example, it will favor documents that add new or complementary information.
c) Compression and Selection: Compression and selection techniques summarize long documents and select the most relevant passages. The key insight behind these techniques is that not all portions of a document contribute equally to the final output. They aim to preserve the essential content while reducing the length of the document. The compression model maps a document $D$ to a compressed representation $C(D)$:

$$
C(D) = \mathrm{Compression}(D) \tag{14}
$$
LLMLingua [59] introduces a coarse-to-fine, step-by-step compression methodology designed specifically to handle long prompts. LLMLingua first applies a coarse-grained compression step to reduce the input size by removing extraneous or low-importance information. This is followed by a fine-grained process that further refines the compressed input, ensuring that essential semantic content is preserved. By progressively compressing the prompt in this manner, LLMLingua significantly reduces the input length while retaining the information necessary to maintain the model's performance. RECOMP [60] fine-tunes LLMs as both extractive and abstractive compressors. The extractive compression phase identifies and retains the most important sentences or phrases, ensuring that critical details are preserved. The abstractive compression step then summarizes or rephrases the remaining content into a more concise form. This hybrid model of compression allows RECOMP to balance detail retention and brevity.
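For intuition, a simple extractive-compression baseline keeps the sentences most similar to the query. The sketch below uses the `sentence-transformers` package; the model name, naive sentence splitting, and top-k heuristic are illustrative choices, not any specific method from the papers above.

```python
# A hedged sketch of extractive compression by query-sentence similarity.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")

def compress(document: str, query: str, keep: int = 3) -> str:
    sentences = [s.strip() for s in document.split(".") if s.strip()]
    doc_emb = model.encode(sentences, convert_to_tensor=True)
    q_emb = model.encode(query, convert_to_tensor=True)
    scores = util.cos_sim(q_emb, doc_emb)[0]   # similarity per sentence
    top = scores.topk(min(keep, len(sentences))).indices.tolist()
    # Keep the selected sentences in their original order.
    return ". ".join(sentences[i] for i in sorted(top)) + "."
```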
# E. Answer Generation

Answer generation synthesizes relevant information to produce a response to the given query. Several methods can enhance this process in LLM-based agents:

a) Tool-augmented Generation: This approach allows LLMs to interact with external tools, such as calculators or code interpreters, to augment their reasoning capabilities. The Program-of-Thought (PoT) approach [61, 62] uses LLMs to generate executable Python code as part of the reasoning process. Instead of relying solely on the LLM to generate answers directly, PoT leverages a code interpreter to obtain the final answer: the LLM generates Python code as intermediate steps, the code is executed in an interpreter, and the results are returned as the final answer. The advantage of this approach is that it allows LLMs to handle problems that require both complex reasoning and precise computation.
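A hedged sketch of this pattern follows: the LLM writes Python and the interpreter computes the answer. The `llm` callable and prompt wording are our assumptions, and `exec()` on untrusted model output is unsafe, so this is for illustration only.

```python
# A hedged sketch of Program-of-Thought-style answer generation.
def llm(prompt: str) -> str:
    raise NotImplementedError("plug in any chat/completions API here")

def pot_answer(question: str) -> object:
    code = llm(
        f"Question: {question}\n"
        "Write Python code that computes the answer and stores it in a "
        "variable named `answer`. Return only the code."
    )
    namespace: dict = {}
    exec(code, namespace)           # run the generated program (unsafe in production)
    return namespace.get("answer")  # the precise result comes from the interpreter
```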
HuggingGPT [63] and OpenAGI [64] extend this concept further by integrating domain-specific external models as tools for LLMs. These systems prompt the LLM to recognize tasks that require specialized knowledge beyond its internal capacity and then invoke external models to handle those tasks. For instance, in an image-related task, HuggingGPT can call upon a pre-trained image segmentation model to analyze an image, extract relevant features, or make predictions, which the LLM then incorporates into its reasoning process to produce a final answer. Similarly, OpenAGI acts as an orchestrator that connects LLMs to various task-specific models, whether for processing audio, images, or other modalities, allowing the LLM to integrate multimodal information into its responses. This approach enables the LLM to work alongside specialized AI models, thereby enhancing its ability to answer questions or solve problems that extend beyond the textual or conceptual knowledge encoded in its pre-trained weights.

The Binding approach [65, 66] takes this integration of external tools one step further by using a hybrid system of natural language generation and targeted computational tools. In Binding, the LLM is first prompted to generate a preliminary solution to a problem in the form of a natural language skeleton. However, instead of producing a complete solution, the LLM intentionally leaves certain parts of the answer blank as placeholders for information that will be calculated or filled in by external tools. For example, in solving a mathematical word problem, the LLM might generate the structure of the solution in natural language, outlining the steps required to solve the problem, but leaving placeholders for specific numerical calculations. These masked tokens are then filled in using external tools like a calculator, which can compute the precise values needed. Binding improves the overall reliability of LLM-generated answers by ensuring that numerical or factual inaccuracies are minimized through the use of dedicated external computational systems.
b) Prompt-Enhanced Generation: Various prompting techniques can improve answer generation.

The Chain-of-Thought (CoT) approach [67] prompts LLMs to generate step-by-step intermediate reasoning when answering complex questions, rather than providing direct answers in a single pass. This structured reasoning process allows the model to break the problem down into smaller, manageable steps. One advancement in CoT is the introduction of question decomposition techniques: Zhou et al. [68] propose the Least-to-Most prompting strategy, where complex questions are explicitly decomposed into a series of simpler sub-questions that can be tackled in sequence. Another notable refinement is self-verification [69], which leverages the model's own reasoning to verify the correctness of its intermediate steps. After generating an initial answer, the LLM is prompted to re-evaluate its reasoning by checking the final answer for logical consistency and factual accuracy.
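The sketch below combines a zero-shot CoT trigger with a self-verification pass. The widely used "Let's think step by step" trigger phrase, the verification prompt, and the `llm` interface are illustrative assumptions, not the exact prompts from the cited papers.

```python
# A hedged sketch of zero-shot CoT plus a self-verification pass.
from typing import Callable

def cot_with_verification(question: str, llm: Callable[[str], str]) -> str:
    reasoning = llm(f"{question}\nLet's think step by step.")
    verdict = llm(
        f"Question: {question}\nProposed reasoning and answer:\n{reasoning}\n"
        "Check each step for logical consistency and factual accuracy. "
        "Reply VALID, or give a corrected answer."
    )
    return reasoning if verdict.strip().startswith("VALID") else verdict
```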
Recent approaches optimize the prompting with training data. Self-Discovery [70] aims to automatically discover compositions of optimal reasoning strategies. In Self-Discovery, the LLM explores different ways of approaching the problem, experimenting with diverse prompts and reasoning strategies; over time, the model learns which strategies lead to more successful outcomes and adjusts its behavior accordingly. PromptAgent [71] builds on these ideas by integrating automatic prompt optimization into the reasoning process. It treats prompt construction as an adaptive, multi-step process in which the LLM itself plays a central role in refining and improving the prompt structure, continuously updating the prompt based on the LLM's performance during question answering.
# F. Follow-up Interaction

Follow-up interaction maintains engagement and ensures clarity during conversations. Unlike traditional QA systems, LLM agents engage in multi-turn conversations, refining answers based on user feedback [72].

a) Error Resolution: Follow-up interaction allows models to clarify misunderstandings or request additional details. Schick et al. [73] explore using human feedback to revise creative writing, while Yan et al. [74] apply human feedback to refine semantic parsing results.

b) Sequential Question Answering: This involves maintaining context across multiple question-answering turns [75]. LLMs can engage in multi-turn conversations after instruction tuning and reinforcement learning from human feedback [76]. ChatQA [77] further enhances this ability through context-enhanced instruction tuning.
# V. OPEN PROBLEMS

As LLM QA agents grow more powerful, tasks that once seemed challenging, such as generating coherent multi-turn conversations or solving complex math problems, have become more manageable. However, these systems are still far from perfect, and new challenges continue to arise.
# A. Challenging and Practical Benchmarking

As LLM agents show great performance improvements, benchmarking them becomes increasingly complex. This challenge spans multiple aspects:
a) Fine-grained Answer Generation Process Evaluation: Evaluating free-form answers remains a major challenge for existing LLM-based QA system benchmarks. Many benchmarks still rely on multiple-choice formats or coarse-grained metrics like ROUGE [78] and BERTScore [79], which focus solely on the final answer. Such evaluations overlook the reasoning process that leads to the answer, limiting their granularity. With the increased adoption of techniques like CoT [67], future benchmarks need to incorporate more fine-grained evaluation mechanisms that assess not only the final answer but also the underlying reasoning process, ensuring both the correctness and coherence of the thought process [80].
b) Hard-to-judge Question Evaluation: Some questions are inherently difficult to assess, unlike games such as Go, where the criteria for success are clear. These questions can be categorized into two types. First, those with an objectively correct answer but where verifying correctness is difficult, such as complex math problems that may take even experts several hours or days to assess. Second, open-ended questions, such as "How to improve a QA system?" where no absolute answer exists. Developing benchmarks that effectively evaluate LLM agents' performance on such questions is crucial to understanding their path toward achieving human-level intelligence.
c) Up-to-date Evaluation: A pressing challenge is maintaining the relevance and fairness of benchmarks over time. As LLMs continue to learn from vast and constantly evolving internet data, preventing data leakage and ensuring fair comparisons become essential. First, it is necessary to protect the integrity of the dataset's content, such as test questions and answers. A possible solution would be to develop a benchmark that dynamically updates itself [81]. Second, benchmarks should protect the structure and format of datasets, as using synthetic data or hand-labeled examples to manipulate leaderboards [82] can lead to overfitting. Currently, there are few effective mechanisms to prevent this, making the development of more credible and time-resilient benchmarks a valuable research direction.
# B. Hallucination and Calibration

One of the most pressing issues with current LLMs in QA is their tendency to hallucinate, i.e., to generate false or fabricated information while maintaining confidence that the output is accurate. Unlike humans, who can often gauge their confidence in an answer, LLMs lack well-calibrated mechanisms for judging the correctness of their outputs. Addressing this issue requires improving the model's calibration ability, enabling it to better predict when it may be wrong and communicate that uncertainty. There are several possible avenues for tackling this challenge:
a) Integrating External Tools/Knowledge: A promising research direction involves integrating external tools or knowledge databases that can help LLMs assess the confidence levels of their responses [83]. However, one challenge is that LLMs can be overly influenced by external information without adequately assessing its reliability, unlike humans, who are more attuned to the credibility of information sources. Therefore, enabling LLMs to autonomously analyze and weigh the reliability of different sources is an important direction for reducing hallucinations.
b) Improving Calibration Ability in Training LLM: Another essential approach is to improve the internal calibration mechanisms of LLMs during training. Techniques such as uncertainty-aware training [84] aim to fine-tune models to predict their uncertainty during training. However, these methods have not yet shown significant improvements compared to traditional logit-based methods or ensemble approaches like voting-based strategies [85]. Thus, developing more effective training methods that enhance LLMs' inherent calibration abilities remains an open problem.

c) LLM Intrinsic Representation: Humans typically experience awareness when they are unsure of an answer, raising the question: can LLMs develop a similar form of self-awareness? And if so, how can it be represented? This area of research might involve identifying specific neural circuits within the model that correspond to overconfidence or uncertainty and then adjusting these inner states accordingly. By detecting and modulating these intrinsic representations, researchers could help LLMs become more aware of their own reliability, allowing them to provide more trustworthy answers [86]. However, this is an emerging field, and the underlying mechanisms are still not well understood.
# C. Reasoning Ability Improvement

Enhancing the reasoning capabilities of LLM agents in QA remains an important and challenging area of exploration. Reasoning ability is crucial in QA because it not only determines the correctness of answers but also ensures that the process leading to the conclusion is logical, interpretable, and reliable. Therefore, improving reasoning skills is key to making LLMs effective in real-world problem-solving scenarios.
a) Exploration to Enhance Reasoning: One promising approach to improving reasoning is to explore multiple reasoning paths for training questions or synthesized data [87]. Studies have shown that searching through various trials and training LLMs on successful paths can significantly enhance their reasoning capabilities [88]. However, current search methods often rely on domain-specific signals, such as the Lean language or algebraic representations in geometric problems, which limits generalization to broader domains. In other areas, reward models [89] or LLM self-evaluation strategies [90] are used to score the reasoning process. Therefore, developing more reliable scoring mechanisms for reasoning path exploration is a critical step toward improving LLM reasoning across diverse fields.
b) Improving Reasoning from Memory: Another key area is enabling LLMs to learn from memory, which allows them to quickly adapt their reasoning to new environments. Unlike humans, who learn from past experiences, LLMs treat each interaction as a new session, often repeating the same mistakes. Enhancing LLMs' ability to retain and learn from past interactions is essential. This goes beyond simply providing interaction history at inference time: LLMs need to be able to extract foundational knowledge from past experiences and apply it to new scenarios, thereby improving their adaptability and reasoning effectiveness.
c) Enhancing Logical Chains with Causal Reasoning: Integrating causal reasoning can significantly improve the rigor and coherence of LLM-generated logical chains. Typically, LLMs generate responses based on statistical correlations, which may not accurately reflect the causal logic required for certain problems. By incorporating causal reasoning frameworks, LLMs can better identify and apply causal relationships within problems, leading to more robust logical reasoning. This approach not only improves the accuracy of answers but also enhances their interpretability. Training LLMs to recognize and utilize causal models, especially in domains where causal structures are inherent, will help generate more precise and insightful responses.
# D. Autonomous Tool Selection and Creation

Humans possess the ability to select appropriate tools for various tasks, often summarizing their experiences to create new tools tailored for specific purposes. In the context of QA, humans instinctively determine when to seek external assistance, such as searching the web, consulting a database, or asking an expert. In contrast, current LLMs lack the innate ability to actively choose tools or external resources. They do not inherently plan the steps required to solve a problem or determine when to involve external systems, which presents a significant challenge in developing more autonomous, intelligent agents capable of utilizing the right resources at the right time. Moreover, they cannot identify recurring patterns in questions and create tools on their own to solve such repeated questions.
To address this limitation, future research should focus on enabling LLMs to develop a form of tool-use and tool-creation planning. This would allow the model to dynamically decide when to leverage existing tools and when to create new ones. Such capabilities would bring LLMs closer to human-like problem-solving, enhancing their effectiveness in open-ended tasks and collaborative environments.
# E. LLMs in Building Document Indexing

Another key challenge is the role of LLMs in improving information retrieval (IR). Given that LLMs have demonstrated the ability to grasp the semantic meanings of natural language, their integration into the retrieval process shows great promise. Currently, LLMs are employed for tasks such as question expansion/reformulation or ranking to enhance IR performance. From our perspective, incorporating LLMs into document indexing represents a promising new direction for future research. Indexing involves converting documents into vector representations using an embedding model, allowing document vectors to be retrieved based on their similarity to query vectors. A primary challenge is the cost associated with using LLMs to index millions or billions of documents. However, as smaller LLMs continue to improve in capability, integrating LLMs into the indexing process may become a viable and impactful research avenue.
# VI. CONCLUSION

The rapid development of LLM agents has significantly enhanced question-answering systems. This survey recalls the development of agents and QA systems and then defines the concept of LLM agent QA systems. We break down the answering process into multiple sub-tasks, demonstrating how cutting-edge methods are leveraged to improve the LLM agent QA system. Finally, we highlight notable challenges and identify promising avenues of research that could make LLM-based agents even more powerful.
# REFERENCES

[1] S. Franklin and A. Graesser, "Is it an agent, or just a program?: A taxonomy for autonomous agents," in International Workshop on Agent Theories, Architectures, and Languages. Springer, 1996, pp. 21-35.
[2] J. S. Park, J. O'Brien, C. J. Cai, M. R. Morris, P. Liang, and M. S. Bernstein, "Generative agents: Interactive simulacra of human behavior," in Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology, 2023, pp. 1-22.
[3] L. Wang, C. Ma, X. Feng, Z. Zhang, H. Yang, J. Zhang, Z. Chen, J. Tang, X. Chen, Y. Lin et al., "A survey on large language model based autonomous agents," Frontiers of Computer Science, vol. 18, no. 6, p. 186345, 2024.
[4] D. Jurafsky and J. H. Martin, Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics, and Speech Recognition with Language Models, 2024.
[5] J. Cui, M. Ning, Z. Li, B. Chen, Y. Yan, H. Li, B. Ling, Y. Tian, and L. Yuan, "Chatlaw: A multi-agent collaborative legal assistant with knowledge graph enhanced mixture-of-experts large language model," 2024.
[6] W. Tao, H. Zhu, K. Tan, J. Wang, Y. Liang, H. Jiang, P. Yuan, and Y. Lan, "Finqa: A training-free dynamic knowledge graph question answering system in finance with llm-based revision," in Joint European Conference on Machine Learning and Knowledge Discovery in Databases. Springer, 2024, pp. 418-423.
[7] N. J. Nilsson, Principles of Artificial Intelligence. Springer, 1982.
[8] V. Mnih et al., "Human-level control through deep reinforcement learning," Nature, vol. 518, no. 7540, pp. 529-533, 2015.
[9] T. P. Lillicrap et al., "Continuous control with deep reinforcement learning," arXiv preprint arXiv:1509.02971, 2015.
[10] E. M. Voorhees and D. M. Tice, "The trec-8 question answering track report," in Text REtrieval Conference (TREC). Citeseer, 1999.
[11] P. Rajpurkar et al., "Squad: 100,000+ questions for machine comprehension of text," in Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2016, pp. 2383-2392.
[12] D. Bahdanau et al., "Neural machine translation by jointly learning to align and translate," arXiv preprint arXiv:1409.0473, 2014.
[13] S. Garg et al., "Tanda: Transfer and adapt pre-trained transformer models for answer sentence selection," in Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019, pp. 5488-5494.
[14] V. Karpukhin et al., "Dense passage retrieval for open-domain question answering," in Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2020, pp. 6769-6781.
[15] J. Devlin et al., "Bert: Pre-training of deep bidirectional transformers for language understanding," arXiv preprint arXiv:1810.04805, 2018.
[16] OpenAI, "Gpt-4 technical report," arXiv preprint arXiv:2303.08774, 2023.
[17] F. Petroni et al., "Language models as knowledge bases?" arXiv preprint arXiv:1909.01066, 2019.
[18] Z. Ji et al., "A survey of hallucination in natural language generation," ACM Computing Surveys (CSUR), vol. 55, no. 12, pp. 1-38, 2023.
[19] P. Lewis et al., "Retrieval-augmented generation for knowledge-intensive nlp tasks," in Advances in Neural Information Processing Systems, 2020, pp. 9459-9474.
[20] F. Zhu, W. Lei, C. Wang, J. Zheng, S. Poria, and T.-S. Chua, "Retrieving and reading: A comprehensive survey on open-domain question answering," arXiv preprint arXiv:2101.00774, 2021.
[21] D. Dua, Y. Wang, P. Dasigi, G. Stanovsky, S. Singh, and M. Gardner, "Drop: A reading comprehension benchmark requiring discrete reasoning over paragraphs," arXiv preprint arXiv:1903.00161, 2019.
[22] Z. Yang, P. Qi, S. Zhang, Y. Bengio, W. W. Cohen, R. Salakhutdinov, and C. D. Manning, "Hotpotqa: A dataset for diverse, explainable multi-hop question answering," in Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, 2018, pp. 2369-2380.
[23] M. Geva, D. Khashabi, E. Segal, T. Khot, D. Roth, and J. Berant, "Did aristotle use a laptop? a question answering benchmark with implicit reasoning strategies," Transactions of the Association for Computational Linguistics, vol. 9, pp. 346-361, 2021.
[24] I. Stelmakh, Y. Luan, B. Dhingra, and M.-W. Chang, "Asqa: Factoid questions meet long-form answers," arXiv preprint arXiv:2204.06092, 2022.
[25] A. Fan, P. Lewis, and Y. Dauphin, "Eli5: Long-form question answering," arXiv preprint arXiv:1907.09190, 2019.
[26] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano, C. Hesse, and J. Schulman, "Training verifiers to solve math word problems," arXiv preprint arXiv:2110.14168, 2021.
[27] D. Hendrycks, C. Burns, S. Kadavath, A. Arora, S. Basart, E. Tang, D. Song, and J. Steinhardt, "Measuring mathematical problem solving with the math dataset," arXiv preprint arXiv:2103.03874, 2021.
[28] W. Chen, M. Yin, M. Ku, P. Lu, Y. Wan, X. Ma, J. Xu, X. Wang, and T. Xia, "Theoremqa: A theorem-driven question answering dataset," in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 7889-7901.
[29] C. He, R. Luo, Y. Bai, S. Hu, Z. L. Thai, J. Shen, J. Hu, X. Han, Y. Huang, Y. Zhang, J. Liu, L. Qi, Z. Liu, and M. Sun, "Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems," 2024.
[30] M. Suzgun, N. Scales, N. Scharli, S. Gehrmann, Y. Tay, H. W. Chung, A. Chowdhery, Q. V. Le, E. H. Chi, D. Zhou, and J. Wei, "Challenging big-bench tasks and whether chain-of-thought can solve them," arXiv preprint arXiv:2210.09261, 2022.
[31] S. Han, H. Schoelkopf, Y. Zhao, Z. Qi, M. Riddell, L. Benson, L. Sun, E. Zubova, Y. Qiao, M. Burtell, D. Peng, J. Fan, Y. Liu, B. Wong, M. Sailor, A. Ni, L. Nan, J. Kasai, T. Yu, R. Zhang, S. Joty, A. R. Fabbri, W. Kryscinski, X. V. Lin, C. Xiong, and D. Radev, "Folio: Natural language reasoning with first-order logic," arXiv preprint arXiv:2209.00840, 2022. [Online]. Available: https://arxiv.org/abs/2209.00840
[32] D. Hendrycks, C. Burns, S. Basart, A. Zou, M. Mazeika, D. Song, and J. Steinhardt, "Measuring massive multitask language understanding," arXiv preprint arXiv:2009.03300, 2020.
[33] D. Rein, B. L. Hou, A. C. Stickland, J. Petty, R. Y. Pang, J. Dirani, J. Michael, and S. R. Bowman, "GPQA: A graduate-level google-proof q&a benchmark," in First Conference on Language Modeling, 2024. [Online]. Available: https://openreview.net/forum?id=Ti67584b98
[34] Y. Yang, W.-t. Yih, and C. Meek, "Wikiqa: A challenge dataset for open-domain question answering using wikipedia," arXiv preprint arXiv:1412.7808, 2015.
[35] W. Yu, M. Jiang, P. Clark, and A. Sabharwal, "Ifqa: A dataset for open-domain question answering under counterfactual presuppositions," arXiv preprint arXiv:2305.14010, 2023.
[36] S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao, "React: Synergizing reasoning and acting in language models," arXiv preprint arXiv:2210.03629, 2022.
[37] J. Sun, C. Xu, L. Tang, S. Wang, C. Lin, Y. Gong, H.-Y. Shum, and J. Guo, "Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph," arXiv preprint arXiv:2307.07697, 2023.
[38] Z. Jiang, F. F. Xu, L. Gao, Z. Sun, Q. Liu, J. Dwivedi-Yu, Y. Yang, J. Callan, and G. Neubig, "Active retrieval augmented generation," arXiv preprint arXiv:2305.06983, 2023.
[39] W. Chen, Y. Su, J. Zuo, C. Yang, C. Yuan, C.-M. Chan, H. Yu, Y. Lu, Y.-H. Hung, C. Qian et al., "Agentverse: Facilitating multi-agent collaboration and exploring emergent behaviors," in The Twelfth International Conference on Learning Representations, 2023.
[40] B. Chen, C. Shu, E. Shareghi, N. Collier, K. Narasimhan, and S. Yao, "Fireact: Toward language agent fine-tuning," arXiv preprint arXiv:2310.05915, 2023.
[41] R. Wang, H. Li, X. Han, Y. Zhang, and T. Baldwin, "Learning from failure: Integrating negative examples when fine-tuning large language models as agents," arXiv preprint arXiv:2402.11651, 2024.
[42] M. Hu, P. Zhao, C. Xu, Q. Sun, J. Lou, Q. Lin, P. Luo, S. Rajmohan, and D. Zhang, "Agentgen: Enhancing planning abilities for large language model based agent via environment and task generation," arXiv preprint arXiv:2408.00764, 2024.
[43] Q. Chen, Z. Zhuo, and W. Wang, "Bert for joint intent classification and slot filling," arXiv preprint arXiv:1902.10909, 2019.
[44] C. Carpineto and G. Romano, "A survey of automatic query expansion in information retrieval," ACM Computing Surveys (CSUR), vol. 44, no. 1, pp. 1-50, 2012.
|
| 401 |
+
[45] S. Robertson, “Understanding inverse document frequency: on theoretical arguments foridf,” Journal of documentation, vol. 60, no. 5, pp. 503-520, 2004.
|
| 402 |
+
[46] L. Gao, X. Ma, J. Lin, and J. Callan, "Precise zero-shot dense retrieval without relevance labels," in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), A. Rogers, J. Boyd-Graber, and N. Okazaki, Eds. Toronto, Canada: Association for Computational Linguistics, Jul. 2023, pp. 1762-1777. [Online]. Available: https://aclanthology.org/2023.acl-long.99
|
| 403 |
+
[47] R. Jagerman, H. Zhuang, Z. Qin, X. Wang, and M. Bendersky, "Query expansion by prompting large language models," arXiv preprint arXiv:2305.03653, 2023.
|
| 404 |
+
[48] H. S. Zheng, S. Mishra, X. Chen, H.-T. Cheng, E. H. Chi, Q. V. Le, and D. Zhou, "Take a step back: Evoking reasoning via abstraction in large language models," arXiv preprint arXiv:2310.06117, 2023.
|
| 405 |
+
[49] Y. Deng, W. Zhang, Z. Chen, and Q. Gu, “Rephrase and respond: Let large language models ask better questions for themselves,” arXiv preprint arXiv:2311.04205, 2023.
|
| 406 |
+
[50] X. Ma, Y. Gong, P. He, hai zhao, and N. Duan, "Query rewriting in retrieval-augmented large language models," in The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. [Online]. Available: https://openreview.net/forum?id=gXq1cwkUZc
|
| 407 |
+
[51] W. Peng, G. Li, Y. Jiang, Z. Wang, D. Ou, X. Zeng, Tongxu, and E. Chen, "Large language model based long-tail query rewriting in taobao search," Companion Proceedings of the ACM on Web Conference 2024, 2023. [Online]. Available: https://apisemantic scholar.org/CorpusID:265042961
|
| 408 |
+
[52] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, "Direct preference optimization: Your language model is secretly a reward model," Advances in Neural Information Processing Systems, vol. 36, 2024.
|
| 409 |
+
[53] Y. Gao, Y. Xiong, M. Wang, and H. Wang, "Modular rag: Transforming rag systems into lego-like reconfigurable frameworks," arXiv preprint arXiv:2407.21059, 2024.
|
| 410 |
+
[54] O. Yoran, T. Wolfson, O. Ram, and J. Berant, "Making retrieval-augmented language models robust to irrelevant context," arXiv preprint arXiv:2310.01558, 2023.
|
| 411 |
+
[55] Y. Ma, Y. Cao, Y. Hong, and A. Sun, "Large language model is not a good few-shot information extractor, but a good reranker for hard samples!" arXiv preprint arXiv:2303.08559, 2023.
|
| 412 |
+
[56] S. Zhuang, B. Liu, B. Koopman, and G. Zuccon, "Open-source large language models are strong zero-shot query likelihood models for document ranking," arXiv preprint arXiv:2310.13243, 2023.
|
| 413 |
+
[57] V. Blagojevi, “Enhancing rag pipelines in haystack: Introducing diversityranker and lostinthemidleranker,” 2023.
|
| 414 |
+
[58] A. Asai, Z. Wu, Y. Wang, A. Sil, and H. Hajishirzi, "Self-rag: Learning to retrieve, generate, and critique through self-reflection," arXiv preprint arXiv:2310.11511, 2023.
|
| 415 |
+
[59] H. Jiang, Q. Wu, X. Luo, D. Li, C.-Y. Lin, Y. Yang, and L. Qiu, "Longlmlingua: Accelerating and enhancing llms in long context scenarios via prompt compression," arXiv preprint arXiv:2310.06839, 2023.
|
| 416 |
+
[60] F. Xu, W. Shi, and E. Choi, "Recomp: Improving retrieval-augmented lms with compression and selective augmentation," arXiv preprint arXiv:2310.04408, 2023.
|
| 417 |
+
[61] W. Chen, X. Ma, X. Wang, and W. W. Cohen, “Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks,” arXiv preprint arXiv:2211.12588, 2022.
|
| 418 |
+
[62] L. Gao, A. Madaan, S. Zhou, U. Alon, P. Liu, Y. Yang, J. Callan, and G. Neubig, “Pal: Program-aided language models,” in International Conference on Machine Learning. PMLR, 2023, pp. 10764–10799.
|
| 419 |
+
|
| 420 |
+
[63] Y. Shen, K. Song, X. Tan, D. Li, W. Lu, and Y. Zhuang, "Huggingppt: Solving ai tasks with chatgpt and its friends in hugging face," Advances in Neural Information Processing Systems, vol. 36, 2024.
|
| 421 |
+
[64] Y. Ge, W. Hua, K. Mei, J. Tan, S. Xu, Z. Li, Y. Zhang et al., "Openagi: When llm meets domain experts," Advances in Neural Information Processing Systems, vol. 36, 2024.
|
| 422 |
+
[65] Z. Cheng, T. Xie, P. Shi, C. Li, R. Nadkarni, Y. Hu, C. Xiong, D. Radev, M. Ostendorf, L. Zettlemoyer et al., "Binding language models in symbolic languages," arXiv preprint arXiv:2210.02875, 2022.
|
| 423 |
+
[66] S. Gao, J. Dwivedi-Yu, P. Yu, X. E. Tan, R. Pasunuru, O. Golovneva, K. Sinha, A. Celikyilmaz, A. Bosselut, and T. Wang, "Efficient tool use with chain-of-abstraction reasoning," arXiv preprint arXiv:2401.17464, 2024.
|
| 424 |
+
[67] J. Wei, X. Wang, D. Schuurmans, M. Bosma, F. Xia, E. Chi, Q. V. Le, D. Zhou et al., "Chain-of-thought prompting elicits reasoning in large language models," Advances in neural information processing systems, vol. 35, pp. 24824-24837, 2022.
|
| 425 |
+
[68] D. Zhou, N. Scharli, L. Hou, J. Wei, N. Scales, X. Wang, D. Schuurmans, C. Cui, O. Bousquet, Q. Le, and E. Chi, "Least-to-most prompting enables complex reasoning in large language models," 2023. [Online]. Available: https://arxiv.org/abs/2205.10625
|
| 426 |
+
[69] X. Chen, M. Lin, N. Schärli, and D. Zhou, "Teaching large language models to self-debug," arXiv preprint arXiv:2304.05128, 2023.
|
| 427 |
+
[70] P. Zhou, J. Pujara, X. Ren, X. Chen, H.-T. Cheng, Q. V. Le, E. H. Chi, D. Zhou, S. Mishra, and H. S. Zheng, "Self-discover: Large language models self-compose reasoning structures," arXiv preprint arXiv:2402.03620, 2024.
|
| 428 |
+
[71] X. Wang, C. Li, Z. Wang, F. Bai, H. Luo, J. Zhang, N. Jojic, E. P. Xing, and Z. Hu, "Promptagent: Strategic planning with language models enables expert-level prompt optimization," arXiv preprint arXiv:2310.16427, 2023.
|
| 429 |
+
[72] X. Wang, Z. Wang, J. Liu, Y. Chen, L. Yuan, H. Peng, and H. Ji, "Mint: Evaluating llms in multi-turn interaction with tools and language feedback," arXiv preprint arXiv:2309.10691, 2023.
|
| 430 |
+
[73] T. Schick, J. Dwivedi-Yu, Z. Jiang, F. Petroni, P. Lewis, G. Izacard, Q. You, C. Nalmantis, E. Grave, and S. Riedel, "Peer: A collaborative language model," arXiv preprint arXiv:2208.11663, 2022.
|
| 431 |
+
[74] H. Yan, S. Srivastava, Y. Tai, S. I. Wang, W.-t. Yih, and Z. Yao, "Learning to simulate natural language feedback for interactive semantic parsing," arXiv preprint arXiv:2305.08195, 2023.
|
| 432 |
+
[75] M. Iyyer, W.-t. Yih, and M.-W. Chang, “Search-based neural structured learning for sequential question answering,” in Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2017, pp. 1821-1831.
|
| 433 |
+
[76] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray et al., "Training language models to follow instructions with human feedback," Advances in neural information processing systems, vol. 35, pp. 27 730-27 744, 2022.
|
| 434 |
+
[77] Z. Liu, W. Ping, R. Roy, P. Xu, M. Shoeybi, and B. Catanzaro, "Chatqa: Building gpt-4 level conversational qa models," arXiv preprint arXiv:2401.10225, 2024.
|
| 435 |
+
[78] C.-Y. Lin, “Rouge: A package for automatic evaluation of summaries,” in Text summarization branches out, 2004, pp. 74-81.
|
| 436 |
+
[79] T. Zhang, V. Kishore, F. Wu, K. Q. Weinberger, and Y. Artzi, “Bertscore: Evaluating text generation with bert,” arXiv preprint arXiv:1904.09675, 2019.
|
| 437 |
+
[80] P. Mondorf and B. Plank, “Beyond accuracy: Evaluating the reasoning behavior of large language models—a survey,” arXiv preprint arXiv:2404.01869, 2024.
|
| 438 |
+
[81] J. Ying, Y. Cao, Y. Bai, Q. Sun, B. Wang, W. Tang, Z. Ding, Y. Yang, X. Huang, and S. Yan, "Automating dataset updates towards reliable and timely evaluation of large language models," 2024. [Online]. Available: https://arxiv.org/abs/2402.11894
|
| 439 |
+
[82] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., "The llama 3 herd of models," arXiv preprint arXiv:2407.21783, 2024.
|
| 440 |
+
[83] S. J. Semnani, V. Z. Yao, H. C. Zhang, and M. S. Lam, "Wikichat: Stopping the hallucination of large language model chatbots by few-shot grounding on wikipedia," arXiv preprint arXiv:2305.14292, 2023.
|
| 441 |
+
[84] Y. Yang, H. Li, Y. Wang, and Y. Wang, “Improving the reliability of large language models by leveraging uncertainty-aware in-context learning,” arXiv preprint arXiv:2310.04782, 2023.
|
| 442 |
+
|
| 443 |
+
[85] Z. Lin, S. Trivedi, and J. Sun, "Generating with confidence: Uncertainty quantification for black-box large language models," arXiv preprint arXiv:2305.19187, 2023.
|
| 444 |
+
[86] H. Orgad, M. Toker, Z. Gekhman, R. Reichart, I. Szpektor, H. Kotek, and Y. Belinkov, "Llms know more than they show: On the intrinsic representation of llm hallucinations," arXiv preprint arXiv:2410.02707, 2024.
|
| 445 |
+
[87] X. Chan, X. Wang, D. Yu, H. Mi, and D. Yu, "Scaling synthetic data creation with 1,000,000,000 personas," arXiv preprint arXiv:2406.20094, 2024.
|
| 446 |
+
[88] T. Trinh, Y. Wu, Q. Le, H. He, and T. Luong, “Solving olympiad geometry without human demonstrations,” Nature, 2024.
|
| 447 |
+
[89] D. Zhang, S. Zhoubian, Y. Yue, Y. Dong, and J. Tang, “Rest-mcts*: Llm self-training via process reward guided tree search,” arXiv preprint arXiv:2406.03816, 2024.
|
| 448 |
+
[90] Y. Tian, B. Peng, L. Song, L. Jin, D. Yu, H. Mi, and D. Yu, "Toward self-improvement of llms via imagination, searching, and criticizing," arXiv preprint arXiv:2404.12253, 2024.
|
data/2025/2503_19xxx/2503.19213/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a115ec82bc6e04cf940db28eb2bbd495e9f6aa0faf62dcc1053ab7592364221f
size 174512

data/2025/2503_19xxx/2503.19213/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_19xxx/2503.19296/63a45a97-530e-409e-a7a5-2234e300494a_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0494264ad3cfbd25cd39e43e863a56a486b95f619c063545cedac3732b466c5f
size 2243731

data/2025/2503_19xxx/2503.19296/full.md
ADDED
@@ -0,0 +1,370 @@
# Fine-grained Textual Inversion Network for Zero-Shot Composed Image Retrieval

Haoqiang Lin
Shandong University
Qingdao, China
zichaohq@gmail.com

Haokun Wen
Harbin Institute of Technology (Shenzhen)
Shenzhen, China
whenhaokun@gmail.com

Xuemeng Song*
Shandong University
Qingdao, China
sxmustc@gmail.com

Meng Liu
Shandong Jianzhu University
Jinan, China
mengliu.sdu@gmail.com

Yupeng Hu
Shandong University
Jinan, China
huyupeng@sdu.edu.cn

Liqiang Nie
Harbin Institute of Technology (Shenzhen)
Shenzhen, China
nieliqiang@gmail.com

# ABSTRACT

Composed Image Retrieval (CIR) allows users to search for target images with a multimodal query, comprising a reference image and a modification text that describes the user's modification demand over the reference image. Nevertheless, due to the expensive labor cost of training data annotation, recent researchers have shifted to the challenging task of zero-shot CIR (ZS-CIR), which aims to fulfill CIR without annotated triplets. Pioneering ZS-CIR studies focus on converting the CIR task into a standard text-to-image retrieval task by pre-training a textual inversion network that maps a given image into a single pseudo-word token. Despite their significant progress, such coarse-grained textual inversion may be insufficient to capture the full content of the image accurately. To overcome this issue, in this work, we propose a novel Fine-grained Textual Inversion Network for ZS-CIR, named FTI4CIR. In particular, FTI4CIR comprises two main components: fine-grained pseudo-word token mapping and tri-wise caption-based semantic regularization. The former maps the image into a subject-oriented pseudo-word token and several attribute-oriented pseudo-word tokens to comprehensively express the image in textual form, while the latter jointly aligns the fine-grained pseudo-word tokens to the real-word token embedding space based on a BLIP-generated image caption template. Extensive experiments conducted on three benchmark datasets demonstrate the superiority of our proposed method.

# CCS CONCEPTS

- Information systems $\rightarrow$ Image search.

# KEYWORDS

Composed Image Retrieval; Multimodal Retrieval; Textual Inversion

|
| 64 |
+
(a) Existing textual inversion for ZS-CIR
|
| 65 |
+
|
| 66 |
+

|
| 67 |
+
(b) Ours fine-grained textual inversion for ZS-CIR
|
| 68 |
+
Figure 1: An illustration of method comparison. (a) Exiting textual inversion for ZS-CIR. (b) Our fine-grained textual inversion for ZS-CIR.
|
| 69 |
+
|
| 70 |
+
# ACM Reference Format:

Haoqiang Lin, Haokun Wen, Xuemeng Song*, Meng Liu, Yupeng Hu, and Liqiang Nie. 2024. Fine-grained Textual Inversion Network for Zero-Shot Composed Image Retrieval. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '24), July 14-18, 2024, Washington, DC, USA. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3626772.3657831
# 1 INTRODUCTION

Unlike traditional content-based image retrieval [7, 30] or text-based image retrieval [29, 32], composed image retrieval (CIR) enables users to search for the target image with a multimodal query: a reference image plus a modification text that indicates the desired modification over the reference image. In light of its significant application potential in e-commerce and internet search engines [18, 43], CIR has garnered considerable research attention in recent years [5, 17, 20, 41, 44]. Current CIR methods mainly fall into the supervised learning paradigm. Namely, they heavily rely on annotated triplets in the form of <reference image, modification text, target image>. However, it is time-consuming to annotate the modification text for each potential <reference image, target image> pair. Therefore, existing CIR datasets suffer from limited scale, constraining the generalization ability of current supervised methods, especially for queries from unseen domains.

To eliminate the model's dependency on labeled datasets, recent research [2, 34] has introduced the new task of zero-shot CIR (ZS-CIR), which aims to address CIR without requiring any annotated training triplet. As depicted in Figure 1 (a), inspired by the textual inversion technique [8, 11, 39], current ZS-CIR solutions focus on training a mapping network over a frozen vision-language pre-trained model, such as CLIP [31], which can convert the image's visual embedding into a latent pseudo-word token. This pseudo-word token should possess two important properties: a) encapsulating the informative content of the image, and b) being compatible with the textual token embedding space of real-words. In this manner, each image can be represented by the sentence "a photo of $S^*$" and gains a textual embedding, where $S^*$ is the to-be-learned pseudo-word. Notably, the mapping network is trained simply on a set of unlabeled open-domain real-world images instead of labeled triplets. During inference, when provided with a composed query, the reference image can be converted into a sentence with a pseudo-word and hence seamlessly integrated into the token sequence of the given modification text, forming a unified text query. Ultimately, the CIR task can be fulfilled with a standard text-to-image retrieval model.

Although significant progress has been made in ZS-CIR studies, they primarily concentrate on converting the input image into a general pseudo-word token based on the image's global visual feature. However, such approaches may not accurately capture the full content of the image. In CIR tasks, users' modification requests generally fall into two types: subject-oriented and attribute-oriented. Subject-oriented requests involve altering the category or number of the primary subject(s) in the image, while attribute-oriented requests pertain to changing the attributes (e.g., background and sleeve length) of these primary subject(s). Therefore, we propose conducting fine-grained textual inversion to better address the downstream CIR tasks. As shown in Figure 1 (b), we map each image into a subject-oriented pseudo-word token and several attribute-oriented pseudo-word tokens to express the image fully. Each image is then represented by the sentence "a photo of $[S^*]$ with $[A_1^*, \dots, A_r^*]$", where $S^*$ refers to the subject-oriented pseudo-word encapsulating the primary subject(s) information of the image, while $A_i^*$ ($i = 1, \dots, r$) stand for the attribute-oriented pseudo-words containing the contextual attributes of the primary subject(s).

However, this is non-trivial owing to the following two challenges. 1) Images of different domains usually involve diverse local attributes. For instance, images from the fashion domain typically contain the attributes of sleeve length, waist design, and color, while images from the animal domain usually contain the attributes of background, position, and fur. Therefore, how to effectively capture the diverse local attributes across different images poses a crucial challenge. 2) It is difficult to jointly regularize the projection of both subject-oriented and attribute-oriented pseudo-word tokens. Previous ZS-CIR work has adopted the image-related categories as real-word tokens to promote the projected pseudo-word token to reside in the real-word token embedding space. However, we argue that simply using the general image categories is insufficient to fully regularize the projection of both subject-oriented and attribute-oriented pseudo-word tokens. Consequently, how to jointly align subject-oriented and attribute-oriented pseudo-word tokens to the real-word token embedding space forms another challenge.

To address these challenges, we propose a Fine-grained Textual Inversion Network for Zero-Shot Composed Image Retrieval, dubbed FTI4CIR. As shown in Figure 2, our FTI4CIR consists of two key components: fine-grained pseudo-word token mapping and tri-wise caption-based semantic regularization. The first component maps the image into a subject-oriented pseudo-word token and several attribute-oriented pseudo-word tokens based on the image's global feature and local attribute features, respectively. In particular, to cope with the diverse types of local attributes across images in different domains, we devise a dynamic local attribute feature extraction module, which first uncovers all the possible local attribute features by adaptively aggregating the image's local patch features with a Transformer [36], and then filters out the learned irrelevant attribute features with a local-global relevance-based filtering strategy. The second component targets jointly aligning the subject-oriented and attribute-oriented pseudo-word tokens to the real-word token embedding space. Towards this end, we first employ BLIP [21] to generate a real-word description for each image, which is typically in the format of "[primary subject(s)] + [detailed description]". We then design three text templates based on the generated caption to facilitate a tri-wise (namely, subject-wise, attribute-wise, and whole-wise) caption-based semantic regularization. By pushing the embedding of each pseudo-word-involved template to be close to that of the original real-word caption, the tri-wise caption-based semantic regularization promotes not only the interaction between the pseudo-words and their counterpart real-words, but also that between the pseudo-words and the other contextual real-words in the image caption.

In the inference phase, given a multimodal query of the CIR task, FTI4CIR first converts the reference image into a sentence with pseudo-words, and then concatenates it with the modification text to derive a final pure text query, thereby simplifying the CIR task to a standard text-to-image retrieval task. Our main contributions can be summarized as follows:

- To the best of our knowledge, we are the first to explore fine-grained textual inversion, i.e., mapping the image into a subject-oriented pseudo-word token and several attribute-oriented pseudo-word tokens, to realize ZS-CIR.
- To facilitate the attribute-oriented pseudo-word token mapping, we propose a dynamic local attribute feature extraction module to handle the problem of diverse types of local attributes across images in different domains.
- We devise a tri-wise caption-based semantic regularization that enables thorough interactions between pseudo-words and real-words, and hence promotes aligning the pseudo-word tokens to the real-word token embedding space. We have released our code to facilitate other researchers<sup>1</sup>.

|
| 97 |
+
Figure 2: The proposed FTI4CIR consists of two key modules: (a) Fine-grained pseudo-word token mapping and (b) Tri-wise caption-based semantic regularization.
|
| 98 |
+
|
| 99 |
+
# 2 RELATED WORK

Our work is closely related to composed image retrieval and vision-language pre-trained models.

Composed Image Retrieval. Based on the type of feature extraction backbone, existing CIR methods can be primarily divided into two groups. The first group [1, 13, 19, 20, 37, 40] mainly utilizes traditional models, such as ResNet [15] and LSTM [16], to extract image and text features, respectively. In contrast, the second group [3, 4, 12, 14, 23, 42] takes advantage of vision-language pre-trained (VLP) models, like CLIP, for feature extraction. These VLP model-based methods usually yield better performance due to their superior feature extraction ability on multimodal data. Nevertheless, all the above approaches fall into the supervised learning paradigm, requiring costly annotated triplets to tune the model. Hence, recent researchers have shifted to the ZS-CIR task, which fulfills CIR without labeled triplets. For example, Saito et al. [34] employed a pre-trained textual inversion network to convert the reference image into a single pseudo-word token within the CLIP token embedding space. This pseudo-word token is then seamlessly integrated into the token sequence of the given modification text to retrieve the target image. Meanwhile, Baldrati et al. [2] designed an optimization-based textual inversion method and a mapping network-based method, respectively, to learn a pseudo-word token for encapsulating the visual content of each image. Both methods involve category-based semantic regularization to align the pseudo-word token to the CLIP token embedding space. Although these ZS-CIR studies have achieved significant progress, they focus on using a single pseudo-word to represent the whole information of each image, which may miss the rich detailed information contained in the image. In light of this, we propose conducting fine-grained textual inversion, i.e., mapping the image into a subject-oriented pseudo-word token and several attribute-oriented pseudo-word tokens, to fully express the image in textual form.

Vision-language Pre-trained Models. Recently, VLP models have attracted considerable research attention. Standard VLP models, like CLIP and ALIGN [22], are pre-trained on large-scale image-text pairs to acquire knowledge of the implicit alignment between images and texts. Due to their powerful cross-modal alignment and feature extraction capability, they have seen wide-ranging usage across diverse tasks like zero-shot classification [31], fine-grained classification [9], and video-text retrieval [25]. Further, beyond simply serving as a feature extraction backbone, there have been many attempts to unify various vision and language tasks into a single VLP framework [6, 38]. For example, BLIP is a multimodal mixture of encoder-decoder VLP model that has demonstrated remarkable performance across understanding-based tasks (e.g., visual question answering) and generation-based tasks (e.g., image captioning). In this study, we employ the standard VLP model CLIP as the feature extraction backbone of our model to realize fine-grained textual inversion. Additionally, we utilize BLIP as an image captioning model to generate each image's caption for aligning the projected pseudo-word tokens to the real-word token embedding space.
# 3 FTI4CIR

To address the task of ZS-CIR, we propose a Fine-grained Textual Inversion Network for Zero-Shot Composed Image Retrieval, named FTI4CIR, which can be trained simply on an unlabeled image set. As shown in Figure 2, FTI4CIR consists of two key components: fine-grained pseudo-word token mapping and tri-wise caption-based semantic regularization. As a major novelty, different from existing work that maps an image into a coarse global pseudo-word token, FTI4CIR maps the image into not only a subject-oriented pseudo-word token $\mathbf{s}$ but also several attribute-oriented pseudo-word tokens $[\mathbf{a}_1, \dots, \mathbf{a}_r]$. For optimization, we first adopt the commonly used contrastive loss to correlate the original visual embedding and the pseudo-word-based textual embedding of the given image. In addition, we devise a tri-wise caption-based semantic regularization, which fully utilizes BLIP-generated high-quality image captions to model thorough interactions between pseudo-words and real-words, and hence promotes the pseudo-word token learning. Once FTI4CIR is well-trained, it can be applied to various downstream CIR tasks in the inference phase, where the multimodal CIR query is unified into a pure text query, thereby simplifying each CIR task into a standard text-to-image retrieval task.

# 3.1 Fine-grained Pseudo-word Token Mapping

In order to encapsulate the image's content in a fine-grained manner, we project the image into both subject-oriented and attribute-oriented pseudo-word tokens. The former captures the primary subject(s), and the latter captures its/their associated attributes.
Subject-oriented Pseudo-word Token Mapping. Intuitively, the subject-oriented pseudo-word token should capture the primary subject(s) information of the image. To achieve this, we leverage the widely adopted CLIP visual encoder, inspired by its remarkable success in CIR tasks [4, 42], to extract the global feature of the input image. Similar to previous works, we regard the last-layer output of the frozen CLIP visual encoder as the global feature of the image, denoted as $\mathbf{v}_g \in \mathbb{R}^{d_1}$, where $d_1$ represents the dimension of the global feature. We then employ a multi-layer perceptron (MLP), acting as a mapping network, to project the global image feature into a subject-oriented pseudo-word token. Formally, let $\phi_s$ be the mapping function. We then have:

$$
\mathbf{s} = \phi_s(\mathbf{v}_g), \tag{1}
$$

where $\mathbf{s}$ denotes the subject-oriented pseudo-word token.
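For illustration, the mapping in Eqn. (1) can be sketched in PyTorch as follows; the hidden width, depth, and activation of the MLP are our assumptions, not the paper's exact configuration.

```python
import torch
import torch.nn as nn

class SubjectMapper(nn.Module):
    """MLP phi_s that maps the global CLIP image feature v_g into a
    subject-oriented pseudo-word token s (Eqn. (1))."""

    def __init__(self, d_global: int = 768, d_token: int = 768):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(d_global, d_global * 4),
            nn.GELU(),
            nn.Linear(d_global * 4, d_token),
        )

    def forward(self, v_g: torch.Tensor) -> torch.Tensor:
        # v_g: (B, d_global) -> s: (B, d_token)
        return self.mlp(v_g)
```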
Attribute-oriented Pseudo-word Token Mapping. The attribute-oriented pseudo-word tokens are intended to encapsulate the local detailed attribute information of the primary subject(s) in the image. Therefore, for deriving the attribute-oriented pseudo-word tokens, we resort to the image's local features. Recent advancements in VLP models have made the use of local patch features quite prevalent. Nevertheless, individual patch features do not directly represent the local semantic attributes, as each attribute is usually correlated with multiple patches [42]. For example, the collar design attribute is typically associated with the top patches of an image, while the sleeve length attribute is connected to patches on both the left and right sides.

Unified Local Attribute Features Extraction. Accordingly, to facilitate the derivation of attribute-oriented pseudo-word tokens, we propose extracting the local attribute features of each image by adaptively aggregating the patch features. As mentioned above, the types of attributes usually vary across images from different domains. Therefore, we propose the dynamic local attribute feature extraction module. In particular, we first assume that there are a total of $n$ latent local attributes present in all open-domain real-world images. We then learn these local attribute features for each image by adaptively aggregating its local patch features. To mitigate the adverse impact of the learned noisy irrelevant attributes (e.g., the collar length attribute for an image with natural scenery), we introduce a local-global relevance-based filtering strategy to retain only the relevant local attribute features for the subsequent attribute-oriented pseudo-word token learning.

Formally, let $\mathbf{V} = \{\mathbf{v}_p^i\}_{i=1}^m \in \mathbb{R}^{d_2 \times m}$ represent the image patch features, output by the second-to-last layer of the frozen CLIP visual encoder, where $d_2$ denotes the dimension of the patch features and $m$ denotes the number of image patches. For local attribute feature extraction, we adopt the off-the-shelf Transformer network, which models comprehensive interaction among its inputs via the self-attention mechanism. Specifically, we define a set of $n$ learnable query embeddings $\mathbf{X} = \{\mathbf{x}_i\}_{i=1}^n \in \mathbb{R}^{d_2 \times n}$, where each learnable query embedding corresponds to a specific latent attribute of images. We then feed the concatenation of the learnable query embeddings $\mathbf{X}$ and all the image patch features $\mathbf{V}$ into the Transformer encoder, and regard the output corresponding to the query embeddings as the local attribute features of the image. To facilitate the subsequent local-global relevance-based filtering, we also introduce a fully connected network to ensure that the dimension of the local attribute features matches that of the global image feature. Formally, this process can be expressed as follows,

$$
\mathbf{X}^{\prime} = FC\left(\mathcal{F}_{\text{Transformer}}([\mathbf{X} | \mathbf{V}])\right), \tag{2}
$$

where $[\cdot|\cdot]$ represents the concatenation operation, $FC$ denotes the fully connected network, and $\mathbf{X}^{\prime} = \{\mathbf{x}_i^{\prime}\}_{i=1}^n \in \mathbb{R}^{d_1 \times n}$ denotes the local attribute features of the image.
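A minimal PyTorch sketch of Eqn. (2) is given below. The layer and head counts follow the implementation details in Section 4.1.2 (3 layers, 1 head); the feature dimensions and initialization are assumptions.

```python
import torch
import torch.nn as nn

class LocalAttributeExtractor(nn.Module):
    """Learnable queries X are concatenated with patch features V,
    encoded by a Transformer, and projected to the global-feature
    dimension, yielding the local attribute features X' (Eqn. (2))."""

    def __init__(self, n_attrs: int = 24, d_patch: int = 1024, d_global: int = 768):
        super().__init__()
        self.queries = nn.Parameter(torch.randn(n_attrs, d_patch))  # X
        layer = nn.TransformerEncoderLayer(d_model=d_patch, nhead=1, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=3)
        self.fc = nn.Linear(d_patch, d_global)

    def forward(self, patches: torch.Tensor) -> torch.Tensor:
        # patches: (B, m, d_patch) from the second-to-last CLIP layer.
        b = patches.size(0)
        x = self.queries.unsqueeze(0).expand(b, -1, -1)
        h = self.encoder(torch.cat([x, patches], dim=1))  # [X | V]
        return self.fc(h[:, : self.queries.size(0)])      # X': (B, n, d_global)
```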
Local-global Relevance-based Filtering. As mentioned above, we need to filter out the irrelevant local attribute features of the given image to enhance the interaction between the reference image and the modification text in the downstream CIR tasks. For this purpose, we propose the local-global relevance-based filtering strategy, where the reliable global image feature serves as the reference for selecting the relevant local attribute features. Essentially, the more similar a given local attribute feature is to the global image feature, the more likely it is to be relevant to the given image. Specifically, we identify the relevant local attribute features by jointly considering their relative similarity ranking and absolute similarity score. First, we rank the learned local attribute features according to their cosine similarities to the global image feature as follows,

$$
\begin{cases} c_i = \cos(\mathbf{x}_i^{\prime}, \mathbf{v}_g), \quad i = 1, \dots, n, \\ \mathbf{W} = [\mathbf{x}_j^{\prime}], \quad c_j \in \operatorname{top-}k(C), \end{cases} \tag{3}
$$

where $\cos(\cdot, \cdot)$ denotes the cosine similarity, $c_i$ indicates the similarity between the global image feature $\mathbf{v}_g$ and the $i$-th learned local attribute feature $\mathbf{x}_i^{\prime}$, $\operatorname{top-}k(C)$ denotes the top $k$ entry values of $C = [c_1, c_2, \dots, c_n]$, and $\mathbf{W} \in \mathbb{R}^{d_1 \times k}$ represents the selected valid local attribute features.

We then set a similarity threshold to further guarantee the quality of the retained local attribute features as follows,

$$
\mathbf{W}^{\prime} = [\mathbf{x}_j^{\prime}], \quad c_j \geq \varepsilon \ \text{and} \ \mathbf{x}_j^{\prime} \in \mathbf{W}, \tag{4}
$$

where $\varepsilon$ is the local-global similarity threshold for retaining the final relevant local attribute features, $\mathbf{W}^{\prime} \in \mathbb{R}^{d_1 \times r}$ represents the set of finally selected local attribute features, and $r \in [1, k]$ is the number of selected local attribute features.
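The two-stage filtering of Eqns. (3)-(4) amounts to a top-k selection followed by a similarity threshold. A sketch for a single image (batching omitted) is shown below, with k and epsilon set to the values reported in Section 4.1.2.

```python
import torch
import torch.nn.functional as F

def filter_attributes(x_local: torch.Tensor, v_g: torch.Tensor,
                      k: int = 12, eps: float = 0.05) -> torch.Tensor:
    """x_local: (n, d) local attribute features; v_g: (d,) global feature.
    Returns W': the r retained features (Eqns. (3)-(4))."""
    c = F.cosine_similarity(x_local, v_g.unsqueeze(0), dim=-1)  # (n,)
    top_vals, top_idx = c.topk(k)      # W: top-k candidates by relevance
    keep = top_vals >= eps             # absolute similarity threshold
    # The paper reports r in [1, k]; a fallback for the all-filtered
    # case is omitted here for brevity.
    return x_local[top_idx[keep]]      # W': (r, d)
```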
Notably, to guarantee that different local attribute features indeed represent different visual attributes, we introduce an orthogonality loss that enforces the distinctiveness of each feature. In particular, we deploy the orthogonality loss over $\mathbf{W}$ rather than $\mathbf{W}^{\prime}$ to encourage discrimination among the image's local attribute features as much as possible, while preventing interference from the irrelevant (low-similarity) features, thus facilitating the local attribute feature learning. It can be formulated as follows,

$$
\mathcal{L}_{\text{ortho}} = \left\| \mathbf{W}^{\top} \mathbf{W} - \mathbf{I} \right\|_F^2, \tag{5}
$$

where $\mathbf{I}$ denotes the identity matrix and $\|\cdot\|_F$ refers to the Frobenius norm of a matrix.
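A sketch of the orthogonality regularizer in Eqn. (5) follows; we L2-normalize the features first (an assumption) so that the Gram matrix holds pairwise cosine similarities.

```python
import torch
import torch.nn.functional as F

def ortho_loss(W: torch.Tensor) -> torch.Tensor:
    """W: (k, d) top-k local attribute features of one image.
    Penalizes off-diagonal similarity so features stay distinct."""
    Wn = F.normalize(W, dim=-1)
    gram = Wn @ Wn.t()                            # (k, k) pairwise similarities
    eye = torch.eye(W.size(0), device=W.device)
    return ((gram - eye) ** 2).sum()              # squared Frobenius norm
```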
Mapping. Given the inherent differences between the global image feature and the local attribute features, we employ another MLP-based mapping network, denoted as $\phi_a$, to project the local attribute features into the real-word token embedding space. Mathematically, we have:

$$
[\mathbf{a}_1, \dots, \mathbf{a}_r] = \phi_a(\mathbf{W}^{\prime}), \tag{6}
$$

where $\mathbf{a}_i$ is the $i$-th mapped attribute-oriented pseudo-word token.

Contrastive Cross-modal Learning. Having acquired the subject-oriented and attribute-oriented pseudo-word tokens, we can represent each image in textual form. Specifically, we design the pseudo-word-based textual template "a photo of $[S^*]$ with $[A_1^*, \dots, A_r^*]$" to represent each image, where $S^*$ is the pseudo-word corresponding to the subject-oriented pseudo-word token $\mathbf{s}$, and $A_i^*$ is that corresponding to the $i$-th attribute-oriented pseudo-word token $\mathbf{a}_i$. Accordingly, by feeding this template into the frozen CLIP text encoder, we obtain the pseudo-word-based textual representation, denoted as $\mathbf{t}_q$, for each image.

To supervise the pseudo-word token learning with the unlabeled pre-training image dataset, following previous work, we adopt the symmetric contrastive loss [2]. Intuitively, we expect that for each unlabeled image, its pseudo-word-based textual representation should be more aligned with its original visual representation than with the visual representations of other images, and vice versa. This loss can be written as follows,

$$
\mathcal{L}_{\text{sim}} = -\frac{1}{B} \sum_{i=1}^{B} \left\{ \log \frac{e^{\cos(\mathbf{v}_g^i, \mathbf{t}_q^i)/\tau}}{\sum_{j=1}^{B} e^{\cos(\mathbf{v}_g^i, \mathbf{t}_q^j)/\tau} + \sum_{j \neq i} e^{\cos(\mathbf{t}_q^i, \mathbf{t}_q^j)/\tau}} + \log \frac{e^{\cos(\mathbf{t}_q^i, \mathbf{v}_g^i)/\tau}}{\sum_{j=1}^{B} e^{\cos(\mathbf{t}_q^i, \mathbf{v}_g^j)/\tau} + \sum_{j \neq i} e^{\cos(\mathbf{v}_g^i, \mathbf{v}_g^j)/\tau}} \right\}, \tag{7}
$$

where $B$ is the batch size and $\tau$ is a temperature hyper-parameter. $\mathbf{v}_g^i$ and $\mathbf{t}_q^i$ represent the global image feature and the pseudo-word-based textual representation of the $i$-th image, respectively.
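Eqn. (7) is an InfoNCE-style objective whose denominators mix cross-modal and same-modal negatives; a PyTorch sketch follows (the explicit feature normalization is an assumption).

```python
import torch
import torch.nn.functional as F

def symmetric_contrastive_loss(v: torch.Tensor, t: torch.Tensor,
                               tau: float = 0.2) -> torch.Tensor:
    """v, t: (B, d) global image features and pseudo-word-based
    text features; implements Eqn. (7)."""
    v, t = F.normalize(v, dim=-1), F.normalize(t, dim=-1)
    B = v.size(0)
    sim_vt = v @ t.t() / tau                 # cos(v_i, t_j) / tau
    sim_tt = t @ t.t() / tau
    sim_vv = v @ v.t() / tau
    diag = torch.eye(B, dtype=torch.bool, device=v.device)

    pos = sim_vt.diagonal()                  # positive pair scores
    # image-to-text: all t_j plus the other text embeddings (j != i)
    den_i2t = torch.logsumexp(
        torch.cat([sim_vt, sim_tt.masked_fill(diag, float("-inf"))], dim=1), dim=1)
    # text-to-image: all v_j plus the other image embeddings (j != i)
    den_t2i = torch.logsumexp(
        torch.cat([sim_vt.t(), sim_vv.masked_fill(diag, float("-inf"))], dim=1), dim=1)
    return -(pos - den_i2t).mean() - (pos - den_t2i).mean()
```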
# 3.2 Tri-wise Caption-based Semantic Regularization

To align the pseudo-word tokens to the real-word token embedding space, thereby facilitating the combination of the reference image and the modification text embeddings in the inference phase, we follow previous work [2] and further regularize the interaction between pseudo-word tokens and real-word tokens. The existing solution [2] focuses on aligning the pseudo-word token with the image-related categories. In our context, it is apparent that simply using image-related categories to guide the subject-oriented and attribute-oriented pseudo-word token learning can lead to suboptimal performance. This is because the image-related category tokens only convey information about the image's category, lacking detailed specifics such as the number of primary subject(s) and the local attributes present in the image.

Inspired by the remarkable success of VLP models in image captioning, we use BLIP to generate a high-quality description for each image to guide the pseudo-word token learning. This is because BLIP, pre-trained on the COCO [24] dataset, typically generates the image caption in the format of "[primary subject(s)] + [detailed description]", such as "[three dogs] [sitting in front of a door]".

![](images/placeholder_fig3.jpg)
Figure 3: An example of a BLIP-generated caption, which can be divided into two parts: $\hat{T}_{subj}$ and $\hat{T}_{attr}$.

As can be seen, this format matches our designed text template for representing each image well. To facilitate the following tri-wise caption-based semantic regularization, we first split the generated caption into two parts: one describes the primary subject(s) and the other delivers the local attributes. Specifically, given the generated caption $\hat{T}$, we employ the POS tagger of the spaCy library<sup>3</sup> to identify its first subject term. We then denote the sub-sequence of $\hat{T}$ ending with the detected subject term as $\hat{T}_{subj}$, and the remaining sub-sequence as $\hat{T}_{attr}$. As illustrated in Figure 3, given the example description "three dogs sitting in front of a door", we can split it into two parts: $\hat{T}_{subj}$ refers to "three dogs", describing the primary subjects of the image, while $\hat{T}_{attr}$ refers to "sitting in front of a door", conveying the local attribute information about the primary subjects.
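A sketch of this split using spaCy is shown below; taking the first noun as the subject term is our reading of the heuristic, and the model name is an assumption.

```python
import spacy

nlp = spacy.load("en_core_web_sm")

def split_caption(caption: str):
    """Split a BLIP caption into (T_subj, T_attr) at the first noun."""
    doc = nlp(caption)
    for tok in doc:
        if tok.pos_ == "NOUN":
            cut = tok.idx + len(tok.text)
            return caption[:cut], caption[cut:].strip()
    return caption, ""  # no noun found: treat the whole caption as T_subj

# split_caption("three dogs sitting in front of a door")
# -> ("three dogs", "sitting in front of a door")
```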
One naive approach to regularizing the pseudo-word token learning is to model the interaction between the pseudo-words and their counterpart real-words, namely, pushing the subject-oriented pseudo-word $S^*$ to be close to $\hat{T}_{subj}$, and the attribute-oriented pseudo-words $A_i^* (i = 1, \dots, r)$ to $\hat{T}_{attr}$. However, this approach only considers the local interaction and ignores the interaction between the pseudo-words and the other contextual real-words, such as that between $S^*$ and $\hat{T}_{attr}$, and that between $A_i^* (i = 1, \dots, r)$ and $\hat{T}_{subj}$. In fact, due to the sequential nature of text, modeling the interaction between the pseudo-words and the other contextual real-words should also be beneficial for the CLIP text encoder to interpret the pseudo-word-based textual form of each image. Accordingly, we propose to conduct the tri-wise caption-based semantic regularization within the context.

Notably, to unify the caption format with the aforementioned pseudo-word-based textual template, we standardize each generated caption with the text template "a photo of $[\hat{T}_{subj}]$ with $[\hat{T}_{attr}]$". Let $\hat{T}_B$ denote the standardized caption of the image. Based on this caption, we then derive three variants: $\hat{T}_S$, $\hat{T}_A$, and $\hat{T}_{SA}$, as shown in Figure 2 (b). In particular, $\hat{T}_S$ replaces $\hat{T}_{subj}$ with the subject-oriented pseudo-word $S^*$, $\hat{T}_A$ replaces $\hat{T}_{attr}$ with the attribute-oriented pseudo-words $A_i^* (i = 1, \dots, r)$, and $\hat{T}_{SA}$ replaces both. Based on the standard caption and its three variants, we can conduct the tri-wise caption-based semantic regularization as follows,

$$
\begin{cases} \mathcal{L}_{\text{subj}} = 1 - \cos(\mathbf{t}_B, \mathbf{t}_S), \\ \mathcal{L}_{\text{attr}} = 1 - \cos(\mathbf{t}_B, \mathbf{t}_A), \\ \mathcal{L}_{\text{whole}} = 1 - \cos(\mathbf{t}_B, \mathbf{t}_{SA}), \\ \mathcal{L}_{\text{tri-reg}} = \mathcal{L}_{\text{subj}} + \mathcal{L}_{\text{attr}} + \mathcal{L}_{\text{whole}}, \end{cases} \tag{8}
$$

where $\mathbf{t}_B$, $\mathbf{t}_S$, $\mathbf{t}_A$, and $\mathbf{t}_{SA}$ are the embeddings derived by the frozen CLIP text encoder for $\hat{T}_B$, $\hat{T}_S$, $\hat{T}_A$, and $\hat{T}_{SA}$, respectively. Losses $\mathcal{L}_{\text{subj}}$ and $\mathcal{L}_{\text{attr}}$ are specifically designed to guide the learning of the subject-oriented pseudo-word token and the attribute-oriented pseudo-word tokens, respectively, while $\mathcal{L}_{\text{whole}}$ jointly regularizes all the pseudo-word tokens. Essentially, our goal is to ensure that the pseudo-word-based caption variants are semantically close to the real-word-based caption.

The final loss used to optimize FTI4CIR is as follows,

$$
\mathcal{L}_{\text{total}} = \mathcal{L}_{\text{sim}} + \mathcal{L}_{\text{ortho}} + \lambda_{\text{reg}} \mathcal{L}_{\text{tri-reg}}, \tag{9}
$$

where $\lambda_{\text{reg}}$ is a trade-off hyper-parameter.
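Eqns. (8)-(9) reduce to cosine-distance terms between the CLIP text embeddings of the caption variants; a sketch follows, where the four embeddings are assumed to be precomputed by the frozen CLIP text encoder.

```python
import torch.nn.functional as F

def tri_reg_loss(t_B, t_S, t_A, t_SA):
    """Each argument: (B, d) CLIP text embeddings of T_B, T_S, T_A, T_SA."""
    l_subj = (1 - F.cosine_similarity(t_B, t_S, dim=-1)).mean()
    l_attr = (1 - F.cosine_similarity(t_B, t_A, dim=-1)).mean()
    l_whole = (1 - F.cosine_similarity(t_B, t_SA, dim=-1)).mean()
    return l_subj + l_attr + l_whole                      # Eqn. (8)

def total_loss(l_sim, l_ortho, l_tri, lambda_reg: float = 1.4):
    return l_sim + l_ortho + lambda_reg * l_tri           # Eqn. (9)
```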
# 3.3 Inference with Pre-trained FTI4CIR

During the inference phase, we combine the reference image $I_r$ and its modification text $T_m$ as a composed query to retrieve the target image $I_t$. Specifically, we first employ the well-trained FTI4CIR to map the reference image into both subject-oriented and attribute-oriented pseudo-word tokens, representing the image in textual form. Then, similar to previous work, we introduce the template "a photo of $[S^*]$ with $[A_1^*, \dots, A_r^*]$ but $[T_m]$" to compose the reference image and the modification text into a single sentence. Thereafter, we encode the composed query with the CLIP text encoder and each candidate image with the CLIP visual encoder, and measure their similarity based on the corresponding output embeddings. Ultimately, we rank the candidate images according to their similarities.
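The retrieval step itself is a cosine-similarity ranking over CLIP embeddings. A hedged sketch is given below, where `encode_query` stands for the frozen CLIP text encoder applied to the composed template with the pseudo-word tokens injected (a hypothetical helper, since token injection depends on the CLIP implementation).

```python
import torch
import torch.nn.functional as F

def retrieve(pseudo_subject, pseudo_attrs, modification_text,
             candidate_feats, encode_query):
    """Compose "a photo of [S*] with [A_1*, ..., A_r*] but [T_m]",
    embed it, and rank candidate images by cosine similarity."""
    q = encode_query(pseudo_subject, pseudo_attrs, modification_text)  # (d,)
    sims = F.cosine_similarity(q.unsqueeze(0), candidate_feats, dim=-1)
    return torch.argsort(sims, descending=True)  # ranked candidate indices
```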
# 4 EXPERIMENT

In this section, we first introduce the experimental settings and then provide the experimental results, as well as the corresponding analyses, to answer the following research questions:

- RQ1. Does FTI4CIR surpass existing methods?
- RQ2. How does each component affect FTI4CIR?
- RQ3. Is FTI4CIR sensitive to the key hyper-parameters?
- RQ4. How is the qualitative performance of FTI4CIR?

# 4.1 Experimental Setting

4.1.1 Evaluation Dataset. To evaluate the performance of our FTI4CIR on various downstream CIR tasks, following [2], we chose three public datasets for evaluation: FashionIQ [43], CIRR [26], and CIRCO [2]. FashionIQ contains fashion items of three categories: Dresses, Shirts, and Tops&Tees. Similar to previous studies [45], since the test set remains undisclosed, we evaluated our model on the validation set, which consists of 6K validation triplets across the three categories. CIRR comprises ~21K real-life open-domain images taken from the NLVR$^2$ dataset [35]. To avoid false negatives, the annotation process requires that the modification text be relevant to only one image pair and irrelevant to any other image pair sharing the same reference image. We assessed our model on the test set of CIRR, which contains 4.1K testing triplets. CIRCO is an open-domain dataset recently developed from the COCO dataset to further address the false negative issue. Different from the above two datasets, in CIRCO each sample comprises a reference image, a modification text, and multiple target images. We utilized the test set, which consists of 800 samples, for evaluating our model.

4.1.2 Implementation Details. To make a fair comparison, we followed [2], adopting the unlabeled test split of ImageNet1K [33] as the pre-training dataset. This dataset contains 100K unlabeled open-domain real-world images with a high variety of subjects. We adopted the pre-trained CLIP (ViT-L/14 version) as the feature extraction backbone of FTI4CIR. The Transformer for deriving the local attribute features of images has 3 layers and 1 attention head. We set the number of latent local attributes $n$ to 24. For the local-global relevance-based filtering, we set the number of valid local attribute features $k$ in Eqn. (3) and the local-global similarity threshold $\varepsilon$ in Eqn. (4) to 12 and 0.05, respectively. The temperature $\tau$ in Eqn. (7) is set to 0.2. Regarding the loss weight, we set $\lambda_{\text{reg}}$ in Eqn. (9) to 1.4. We trained FTI4CIR with the AdamW [27] optimizer with an initial learning rate of 4e-5, decayed by a factor of 0.1 at the 10-th epoch. We empirically set the batch size to 256. All the experiments are implemented in PyTorch [28], and we fixed the random seeds to ensure reproducibility. Furthermore, we evaluated the CIR performance of our model in a zero-shot manner, i.e., once FTI4CIR is well-trained on the pre-training image dataset, it can be tested on all three datasets.

4.1.3 Evaluation. We used the standard evaluation protocol for each dataset. For FashionIQ, consistent with previous studies [2, 34], we used recall at rank $K$ (R@K) as the evaluation metric, specifically R@10 and R@50. In addition, we calculated the average performance across the three category subsets to gauge the overall performance. For CIRR, as suggested by previous studies [10, 26], we employed a combination of criteria, namely R@K (where $K = 1, 5, 10, 50$), R$_{\text{subset}}$@K (where $K = 1, 2, 3$), and the average of R@5 and R$_{\text{subset}}$@1, as evaluation metrics. Notably, R$_{\text{subset}}$@K limits the candidate target images to those that are semantically similar to the correct target image, to alleviate the problem of false negatives. For CIRCO, due to the multiple ground truths, following previous work [2], we adopted mean Average Precision (mAP) as a more fine-grained metric, specifically mAP@K (where $K = 5, 10, 25, 50$).
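For concreteness, a sketch of the two metric families follows; the mAP@K normalization by min(#targets, K) is the common convention and an assumption here.

```python
import torch

def recall_at_k(ranks: torch.Tensor, k: int) -> float:
    """ranks: (Q,) 0-based rank of the single target image per query."""
    return (ranks < k).float().mean().item()

def map_at_k(ranked_ids, target_sets, k: int) -> float:
    """ranked_ids: per-query ranked candidate-id lists;
    target_sets: per-query sets of ground-truth target ids."""
    aps = []
    for ids, targets in zip(ranked_ids, target_sets):
        hits, precisions = 0, []
        for rank, cid in enumerate(ids[:k], start=1):
            if cid in targets:
                hits += 1
                precisions.append(hits / rank)
        denom = min(len(targets), k)
        aps.append(sum(precisions) / denom if denom else 0.0)
    return sum(aps) / len(aps)
```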
# 4.2 On Model Comparison (RQ1)

To comprehensively validate the effectiveness of our method, we adopted the following baselines, including seven zero-shot methods and one classic CLIP-based supervised method.
- Image-only. It simply takes the CLIP features of the reference image to retrieve the target images.
- Text-only. It simply takes the CLIP features of the modification text to retrieve the target images.
- Image + Text. It averages the CLIP features of the reference image and the modification text to retrieve the target image.
- Captioning. It first concatenates the caption of the reference image, generated by an image captioning model, with the modification text, and then takes the CLIP features of the composed text to retrieve the target images. For a fair comparison, we adopted BLIP as the image captioning model.
- Pic2Word [34]. It employs a textual inversion network to map the reference image into a single pseudo-word token within the CLIP token embedding space.
- SEARLE-XL-OTI and SEARLE-XL [2]. SEARLE-XL-OTI employs an optimization-based textual inversion to learn a pseudo-word token encapsulating the visual content of each image, where no mapping function is involved. By distilling knowledge from SEARLE-XL-OTI, SEARLE-XL learns a compact and efficient mapping network to map the image into a pseudo-word token. Both methods adopt a category-based semantic regularization to align the pseudo-word token to the CLIP token embedding space.
- Combiner [4]. This is a standard CLIP-based supervised CIR model, which combines the visual and textual features derived from the frozen pre-trained CLIP to fulfill CIR tasks. Due to the absence of a CIRCO training set, we separately trained this model on the training splits of FashionIQ (18K triplets) and CIRR (28K triplets). We then evaluated the trained Combiner network on all three datasets to investigate its generalization ability.
Table 1: Performance comparison on FashionIQ. The best results are in boldface, while the second best results are underlined.
<table><tr><td rowspan="2">Supervision</td><td rowspan="2">Method</td><td colspan="2">Dresses</td><td colspan="2">Shirts</td><td colspan="2">Tops&Tees</td><td colspan="2">Avg</td></tr><tr><td>R@10</td><td>R@50</td><td>R@10</td><td>R@50</td><td>R@10</td><td>R@50</td><td>R@10</td><td>R@50</td></tr><tr><td rowspan="8">ZERO-SHOT</td><td>Image-only</td><td>5.35</td><td>13.93</td><td>9.91</td><td>20.80</td><td>8.31</td><td>17.70</td><td>7.86</td><td>17.48</td></tr><tr><td>Text-only</td><td>14.38</td><td>32.92</td><td>19.28</td><td>33.02</td><td>21.52</td><td>39.16</td><td>18.39</td><td>35.03</td></tr><tr><td>Image + Text</td><td>16.81</td><td>36.14</td><td>21.10</td><td>34.49</td><td>23.97</td><td>39.42</td><td>20.62</td><td>36.69</td></tr><tr><td>Captioning</td><td>7.98</td><td>21.76</td><td>21.49</td><td>36.16</td><td>18.77</td><td>34.17</td><td>16.08</td><td>30.70</td></tr><tr><td>Pic2Word [34]</td><td>20.00</td><td>40.20</td><td>26.20</td><td>43.60</td><td>27.90</td><td>47.40</td><td>24.70</td><td>43.70</td></tr><tr><td>SEARLE-XL-OTI [2]</td><td>21.57</td><td>44.47</td><td>30.37</td><td>47.49</td><td>30.90</td><td>51.76</td><td>27.61</td><td>47.90</td></tr><tr><td>SEARLE-XL [2]</td><td>20.48</td><td>43.13</td><td>26.89</td><td>45.58</td><td>29.32</td><td>49.97</td><td>25.56</td><td>46.23</td></tr><tr><td>FTI4CIR</td><td>24.39</td><td>47.84</td><td>31.35</td><td>50.59</td><td>32.43</td><td>54.21</td><td>29.39</td><td>50.88</td></tr><tr><td>FashionIQ</td><td>Combiner [4]</td><td>30.49</td><td>54.93</td><td>37.98</td><td>57.16</td><td>38.50</td><td>60.02</td><td>35.66</td><td>57.37</td></tr><tr><td>CIRR</td><td>Combiner [4]</td><td>20.53</td><td>40.36</td><td>25.07</td><td>43.18</td><td>26.82</td><td>47.68</td><td>24.14</td><td>43.74</td></tr></table>
Table 2: Performance comparison on CIRR. The best and second-best results are highlighted in bold and underlined, respectively. - denotes results not reported in the original paper.
<table><tr><td rowspan="2">Supervision</td><td rowspan="2">Method</td><td colspan="4">R@K</td><td colspan="3">Rsubset@K</td><td rowspan="2">Avg</td></tr><tr><td>K=1</td><td>K=5</td><td>K=10</td><td>K=50</td><td>K=1</td><td>K=2</td><td>K=3</td></tr><tr><td rowspan="8">ZERO-SHOT</td><td>Image-only</td><td>7.35</td><td>23.71</td><td>33.81</td><td>57.16</td><td>20.80</td><td>42.07</td><td>61.52</td><td>22.25</td></tr><tr><td>Text-only</td><td>21.81</td><td>45.13</td><td>57.54</td><td>79.52</td><td>61.52</td><td>80.41</td><td>90.31</td><td>53.32</td></tr><tr><td>Image + Text</td><td>12.34</td><td>36.22</td><td>50.27</td><td>78.15</td><td>34.19</td><td>59.06</td><td>76.72</td><td>35.21</td></tr><tr><td>Captioning</td><td>16.60</td><td>40.00</td><td>52.94</td><td>79.33</td><td>52.99</td><td>74.27</td><td>86.87</td><td>46.49</td></tr><tr><td>Pic2Word [34]</td><td>23.90</td><td>51.70</td><td>65.30</td><td>87.80</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SEARLE-XL-OTI [2]</td><td>24.87</td><td>52.31</td><td>66.29</td><td>88.58</td><td>53.80</td><td>74.31</td><td>86.94</td><td>53.06</td></tr><tr><td>SEARLE-XL [2]</td><td>24.24</td><td>52.48</td><td>66.29</td><td>88.84</td><td>53.76</td><td>75.01</td><td>88.19</td><td>53.12</td></tr><tr><td>FTI4CIR</td><td>25.90</td><td>55.61</td><td>67.66</td><td>89.66</td><td>55.21</td><td>75.88</td><td>87.98</td><td>55.41</td></tr><tr><td>FashionIQ</td><td>Combiner [4]</td><td>21.11</td><td>50.96</td><td>64.75</td><td>87.95</td><td>48.63</td><td>71.90</td><td>86.24</td><td>49.80</td></tr><tr><td>CIRR</td><td>Combiner [4]</td><td>31.61</td><td>62.22</td><td>75.23</td><td>93.52</td><td>60.63</td><td>80.84</td><td>90.99</td><td>61.42</td></tr></table>
- Combiner [4]. This is a standard CLIP-based supervised CIR model, which combines the visual and textual features derived from the frozen pre-trained CLIP to fulfill CIR tasks. Since CIRCO provides no training set, we separately trained this model on the training splits of FashionIQ (18K triplets) and CIRR (28K triplets), and then evaluated the trained Combiner networks on all three datasets to investigate their generalization ability.
Tables 1-3 summarize the performance comparison on the three datasets. For a fair comparison, we adopted CLIP (ViT-L/14) as the feature extraction backbone for all baselines. From these tables, we obtained the following observations. 1) On average, FTI4CIR consistently outperforms all zero-shot baselines across the three datasets, which reflects the effectiveness of our model in the ZS-CIR setting. 2) Comparing the results of FTI4CIR with those of the Combiner network trained on FashionIQ or CIRR demonstrates the generality and effectiveness of FTI4CIR, as well as the domain adaptation challenge faced by supervised methods. For instance, in Table 1, while the Combiner trained on FashionIQ performs better than FTI4CIR, the Combiner trained on CIRR exhibits significantly lower performance. This phenomenon is evident across all three datasets and highlights the domain discrepancies among CIR datasets: most supervised methods rely heavily on the training triplets and hence generalize poorly to other CIR datasets. 3) Table 2 shows that the Text-only baseline achieves the best performance on $R_{\text{subset}}@K$, which has been previously noted in studies [4, 34]. This emphasizes a drawback of CIRR: the modification text alone often suffices for image retrieval, rendering the information from the reference image redundant; some reference images can even be harmful to retrieving the target image. These issues are further exacerbated when the candidate target images exhibit high visual similarity to the reference image, making the semantics of the modification text crucial in such scenarios.
Table 3: Performance comparison on CIRCO. The best results are in boldface, while the second best results are underlined.
<table><tr><td rowspan="2">Supervision</td><td rowspan="2">Method</td><td colspan="4">mAP@K</td></tr><tr><td>K=5</td><td>K=10</td><td>K=25</td><td>K=50</td></tr><tr><td rowspan="8">ZERO-SHOT</td><td>Image-only</td><td>1.80</td><td>2.44</td><td>3.05</td><td>3.46</td></tr><tr><td>Text-only</td><td>3.01</td><td>3.18</td><td>3.68</td><td>3.93</td></tr><tr><td>Image + Text</td><td>4.32</td><td>5.24</td><td>6.49</td><td>7.07</td></tr><tr><td>Captioning</td><td>8.33</td><td>8.98</td><td>10.17</td><td>10.75</td></tr><tr><td>Pic2Word [34]</td><td>8.72</td><td>9.51</td><td>10.46</td><td>11.29</td></tr><tr><td>SEARLE-XL-OTI [2]</td><td>10.18</td><td>11.03</td><td>12.72</td><td>13.67</td></tr><tr><td>SEARLE-XL [2]</td><td>11.68</td><td>12.73</td><td>14.33</td><td>15.12</td></tr><tr><td>FTI4CIR</td><td>15.05</td><td>16.32</td><td>18.06</td><td>19.05</td></tr><tr><td>FashionIQ</td><td>Combiner [4]</td><td>8.91</td><td>10.29</td><td>11.72</td><td>12.52</td></tr><tr><td>CIRR</td><td>Combiner [4]</td><td>8.56</td><td>9.20</td><td>10.43</td><td>11.06</td></tr></table>
# 4.3 On Ablation Study (RQ2)
To verify the importance of each component in our model, we compared FTI4CIR with the following derivatives.
- w/o_subject_token. To explore the effect of learning the subject-oriented pseudo-word token, we eliminated the subject-oriented pseudo-word token mapping and accordingly used the template "a photo with $[A_1^*, \dots, A_r^*]$ but $[T_m]$" to retrieve the target image.
- w/o_attribute_token. To verify the importance of learning the attribute-oriented pseudo-word tokens, we removed the attribute-oriented pseudo-word token mapping and utilized the template "a photo of $[S^*]$ but $[T_m]$" to retrieve the target image.
- w/o_filter. To show the effectiveness of the local-global relevance-based filtering strategy, we disabled the filter criteria by setting $k = n$ and $\varepsilon = 0$ .
- w/o_ortho. To investigate the effect of the orthogonal constraint on local attribute feature learning, we removed $\mathcal{L}_{\mathrm{orth}}$ (see the sketch after this list for one common form of such a constraint).
- w/o_context_reg. To verify the benefit of regulating the pseudo-word token learning with the full context, we revised the derivative $\hat{T}_S$ to "a photo of $[S^*]$" and $\hat{T}_A$ to "a photo with $[A_1^*, \dots, A_r^*]$".
- w/o_subject_reg, w/o_attribute_reg, and w/o_whole_reg. To examine the efficacy of our tri-wise caption-based semantic regularization, we constructed three variants in which the losses $\mathcal{L}_{\text{subject}}$, $\mathcal{L}_{\text{attribute}}$, and $\mathcal{L}_{\text{whole}}$ are removed, respectively.
Table 4 shows the ablation results of our FTI4CIR on the three datasets. From this table, we gained the following observations. 1) Both w/o_subject_token and w/o_attribute_token perform worse than FTI4CIR, emphasizing that it is essential to consider both the primary subject(s) and the local attributes of the image to handle the diverse modification demands in CIR tasks. 2) FTI4CIR exceeds w/o_filter on all evaluation metrics, which validates the effectiveness of our filtering criteria in discarding the local attribute features irrelevant to the given image. 3) w/o_ortho performs worse than FTI4CIR, indicating that the designed orthogonal loss indeed helps guarantee the independence among local attribute features and promotes local attribute feature extraction. 4) w/o_context_reg is inferior to FTI4CIR, demonstrating the advantage of conducting the tri-wise caption-based semantic regularization with the whole context rather than the local counterparts. 5) FTI4CIR achieves better performance than w/o_subject_reg, w/o_attribute_reg, and w/o_whole_reg, suggesting that each semantic regularization partially contributes to the pseudo-word token learning. Besides, among the three regularization variants, w/o_whole_reg exhibits the highest performance on FashionIQ, whereas it yields the least favorable outcome on CIRCO. This contrast suggests that, compared with the open-domain dataset, the fashion-domain dataset places greater emphasis on the interplay between pseudo-word tokens and real-word tokens for effectively conveying user retrieval requirements.
Table 4: Ablation studies on FashionIQ, CIRR, and CIRCO.
<table><tr><td rowspan="2">Method</td><td colspan="2">FashionIQ-Avg</td><td>CIRR</td><td>CIRCO</td></tr><tr><td>R@10</td><td>R@50</td><td>Avg</td><td>mAP@5</td></tr><tr><td>w/o_subject_token</td><td>19.83</td><td>37.11</td><td>54.18</td><td>8.36</td></tr><tr><td>w/o_attribute_token</td><td>27.72</td><td>48.79</td><td>51.22</td><td>12.69</td></tr><tr><td>w/o_filter</td><td>23.13</td><td>42.63</td><td>46.07</td><td>11.34</td></tr><tr><td>w/o_ortho</td><td>25.51</td><td>45.33</td><td>54.13</td><td>14.57</td></tr><tr><td>w/o_context_reg</td><td>24.96</td><td>44.16</td><td>48.81</td><td>10.87</td></tr><tr><td>w/o_subject_reg</td><td>25.74</td><td>45.61</td><td>54.54</td><td>14.18</td></tr><tr><td>w/o_attribute_reg</td><td>25.15</td><td>46.30</td><td>52.28</td><td>12.61</td></tr><tr><td>w/o_whole_reg</td><td>26.39</td><td>46.87</td><td>53.17</td><td>11.65</td></tr><tr><td>FTI4CIR</td><td>29.39</td><td>50.88</td><td>55.41</td><td>15.05</td></tr></table>
Figure 4: Sensitivity analysis of our model on the number of latent local attributes $n$ . Notably, we reported the average results of R@10 and R@50 on FashionIQ.
# 4.4 On Sensitivity Analysis (RQ3)
In this part, we tested the sensitivity of our model to the number of latent local attributes $n$. As shown in Figure 4, we varied $n$ from 12 to 36 in steps of 4. The performance of our model generally improves as the number of latent local attributes increases, followed by a subsequent decline. This is reasonable: more latent local attributes can differentiate the latent attribute features of an image in finer detail and hence facilitate the interaction between the reference image and the modification text, while an excessive number may over-differentiate the local attribute features and make it challenging for the filter to screen out the irrelevant ones.
# 4.5 On Case Study (RQ4)
In this part, we show illustrative results on the tasks of pseudo-to-real description retrieval and composed image retrieval.
4.5.1 Pseudo-to-Real Description Retrieval. To showcase that the learned subject-oriented and attribute-oriented pseudo-word tokens can accurately express the primary subject(s) and local attributes of the image, respectively, we conducted pseudo-to-real description retrieval. In particular, we first regarded all the BLIP-generated $\hat{T}_{subj}$ / $\hat{T}_{attr}$ for images in the pre-training dataset as the candidate set of real-word-based subject/attribute descriptions.
Figure 5: Pseudo-to-real description retrieved results. We highlight the related real-word descriptions in green.
We then used the image's pseudo-word-based textual representation ("a photo of $[S^*]$ with $[A_1^*, \dots, A_r^*]$") to retrieve the related subject and attribute descriptions from the above candidate set. Figure 5 shows the top-4 retrieved descriptions for two testing cases, where the related real-word descriptions are highlighted in green. As can be seen from Figure 5 (a), the retrieved subject descriptions correctly concern "a shirt", while the attribute descriptions describe the pattern of the shirt. Regarding Figure 5 (b), the retrieved subject descriptions correctly reflect the number and type of the primary subjects, namely "two birds", and the attribute descriptions reveal the position and spatial information of the two birds. Overall, these results suggest the effectiveness of the learned pseudo-word tokens in capturing the image content and their alignment with the real-word token embedding space.
4.5.2 Composed Image Retrieval. Figure 6 illustrates several CIR results obtained by our FTI4CIR and the best-performing baseline SEARLE-XL on the fashion-domain dataset FashionIQ and the open-domain datasets CIRR and CIRCO. Due to limited space, we report the top-5 retrieved images. As can be seen from the first case, shown in Figure 6 (a), the user wants to change the global visual properties of the given dress. For this modification request, both our FTI4CIR and SEARLE-XL correctly rank the target image in the first place. Nevertheless, the more complex cases in Figure 6 (b) and (c) primarily center on local attribute alterations to the reference images: the former involves adjusting the sleeve length and collar type, while the latter emphasizes modifying the posture and background. In these cases, our FTI4CIR succeeds in ranking the ground-truth target images within the top-2 places, while SEARLE-XL fails to rank them within the top-5. This implies the superiority of fine-grained textual inversion over conventional coarse-grained textual inversion. Last but not least, as illustrated in Figure 6 (d), the primary subjects of the reference image are multiple cups, while the modification request involves not only detailed attribute changes, such as "heavily filled" and "on a wooden counter", but also a change in the number of subjects, i.e., "only one". For this case, our model still outperforms SEARLE-XL. This may be attributed to the tri-wise caption-based semantic regularization, in which both the subject-oriented and attribute-oriented pseudo-word tokens are enhanced by dedicated regularization terms. Notably, in our work, the subject-oriented pseudo-word token is designed to capture both the category and the number of the primary subject(s) in the image.
Figure 6: Illustration of CIR results on three datasets. (a) and (b) come from FashionIQ, (c) comes from CIRR, and (d) comes from CIRCO. The ground-truth target images are highlighted in green boxes.
# 5 CONCLUSIONS AND FUTURE WORK
In this work, we propose a novel fine-grained textual inversion network to tackle the challenging task of ZS-CIR. Beyond current solutions, we map each image into a subject-oriented pseudo-word token and several attribute-oriented pseudo-word tokens to effectively encapsulate the image content in a textual sentence. In addition, we design a tri-wise caption-based semantic regularization to boost the alignment of the fine-grained pseudo-word tokens with the real-word token embedding space. Extensive experiments on three public datasets demonstrate the effectiveness of our method, and ablation studies verify the contribution of each key component, confirming the benefit of conducting fine-grained textual inversion and tri-wise caption-based semantic regularization. In the future, we plan to extend our method to address the challenging multi-turn interactive image retrieval task in a zero-shot manner. We believe that our fine-grained textual inversion network could fulfill the diverse retrieval requirements of different users.
# ACKNOWLEDGMENTS
This work was supported in part by the National Natural Science Foundation of China, No.: 62376137, No.: 62276155, No.: 62376140 and No.: U23A20315; the Shandong Provincial Natural Science Foundation, No.: ZR2022YQ59, and No.: ZR2021MF040; the Science and Technology Innovation Program for Distinguished Young Scholars of Shandong Province Higher Education Institutions, No.: 2023KJ128.
# REFERENCES
[1] Muhammad Umer Anwaar, Egor Labintcev, and Martin Kleinsteuber. 2021. Compositional learning of image-text query for image retrieval. In Proceedings of the IEEE Winter Conference on Applications of Computer Vision. IEEE, 1140-1149.
[2] Alberto Baldrati, Lorenzo Agnolucci, Marco Bertini, and Alberto Del Bimbo. 2023. Zero-Shot Composed Image Retrieval with Textual Inversion. In Proceedings of the IEEE/CVF International Conference on Computer Vision. IEEE, 15338-15347.
[3] Alberto Baldrati, Marco Bertini, Tiberio Uricchio, and Alberto Del Bimbo. 2022. Conditioned and composed image retrieval combining and partially fine-tuning clip-based features. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 4959-4968.
[4] Alberto Baldrati, Marco Bertini, Tiberio Uricchio, and Alberto Del Bimbo. 2022. Effective conditioned and composed image retrieval combining CLIP-based features. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 21466-21474.
[5] Yanbei Chen and Loris Bazzani. 2020. Learning Joint Visual Semantic Matching Embeddings for Language-guided Retrieval. In Proceedings of the European Conference on Computer Vision. Springer, 136-152.
[6] Jaemin Cho, Jie Lei, Hao Tan, and Mohit Bansal. 2021. Unifying Vision-and-Language Tasks via Text Generation. In Proceedings of the International Conference on Machine Learning. PMLR, 1931-1942.
[7] T-S Chua, S-K Lim, and H-K Pung. 1994. Content-based Retrieval of Segmented Images. In Proceedings of the ACM International Conference on Multimedia. ACM, 211-218.
[8] Niv Cohen, Rinon Gal, Eli A Meirom, Gal Chechik, and Yuval Atzmon. 2022. "This is my unicorn, Fluffy": Personalizing frozen vision-language representations. In Proceedings of the European Conference on Computer Vision. Springer, 558-577.
[9] Marcos V Conde and Kerem Turgutlu. 2021. CLIP-Art: Contrastive Pre-training for Fine-Grained Art Classification. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 3956-3960.
[10] Ginger Delmas, Rafael S. Rezende, Gabriela Csurka, and Diane Larlus. 2022. ARTEMIS: Attention-based Retrieval with Text-Explicit Matching and Implicit Similarity. In Proceedings of the International Conference on Learning Representations. OpenReview.net, 1-12.
[11] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit Haim Bermano, Gal Chechik, and Daniel Cohen-or. 2023. An Image is Worth One Word: Personalizing Text-to-Image Generation using Textual Inversion. In Proceedings of the International Conference on Learning Representations. OpenReview.net, 1-18.
[12] Sonam Goenka, Zhaoheng Zheng, Ayush Jaiswal, Rakesh Chada, Yue Wu, Varsha Hedau, and Pradeep Natarajan. 2022. FashionVLP: Vision Language Transformer for Fashion Retrieval with Feedback. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 14105-14115.
[13] Chunbin Gu, Jiajun Bu, Zhen Zhang, Zhi Yu, Dongfang Ma, and Wei Wang. 2021. Image Search with Text Feedback by Deep Hierarchical Attention Mutual Information Maximization. In Proceedings of the ACM International Conference on Multimedia. ACM, 4600-4609.
[14] Xiao Han, Xiatian Zhu, Licheng Yu, Li Zhang, Yi-Zhe Song, and Tao Xiang. 2023. FAME-ViL: Multi-Tasking Vision-Language Model for Heterogeneous Fashion Tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 2669-2680.
[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep Residual Learning for Image Recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 770-778.
[16] Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long Short-Term Memory. Neural Computation 9, 8 (1997), 1735-1780.
[17] Mehrdad Hosseinzadeh and Yang Wang. 2020. Composed Query Image Retrieval Using Locally Bounded Features. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 3596-3605.
[18] Surgan Jandial, Pinkesh Badjatiya, Pranit Chawla, Ayush Chopra, Mausoom Sarkar, and Balaji Krishnamurthy. 2022. SAC: Semantic Attention Composition for Text-Conditioned Image Retrieval. In Proceedings of the IEEE Winter Conference on Applications of Computer Vision. IEEE, 4021-4030.
[19] Jongseok Kim, Youngjae Yu, Hoeseong Kim, and Gunhee Kim. 2021. Dual compositional learning in interactive image retrieval. In Proceedings of the AAAI Conference on Artificial Intelligence. AAAI, 1771-1779.
[20] Seungmin Lee, Dongwan Kim, and Bohyung Han. 2021. CoSMo: Content-Style Modulation for Image Retrieval with Text Feedback. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 802-812.
[21] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022. BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation. In Proceedings of the International Conference on Machine Learning. PMLR, 12888-12900.
[22] Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. 2021. Align before Fuse: Vision and Language Representation Learning with Momentum Distillation. Advances in Neural Information Processing Systems 34 (2021), 9694-9705.
[23] Haoqiang Lin, Haokun Wen, Xiaolin Chen, and Xuemeng Song. 2023. CLIP-based Composed Image Retrieval with Comprehensive Fusion and Data Augmentation. In Proceedings of the Australasian Joint Conference on Artificial Intelligence. Springer, 190-202.
[24] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollar, and C Lawrence Zitnick. 2014. Microsoft COCO: Common Objects in Context. In Proceedings of the European Conference on Computer Vision. Springer, 740-755.
[25] Yu Liu, Huai Chen, Lianghua Huang, Di Chen, Bin Wang, Pan Pan, and Lisheng Wang. 2022. Animating Images to Transfer CLIP for Video-Text Retrieval. In Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval. ACM, 1906-1911.
[26] Zheyuan Liu, Cristian Rodriguez-Opazo, Damien Teney, and Stephen Gould. 2021. Image Retrieval on Real-life Images with Pre-trained Vision-and-Language Models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. IEEE, 2125-2134.
[27] Ilya Loshchilov and Frank Hutter. 2019. Decoupled Weight Decay Regularization. In Proceedings of the International Conference on Learning Representations. OpenReview.net, 1-11.
[28] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. Advances in Neural Information Processing Systems 32 (2019), 8024-8035.
[29] Leigang Qu, Meng Liu, Da Cao, Liqiang Nie, and Qi Tian. 2020. Context-Aware Multi-View Summarization Network for Image-Text Matching. In Proceedings of the ACM International Conference on Multimedia. ACM, 1047-1055.
[30] Leigang Qu, Meng Liu, Jianlong Wu, Zan Gao, and Liqiang Nie. 2021. Dynamic Modality Interaction Modeling for Image-Text Retrieval. In Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval. ACM, 1104-1113.
[31] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning Transferable Visual Models From Natural Language Supervision. In Proceedings of the International Conference on Machine Learning. PMLR, 8748-8763.
[32] Jun Rao, Fei Wang, Liang Ding, Shuhan Qi, Yibing Zhan, Weifeng Liu, and Dacheng Tao. 2022. Where Does the Performance Improvement Come From? - A Reproducibility Concern about Image-Text Retrieval. In Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval. ACM, 2727-2737.
[33] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. 2015. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision 115, 3 (2015), 211-252.
[34] Kuniaki Saito, Kihyuk Sohn, Xiang Zhang, Chun-Liang Li, Chen-Yu Lee, Kate Saenko, and Tomas Pfister. 2023. Pic2Word: Mapping Pictures to Words for Zero-shot Composed Image Retrieval. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 19305-19314.
[35] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. 2019. A Corpus for Reasoning About Natural Language Grounded in Photographs. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics. ACL, 6418-6428.
[36] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. Advances in Neural Information Processing Systems 30 (2017), 5998-6008.
[37] Nam Vo, Lu Jiang, Chen Sun, Kevin Murphy, Li-Jia Li, Li Fei-Fei, and James Hays. 2019. Composing Text and Image for Image Retrieval - An Empirical Odyssey. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 6439-6448.
[38] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. 2022. SimVLM: Simple Visual Language Model Pretraining with Weak Supervision. In Proceedings of the International Conference on Learning Representations. OpenReview.net, 1-14.
[39] Yuxiang Wei, Yabo Zhang, Zhilong Ji, Jinfeng Bai, Lei Zhang, and Wangmeng Zuo. 2023. ELITE: Encoding Visual Concepts into Textual Embeddings for Customized Text-to-Image Generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision. IEEE, 15943-15953.
[40] Haokun Wen, Xuemeng Song, Xin Yang, Yibing Zhan, and Liqiang Nie. 2021. Comprehensive Linguistic-Visual Composition Network for Image Retrieval. In Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval. ACM, 1369-1378.
[41] Haokun Wen, Xuemeng Song, Jianhua Yin, Jianlong Wu, Weili Guan, and Liqiang Nie. 2024. Self-Training Boosted Multi-Factor Matching Network for Composed Image Retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence 46, 5 (2024), 3665-3678.
[42] Haokun Wen, Xian Zhang, Xuemeng Song, Yinwei Wei, and Liqiang Nie. 2023. Target-Guided Composed Image Retrieval. In Proceedings of the ACM International Conference on Multimedia. ACM, 915-923.
[43] Hui Wu, Yupeng Gao, Xiaoxiao Guo, Ziad Al-Halah, Steven Rennie, Kristen Grauman, and Rogerio Feris. 2021. Fashion IQ: A New Dataset Towards Retrieving Images by Natural Language Feedback. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 11307-11317.
[44] Yifei Yuan and Wai Lam. 2021. Conversational Fashion Image Retrieval via Multiturn Natural Language Feedback. In Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval. ACM, 839-848.
[45] Yida Zhao, Yuqing Song, and Qin Jin. 2022. Progressive Learning for Image Retrieval with Hybrid-Modality Queries. In Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval. ACM, 1012-1021.
data/2025/2503_19xxx/2503.19296/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3c81d4215e2c69f4cffdaba252b3f99c29e0c207e1d7887472faa35735b452e
size 641883
data/2025/2503_19xxx/2503.19296/layout.json
ADDED
The diff for this file is too large to render. See raw diff
data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_content_list.json
ADDED
@@ -0,0 +1,1867 @@
[
  {
    "type": "text",
    "text": "ImageGen-CoT: Enhancing Text-to-Image In-context Learning with Chain-of-Thought Reasoning",
    "text_level": 1,
    "bbox": [156, 130, 839, 175],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Jiaqi Liao $^{1\\dagger}$ , Zhengyuan Yang $^{1}$ , Linjie Li $^{1}$ , Dianqi Li, Kevin Lin $^{1}$ , Yu Cheng $^{2}$ , Lijuan Wang $^{1^{\\boxtimes}}$ $^{1}$ Microsoft 2 The Chinese University of Hong Kong",
    "bbox": [127, 208, 867, 247],
    "page_idx": 0
  },
  {
    "type": "image",
    "img_path": "images/587665c2573a3f0796a1348e8ef827cbe49e80bef21a9240e2db65c6e4ddcbd8.jpg",
    "image_caption": [
      "Figure 1. Comparisons between SEED-X and SEED-X FT w ImageGen-CoT dataset. Our method forces the model to generate a thought process before the ImageGen. The top row shows SEED-X failing to infer 'Leather' style, generating only a box, while our approach enables SEED-X to recognize the leather style and produce the intended leather box. The bottom row shows SEED-X failing to capture the unkempt fur and cloud, while our method successfully recognizes these key elements."
    ],
    "image_footnote": [],
    "bbox": [91, 282, 906, 481],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Abstract",
    "text_level": 1,
    "bbox": [274, 556, 351, 570],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "In this work, we study the problem of Text-to-Image In-Context Learning (T2I-ICL). While Unified Multimodal LLMs (MLLMs) have advanced rapidly in recent years, they struggle with contextual reasoning in T2I-ICL scenarios. To address this limitation, we propose a novel framework that incorporates a thought process called ImageGen-CoT prior to image generation. To avoid generating unstructured ineffective reasoning steps, we develop an automatic pipeline to curate a high-quality ImageGen-CoT dataset. We then fine-tune MLLMs using this dataset to enhance their contextual reasoning capabilities. To further enhance performance, we explore test-time scale-up strategies and propose a novel hybrid scaling approach. This approach first generates multiple ImageGen-CoT chains and then produces multiple images for each chain via sampling. Extensive experiments demonstrate the effectiveness of our proposed method. Notably, fine-tuning with the ImageGenCoT dataset leads to a substantial $80\\%$ performance gain for SEED-X on T2I-ICL tasks. See our project page at",
    "bbox": [86, 590, 483, 878],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "https://ImageGen-CoT.github.io/. Code and model weights will be open-sourced.",
    "bbox": [513, 556, 906, 587],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "1. Introduction",
    "text_level": 1,
    "bbox": [513, 617, 643, 633],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Human intelligence excels at learning novel concepts through contextual observation and adapting to new inputs. When presented with a series of interleaved text-image examples—such as \"a leather-bound book\", followed by \"a leather apple\"—and then asked to generate an image for the query \"a box,\" humans intuitively infer the implicit pattern of \"leather\" and apply it to the new query, resulting in \"a leather box\". This reasoning ability to learn novel concepts from multimodal contexts underpins creative problem-solving. Existing unified Multimodal Large Language Models (unified MLLMs) [5, 8, 9, 14, 18, 28, 34] have demonstrated remarkable capabilities in multimodal understanding and generation within a single model architecture. Given their ability to process and generate across modalities similar to human cognition, it is natural to investigate whether these models can exhibit reasoning capabilities comparable to those of humans. To evaluate this,",
    "bbox": [509, 643, 906, 900],
    "page_idx": 0
  },
  {
    "type": "aside_text",
    "text": "arXiv:2503.19312v1 [cs.CV] 25 Mar 2025",
    "bbox": [22, 260, 57, 705],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "$\\dagger$ Interns at Microsoft.",
    "bbox": [114, 887, 235, 898],
    "page_idx": 0
  },
  {
    "type": "page_number",
    "text": "1",
    "bbox": [493, 925, 501, 935],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "we adopt the Text-to-Image In-Context Learning (T2I-ICL) task [40], which requires models to process interleaved text-image inputs and generate coherent outputs by learning from multimodal contexts (Figure 1). Despite the impressive capabilities of unified MLLMs, our experiments reveal that they struggle to replicate this reasoning capability, often failing to grasp contextual relationships or preserve compositional consistency in T2I-ICL tasks.",
    "bbox": [89, 90, 480, 210],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "To overcome these challenges, building upon the demonstrated success of CoT prompting in enhancing complex task processing for LLMs, we propose a novel framework that involves a structured thought process called ImageGen-CoT prior to image generation. Our key insight is that explicitly generating reasoning steps before image synthesis helps unified MLLMs better understand multimodal contexts and produce more coherent outputs. However, these models often produce disorganized and incoherent thought processes, leading to suboptimal performance. To address these limitations, we first propose an automated dataset construction pipeline to generate ImageGen-CoT datasets, where each sample consists of a pair of ImageGen-CoT and a corresponding image. The pipeline comprises three main stages: 1) collecting T2I-ICL instructions, 2) using MLLMs to generate step-by-step reasoning (ImageGen-CoT), and 3) producing image descriptions via MLLMs for diffusion models to generate images. To further enhance the dataset quality, we employ an iterative refinement process: The model first generates multiple text prompts and corresponding images, selects the best one, critiques the generated image, and iteratively refines the prompt until the max round is reached. Then, we fine-tune the model using this dataset which significantly enhances the image generation capabilities of unified-MLLMs in T2I-ICL tasks.",
    "bbox": [91, 215, 483, 592],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Despite the strong performance, T2I-ICL tasks' complexity leaves room for improvement. Inspired by NLP's Best-of-N paradigm, we explore three test-time scaling strategies: 1. Multi-Chain: Generate multiple ImageGen-CoT chains, each producing one image; 2. Single-Chain: Create multiple image variants from one ImageGen-CoT; 3. Hybrid: Combine both methods - multiple reasoning chains with multiple image variants per chain. Our empirical studies reveal two critical insights: (1) Instead of changing seeds, generating multiple ImageGen-CoTs via high-temperature LLM decoding achieves a similar performance to scaling. (2) ImageGen-CoT enables bidirectional expansion—either generating multiple instances of ImageGen-CoT or modifying seeds to create diverse images—outperforming single-dimension scaling, opening new pathways for performance optimization in complex multimodal tasks.",
    "bbox": [89, 595, 482, 851],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "To evaluate the effectiveness of our method, we experiment with leading Unified MLLMs. These models can be categorized into two types based on their visual representa",
    "bbox": [89, 854, 482, 901],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/4a03b6e1501ec5553e4bdf8d84bf4619bc9aeeed4cbecf1bb8893fd7be1940c7.jpg",
    "image_caption": [
      "Figure 2. Performance comparison on CoBSAT and DreamBench++ benchmarks. Our method significantly improves SEED-X's performance through progressive enhancements: adding ImageGen-CoT, fine-tuning with the ImageGen-CoT dataset, and applying test-time scaling strategies."
    ],
    "image_footnote": [],
    "bbox": [516, 87, 640, 218],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/796a246c3d396567dcf31106478da51f7e32a7df38d3a8e5cfb64f4656991c64.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [651, 88, 776, 217],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/ac25d766f751c72084d9fad93843b33bf1ca3fcc484f2e0a42aeb3b500a6d286.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [781, 88, 911, 214],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "tions: discrete visual tokens [8, 18, 22, 29, 34] and continuous visual embeddings [5, 15, 30, 35]. We select SEEDLLaMA [8] as a representative of the discrete approach and SEED-X [9] for the continuous approach, considering their open-source availability and support for interleaved text-image input. Extensive experiments demonstrate the effectiveness of our method. Specifically, as shown in Figure 2, SEED-X FT with ImageGen-CoT improves by $89\\%$ and $114\\%$ on CoBSAT and DreamBench++. With scaling strategy, it further achieves 0.909 and 0.543 respectively.",
    "bbox": [511, 333, 906, 484],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Our contributions can be summarized as follows:",
    "bbox": [532, 484, 857, 500],
    "page_idx": 1
  },
  {
    "type": "list",
    "sub_type": "text",
    "list_items": [
      "1. We propose a novel framework that generates a thought process (called ImageGen-CoT) to enhance the performance of unified MLLMs in T2I-ICL tasks.",
      "2. We construct high-quality ImageGen-CoT datasets for fine-tuning unified MLLMs through an automatic dataset construction pipeline.",
      "3. We explore Best-of-N test-time scaling up paradigms and propose a hybrid scaling approach that first generates multiple ImageGen-CoT chains and then generates multiple image variations per chain."
    ],
    "bbox": [513, 501, 906, 681],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "2. Related Work",
    "text_level": 1,
    "bbox": [513, 700, 653, 715],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "2.1. In-Context Learning",
    "text_level": 1,
    "bbox": [513, 726, 709, 742],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Large language models (LLMs) [3] have exhibited exceptional capabilities for text in-context learning (T-ICL). This ability allows LLMs to adapt to new tasks by observing a few illustrative examples provided as context, without requiring parameter updates. These models demonstrate remarkable performance on various tasks, with extension to multimodal models [1, 31, 37]. With the development of image generation, recent studies have proposed text-to-image in-context learning (T2I-ICL). For instance, CoBSAT [40] is the first benchmark designed to evaluate a",
    "bbox": [511, 750, 906, 900],
    "page_idx": 1
  },
  {
    "type": "page_number",
    "text": "2",
    "bbox": [493, 924, 504, 935],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "model's T2I-ICL (Text-to-Image In-Context Learning) generation capabilities. This includes assessing the model's ability to rapidly adapt to tasks given the in-context demonstrations, which are key aspects of T2I-ICL. Emu2 [28] also evaluates models' T2I-ICL capabilities through subject customization in DreamBench [25], where the model needs to bind visual concepts from reference images to generate customized outputs. In this work, following previous studies, we validate our approach's improvement on the T2I-ICL task using CoBSAT and DreamBench++ [20].",
    "bbox": [89, 90, 480, 242],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "2.2. Text-to-Image Generation",
    "text_level": 1,
    "bbox": [89, 253, 326, 268],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Text-to-Image (T2I) generation [21, 23, 24, 26, 39] aims to generate images based on a user's textual description. With the development of T2I diffusion models, such as DALL-E 3 [2], SD3 [6], and FLUX.1-Schnell [13], users can now generate high-quality and vivid images directly from text descriptions. Building on this success, there is an increasing demand for models to generate customized content, such as specific subjects, styles, or attributes tailored to individual user needs. Consequently, a variety of methods have emerged to address the challenge of subject-customized generation. These methods [7, 12, 19, 25, 27] typically rely on fine-tuning techniques, such as LoRA [11] or contrastive learning [10], to specialize a general T2I model for subject customization. However, these methods require the collection of subject-specific datasets and involve time-consuming retraining for each new user request. This makes them resource-intensive, limiting their ability to generalize quickly to new needs. To address these challenges, researchers [28] train EMU2 on multimodal sequences, leveraging its inherent ICL ability to quickly bind visual concepts from the context. Despite these advancements, their performances remain limited. In this work, we explore how introducing a thought process prior to image generation, called ImageGen-CoT, can significantly enhance their performance on the T2I-ICL task.",
    "bbox": [91, 276, 483, 654],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "2.3. Unified Multimodal Language Models",
    "text_level": 1,
    "bbox": [89, 666, 419, 681],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Recent years have witnessed remarkable progress in multimodal AI across two key domains: understanding and generation. In understanding, Large Vision-Language Models (LVLMs) [1, 4, 14, 16, 17, 32, 36, 38, 41] have achieved impressive capabilities in complex visual-textual reasoning tasks. Meanwhile, in generation, Text-to-Image diffusion models [2, 6, 13] have advanced to produce photorealistic images that can rival professional artists' work. Given these developments, researchers have been exploring ways to unify multimodal understanding and generation capabilities within a single model architecture. These models can be categorized into two approaches based on their visual representations: discrete visual tokens [8, 18, 22, 29, 34] and continuous visual embeddings [5, 15, 30, 35]. Discrete ap",
    "bbox": [89, 689, 482, 900],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "proaches leverage VQ-VAE to tokenize images into discrete tokens, enabling training and inference similar to language processing. In contrast, continuous approaches generate latent embeddings that are subsequently processed through diffusion models for image synthesis.",
    "bbox": [511, 90, 903, 167],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "3. Method",
    "text_level": 1,
    "bbox": [511, 181, 604, 196],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "In this section, we present our ImageGen-CoT framework in detail. First, we introduce the formulation of ImageGen-CoT. (Sec.3.1). Second, we describe our automated pipeline for collecting high-quality ImageGen-CoT datasets (Sec.3.2). Third, we provide a detailed formulation of the dataset and the loss function used to fine-tune the model with the collected dataset (Sec.3.3). Finally, we explore various strategies to enhance model performance during inference, culminating in a novel hybrid scaling approach that addresses both contextual comprehension and generation challenges (Sec.3.4).",
    "bbox": [511, 205, 906, 372],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "3.1. Formulation of ImageGen-CoT",
    "text_level": 1,
    "bbox": [511, 383, 790, 398],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "As described above, T2I-ICL tasks require models to have a high level of comprehension. To enhance the model's capacity, we propose a new framework that generates a Chain-of-Thought, which we call ImageGen-CoT, before performing ImageGen. While we initially expected models to simultaneously output both ImageGen-CoT reasoning chains and corresponding images in a single forward pass.",
|
| 405 |
+
"bbox": [
|
| 406 |
+
511,
|
| 407 |
+
405,
|
| 408 |
+
903,
|
| 409 |
+
511
|
| 410 |
+
],
|
| 411 |
+
"page_idx": 2
|
| 412 |
+
},
|
| 413 |
+
{
|
| 414 |
+
"type": "text",
|
| 415 |
+
"text": "However, during our practice, we observe that models frequently fail to generate images even when explicitly prompted to first generate ImageGen-CoT followed by image output. As illustrated in Figure 3, to ensure reliable image generation, we develop a two-stage inference protocol. The first stage involves prompting the model to generate the ImageGen-CoT reasoning chain $R$ . In the second stage, we combine the original input $X$ with the generated ImageGen-CoT $R$ , along with a mandatory image generation token $\\langle \\mathrm{image} \\rangle$ , to guarantee the production of the target image $I$ . This process can be formally expressed as:",
|
| 416 |
+
"bbox": [
|
| 417 |
+
511,
|
| 418 |
+
511,
|
| 419 |
+
905,
|
| 420 |
+
678
|
| 421 |
+
],
|
| 422 |
+
"page_idx": 2
|
| 423 |
+
},
|
| 424 |
+
{
|
| 425 |
+
"type": "equation",
|
| 426 |
+
"text": "\n$$\n\\text {S t a g e} R = \\mathcal {M} (X \\oplus \\text {i n s t r u c t i o n})\n$$\n",
|
| 427 |
+
"text_format": "latex",
|
| 428 |
+
"bbox": [
|
| 429 |
+
584,
|
| 430 |
+
689,
|
| 431 |
+
818,
|
| 432 |
+
705
|
| 433 |
+
],
|
| 434 |
+
"page_idx": 2
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "equation",
|
| 438 |
+
"text": "\n$$\n\\begin{array}{l} \\text {S t a g e 1 : I = M (X \\oplus i n s t r u c t i o n)} \\\\ \\text {S t a g e 2 : I = M (X \\oplus R \\oplus \\langle i m a g e \\rangle)} \\end{array} \\tag {1}\n$$\n",
|
| 439 |
+
"text_format": "latex",
|
| 440 |
+
"bbox": [
|
| 441 |
+
586,
|
| 442 |
+
700,
|
| 443 |
+
903,
|
| 444 |
+
724
|
| 445 |
+
],
|
| 446 |
+
"page_idx": 2
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"type": "text",
|
| 450 |
+
"text": "where $\\mathcal{M}$ denotes the unified MLLMs, and $\\oplus$ represents the concatenation operation.",
|
| 451 |
+
"bbox": [
|
| 452 |
+
511,
|
| 453 |
+
731,
|
| 454 |
+
903,
|
| 455 |
+
762
|
| 456 |
+
],
|
| 457 |
+
"page_idx": 2
|
| 458 |
+
},
|
| 459 |
+
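The two-stage protocol in Eq. (1) maps directly onto a small amount of inference code. Below is a minimal sketch, assuming a hypothetical `generate_text` / `generate_image` interface on the unified MLLM; the wrapper names, the instruction string, and the `<image>` token handling are illustrative, not the paper's actual API.

```python
# Minimal sketch of the two-stage ImageGen-CoT inference protocol (Eq. 1).
# `model` is a unified MLLM; generate_text / generate_image are assumed
# (hypothetical) wrappers around its text and image decoding paths.

COT_INSTRUCTION = ("Think step by step about the in-context examples, "
                   "then write a detailed prompt for the next image.")

def imagegen_cot_inference(model, x):
    # Stage 1: R = M(X + instruction) -- produce the reasoning chain only.
    reasoning = model.generate_text(x + COT_INSTRUCTION)
    # Stage 2: I = M(X + R + <image>) -- the mandatory <image> token forces
    # the model to decode an image instead of continuing in text.
    image = model.generate_image(x + reasoning + "<image>")
    return reasoning, image
```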
{
|
| 460 |
+
"type": "text",
|
| 461 |
+
"text": "3.2. Dataset Construction",
|
| 462 |
+
"text_level": 1,
|
| 463 |
+
"bbox": [
|
| 464 |
+
511,
|
| 465 |
+
772,
|
| 466 |
+
712,
|
| 467 |
+
787
|
| 468 |
+
],
|
| 469 |
+
"page_idx": 2
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "text",
|
| 473 |
+
"text": "Due to the limitations of some unified MLLMs in generating well-structured ImageGen-CoT, which leads to suboptimal performance, we propose an automated pipeline to collect an ImageGen-CoT dataset and fine-tune the model using this dataset.",
|
| 474 |
+
"bbox": [
|
| 475 |
+
511,
|
| 476 |
+
794,
|
| 477 |
+
903,
|
| 478 |
+
869
|
| 479 |
+
],
|
| 480 |
+
"page_idx": 2
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"type": "text",
|
| 484 |
+
"text": "To collect high-quality ImageGen-CoT datasets, we first establish an instruction pool by collecting instructions from",
|
| 485 |
+
"bbox": [
|
| 486 |
+
511,
|
| 487 |
+
869,
|
| 488 |
+
903,
|
| 489 |
+
900
|
| 490 |
+
],
|
| 491 |
+
"page_idx": 2
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "page_number",
|
| 495 |
+
"text": "3",
|
| 496 |
+
"bbox": [
|
| 497 |
+
493,
|
| 498 |
+
924,
|
| 499 |
+
503,
|
| 500 |
+
935
|
| 501 |
+
],
|
| 502 |
+
"page_idx": 2
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "image",
|
| 506 |
+
"img_path": "images/240d40493a464d210b4b58311736c3e8ea880d0ab39e61033c1fe45be677a514.jpg",
|
| 507 |
+
"image_caption": [
|
| 508 |
+
"Figure 3. Main Pipeline. (a) Data Collection Pipeline: An automated iterative process where the MLLM acts as Generator, Selector, Critic, and Refiner to produce high-quality ImageGen-CoT (reasoning chains) and aligned images. (b) Training Pipeline: Fine-tuning unified MLLMs on the collected ImageGen-CoT dataset to enhance contextual reasoning and image generation. (c) Test-Time Scaling: Strategies for performance improvement via hybrid scaling during inference."
|
| 509 |
+
],
|
| 510 |
+
"image_footnote": [],
|
| 511 |
+
"bbox": [
|
| 512 |
+
93,
|
| 513 |
+
89,
|
| 514 |
+
903,
|
| 515 |
+
385
|
| 516 |
+
],
|
| 517 |
+
"page_idx": 3
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"type": "text",
|
| 521 |
+
"text": "existing training datasets in T2I-ICL tasks. Second, we propose an automatic dataset construction pipeline. As illustrated in Figure 3, our pipeline proceeds as follows: In the initial stage, we let MLLM act as a Generator to generate N outputs, each consisting of an ImageGen-CoT and a prompt for the next image, which are then used by T2I-Model [13] to generate N images. Then, MLLM acts as a Selector to select the best image from the N candidates. After that, if the selected image meets our quality threshold or reaches the maximum iteration limit, the pipeline terminates and outputs the corresponding ImageGen-CoT and image pair. Otherwise, we let MLLM act as a Critic to write a critique of the selected image, assessing how well it matches the T2I-ICL prompt. Finally, MLLM acts as a Refiner to refine the prompt based on the critique, and the process iterates until meeting the termination. Finally, based on the collected responses, we construct our ImageGen-CoT dataset as follows:",
|
| 522 |
+
"bbox": [
|
| 523 |
+
88,
|
| 524 |
+
491,
|
| 525 |
+
485,
|
| 526 |
+
763
|
| 527 |
+
],
|
| 528 |
+
"page_idx": 3
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"type": "equation",
|
| 532 |
+
"text": "\n$$\n\\mathcal {D} _ {\\text {I m a g e G e n - C o T}} = \\left\\{\\left(T _ {i} ^ {*}, I _ {i} ^ {*}\\right) \\right\\} _ {i = 1} ^ {n} \\tag {2}\n$$\n",
|
| 533 |
+
"text_format": "latex",
|
| 534 |
+
"bbox": [
|
| 535 |
+
183,
|
| 536 |
+
791,
|
| 537 |
+
482,
|
| 538 |
+
809
|
| 539 |
+
],
|
| 540 |
+
"page_idx": 3
|
| 541 |
+
},
|
| 542 |
+
{
|
| 543 |
+
"type": "text",
|
| 544 |
+
"text": "where $(T_{i}^{*},I_{i}^{*})$ represents the $i$ -th high-quality pair selected by our pipeline. $T_{i}^{*}$ is the ImageGen-CoT that successfully guided the generation. $I_{t}^{*}$ is the corresponding generated image that meets our quality standards. $n$ is the total number of collected pairs in the dataset.",
|
| 545 |
+
"bbox": [
|
| 546 |
+
89,
|
| 547 |
+
824,
|
| 548 |
+
483,
|
| 549 |
+
902
|
| 550 |
+
],
|
| 551 |
+
"page_idx": 3
|
| 552 |
+
},
|
| 553 |
+
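The Generator/Selector/Critic/Refiner loop described above can be sketched as follows; `mllm_generate`, `mllm_select`, `mllm_critique`, `mllm_refine`, and `t2i_generate` are hypothetical helpers standing in for the MLLM roles and the T2I model, and the quality threshold is an assumed scalar.

```python
# Sketch of the automated ImageGen-CoT data-collection loop (Fig. 3a).
# All helper functions below are hypothetical stand-ins.

def collect_pair(query, n=3, max_iters=2, threshold=0.8):
    prompt = query
    for _ in range(max_iters):
        # Generator: N candidate (ImageGen-CoT, image-prompt) outputs.
        candidates = [mllm_generate(prompt) for _ in range(n)]
        images = [t2i_generate(c.image_prompt) for c in candidates]
        # Selector: pick the best of the N candidate images.
        best, score = mllm_select(query, candidates, images)
        if score >= threshold:
            break  # quality threshold met -> terminate
        # Critic: assess how well the selected image matches the T2I-ICL prompt.
        critique = mllm_critique(query, best)
        # Refiner: rewrite the prompt based on the critique, then iterate.
        prompt = mllm_refine(prompt, critique)
    # One (T_i*, I_i*) pair for the dataset of Eq. (2).
    return best.cot, best.image
```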
{
|
| 554 |
+
"type": "text",
|
| 555 |
+
"text": "3.3. Training Pipeline",
|
| 556 |
+
"text_level": 1,
|
| 557 |
+
"bbox": [
|
| 558 |
+
513,
|
| 559 |
+
489,
|
| 560 |
+
689,
|
| 561 |
+
507
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 3
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "text",
|
| 567 |
+
"text": "After constructing the dataset, we explore the training paradigm to fine-tune Unified MLLMs using our collected dataset. In this section, we detail the process of training Unified MLLMs with the ImageGen-CoT dataset, focusing on data formulation and training objectives.",
|
| 568 |
+
"bbox": [
|
| 569 |
+
511,
|
| 570 |
+
527,
|
| 571 |
+
906,
|
| 572 |
+
602
|
| 573 |
+
],
|
| 574 |
+
"page_idx": 3
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"type": "text",
|
| 578 |
+
"text": "To maintain consistency between the training and inference stages, we divide the ImageGen-CoT dataset into two splits:",
|
| 579 |
+
"bbox": [
|
| 580 |
+
511,
|
| 581 |
+
603,
|
| 582 |
+
905,
|
| 583 |
+
648
|
| 584 |
+
],
|
| 585 |
+
"page_idx": 3
|
| 586 |
+
},
|
| 587 |
+
{
|
| 588 |
+
"type": "list",
|
| 589 |
+
"sub_type": "text",
|
| 590 |
+
"list_items": [
|
| 591 |
+
"(1) $[X, p_{\\mathrm{cot}}] \\to \\mathrm{ImageGen-CoT}$ , which generates the ImageGen-CoT",
|
| 592 |
+
"(2) $[X, \\text{ImageGen-CoT}, p_{\\text{image}}] \\to \\text{image}$ , which generates the final image."
|
| 593 |
+
],
|
| 594 |
+
"bbox": [
|
| 595 |
+
511,
|
| 596 |
+
648,
|
| 597 |
+
903,
|
| 598 |
+
708
|
| 599 |
+
],
|
| 600 |
+
"page_idx": 3
|
| 601 |
+
},
|
| 602 |
+
{
|
| 603 |
+
"type": "text",
|
| 604 |
+
"text": "When training with the training dataset split 1, since model only generate the ImageGen-CoT text, we apply the normal $lm\\_loss$ , formulated as follows:",
|
| 605 |
+
"bbox": [
|
| 606 |
+
511,
|
| 607 |
+
709,
|
| 608 |
+
905,
|
| 609 |
+
753
|
| 610 |
+
],
|
| 611 |
+
"page_idx": 3
|
| 612 |
+
},
|
| 613 |
+
{
|
| 614 |
+
"type": "equation",
|
| 615 |
+
"text": "\n$$\nl m \\_ l o s s = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log P \\left(y _ {i} \\mid y _ {< i}, X\\right)\n$$\n",
|
| 616 |
+
"text_format": "latex",
|
| 617 |
+
"bbox": [
|
| 618 |
+
576,
|
| 619 |
+
765,
|
| 620 |
+
841,
|
| 621 |
+
806
|
| 622 |
+
],
|
| 623 |
+
"page_idx": 3
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "text",
|
| 627 |
+
"text": "where $y_{i}$ is the $i$ -th token in the ImageGen-CoT text, $y_{<i}$ represents the preceding tokens, $X$ is the input, and $N$ is the total number of tokens in the ImageGen-CoT sequence.",
|
| 628 |
+
"bbox": [
|
| 629 |
+
511,
|
| 630 |
+
810,
|
| 631 |
+
905,
|
| 632 |
+
854
|
| 633 |
+
],
|
| 634 |
+
"page_idx": 3
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "text",
|
| 638 |
+
"text": "For training with dataset split 2, there is no uniform training loss, as different Unified MLLMs utilize varying visual representations (e.g., discrete visual tokens [8, 34])",
|
| 639 |
+
"bbox": [
|
| 640 |
+
511,
|
| 641 |
+
854,
|
| 642 |
+
905,
|
| 643 |
+
901
|
| 644 |
+
],
|
| 645 |
+
"page_idx": 3
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "page_number",
|
| 649 |
+
"text": "4",
|
| 650 |
+
"bbox": [
|
| 651 |
+
493,
|
| 652 |
+
924,
|
| 653 |
+
504,
|
| 654 |
+
935
|
| 655 |
+
],
|
| 656 |
+
"page_idx": 3
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "text",
|
| 660 |
+
"text": "or continuous visual embeddings [9]). For models using discrete visual tokens, the same loss as language modeling (lm_loss) is applied. For models using continuous visual embeddings, we apply the mse_loss between the generated and target visual embeddings, formulated as:",
|
| 661 |
+
"bbox": [
|
| 662 |
+
89,
|
| 663 |
+
90,
|
| 664 |
+
480,
|
| 665 |
+
167
|
| 666 |
+
],
|
| 667 |
+
"page_idx": 4
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "equation",
|
| 671 |
+
"text": "\n$$\nm s e \\_ l o s s = \\| \\hat {z} - z \\| ^ {2}\n$$\n",
|
| 672 |
+
"text_format": "latex",
|
| 673 |
+
"bbox": [
|
| 674 |
+
210,
|
| 675 |
+
189,
|
| 676 |
+
364,
|
| 677 |
+
208
|
| 678 |
+
],
|
| 679 |
+
"page_idx": 4
|
| 680 |
+
},
|
| 681 |
+
{
|
| 682 |
+
"type": "text",
|
| 683 |
+
"text": "where $\\hat{z}$ is the generated visual embedding and $z$ is the corresponding target visual embedding. In this study, our primary objective is to enhance the model's capability to generate accurate ImageGen-CoT. So, by default, we utilize data split 1 for fine-tuning Unified MLLMs, with more results presented in the Appendix.",
|
| 684 |
+
"bbox": [
|
| 685 |
+
89,
|
| 686 |
+
234,
|
| 687 |
+
483,
|
| 688 |
+
328
|
| 689 |
+
],
|
| 690 |
+
"page_idx": 4
|
| 691 |
+
},
|
| 692 |
+
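Both objectives are standard and follow directly from the two formulas above. The sketch below uses PyTorch and assumes the model returns token logits for split 1 and a continuous visual embedding for split 2; tensor shapes are illustrative.

```python
import torch.nn.functional as F

# Split 1: language-modeling loss over the ImageGen-CoT tokens, i.e. the
# mean negative log-likelihood of each token y_i given its prefix y_<i and
# the input X.
def lm_loss(logits, targets):
    # logits: [N, vocab_size] predictions for the N CoT tokens,
    # targets: [N] ground-truth token ids.
    return F.cross_entropy(logits, targets)  # = -1/N * sum_i log P(y_i | y_<i, X)

# Split 2 (continuous-embedding models): MSE between the generated and
# target visual embeddings, mse_loss = ||z_hat - z||^2.
def visual_mse_loss(z_hat, z):
    return F.mse_loss(z_hat, z, reduction="sum")  # squared L2 norm as written
```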
{
|
| 693 |
+
"type": "text",
|
| 694 |
+
"text": "3.4. Test time scale up",
|
| 695 |
+
"text_level": 1,
|
| 696 |
+
"bbox": [
|
| 697 |
+
89,
|
| 698 |
+
353,
|
| 699 |
+
264,
|
| 700 |
+
371
|
| 701 |
+
],
|
| 702 |
+
"page_idx": 4
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"type": "text",
|
| 706 |
+
"text": "Though fine-tuning with the ImageGen-CoT dataset significantly improves model performance in T2I-ICL tasks, substantial room for improvement remains. Inspired by test-time scaling methods in NLP, we explore whether increasing computational investment during inference can further enhance performance. We first investigate a conventional paradigm: using SEED-X as the base model, generating multiple images by varying the seed value, and outputs are filtered via a ground-truth verifier aligned with the Pass@N metric. However, we observe that even with $\\mathrm{N} = 16$ , this approach underperforms compared to SEED-X fine-tuned with ImageGen-CoT Dataset.",
|
| 707 |
+
"bbox": [
|
| 708 |
+
89,
|
| 709 |
+
380,
|
| 710 |
+
483,
|
| 711 |
+
563
|
| 712 |
+
],
|
| 713 |
+
"page_idx": 4
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "text",
|
| 717 |
+
"text": "This observation motivates our exploration of test-time scaling in the context of ImageGen-CoT, which we approach through three distinct strategies: 1. Single-Chain Scaling: This approach generates one ImageGen-CoT chain and produces multiple image variants by varying the seed values. 2. Multi-Chain Scaling: Similar to NLP's \"Best-of-N\" sampling, we generate multiple ImageGen-CoT chains through high-temperature LLM decoding. Each chain produces a unique image, potentially capturing different aspects of the contextual requirements. 3.Hybrid Scaling: Regarding the dual challenges of contextual comprehension and generation in T2I-ICL tasks, we propose a hybrid approach that combines the strengths of both strategies. As illustrated in Figure 3, this method first generates multiple ImageGen-CoT chains and then creates multiple image variations for each chain. Our extensive experiments further reveal the effectiveness of this hybrid scaling strategy: the integration of ImageGen-CoT enables effective bidirectional scaling across both comprehension and generation dimensions. This dual-axis scalability opens new pathways for optimizing MLLM performance in complex multimodal tasks.",
|
| 718 |
+
"bbox": [
|
| 719 |
+
89,
|
| 720 |
+
569,
|
| 721 |
+
483,
|
| 722 |
+
900
|
| 723 |
+
],
|
| 724 |
+
"page_idx": 4
|
| 725 |
+
},
|
| 726 |
+
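The three strategies differ only in where the sampling budget of N outputs is spent. A minimal sketch under a shared budget, where `gen_cot(x, temperature)`, `gen_image(x, cot, seed)`, and `verifier` are hypothetical helpers (the last standing in for the ground-truth Pass@N check):

```python
# Test-time scaling sketch (Fig. 3c); all helpers are hypothetical.

def single_chain(x, n):
    # One reasoning chain, n image variants via different seeds.
    cot = gen_cot(x)
    return [gen_image(x, cot, seed=s) for s in range(n)]

def multi_chain(x, n):
    # Best-of-N style: n diverse chains via high-temperature decoding,
    # one image per chain.
    return [gen_image(x, gen_cot(x, temperature=1.0), seed=0) for _ in range(n)]

def hybrid(x, k, m):
    # k chains x m image variants per chain (e.g., Hybrid@16 = 4 x 4),
    # scaling comprehension (chains) and generation (seeds) together.
    chains = [gen_cot(x, temperature=1.0) for _ in range(k)]
    return [gen_image(x, c, seed=s) for c in chains for s in range(m)]

# e.g., best = max(hybrid(x, 4, 4), key=verifier)  # x: the T2I-ICL input
```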
{
|
| 727 |
+
"type": "text",
|
| 728 |
+
"text": "4. Experiments",
|
| 729 |
+
"text_level": 1,
|
| 730 |
+
"bbox": [
|
| 731 |
+
513,
|
| 732 |
+
90,
|
| 733 |
+
648,
|
| 734 |
+
107
|
| 735 |
+
],
|
| 736 |
+
"page_idx": 4
|
| 737 |
+
},
|
| 738 |
+
{
|
| 739 |
+
"type": "text",
|
| 740 |
+
"text": "4.1. Implementation details",
|
| 741 |
+
"text_level": 1,
|
| 742 |
+
"bbox": [
|
| 743 |
+
511,
|
| 744 |
+
114,
|
| 745 |
+
728,
|
| 746 |
+
131
|
| 747 |
+
],
|
| 748 |
+
"page_idx": 4
|
| 749 |
+
},
|
| 750 |
+
{
|
| 751 |
+
"type": "text",
|
| 752 |
+
"text": "To validate the effectiveness of our ImageGen-CoT framework and dataset, we conduct experiments on two T2I-ICL benchmarks: CoBSAT [40] and DreamBench++ [20]. We employ SEED-LLaMA [8] and SEED-X [9] as our base Unified MLLMs for both ImageGen-CoT reasoning and image generation. For the dataset construction pipeline, we utilize different configurations: on DreamBench++, InternVL2.5-78B-MPO-AWQ [33] serves as the Generator, Selector, Critic, and Refiner, while for CoBSAT, we implement a self-consistency selector method with other components remaining the same. FLUX.1-schnell [13] is selected as the base T2I model for both benchmarks. We maintain CoBSAT's original split strategy, while implementing an image-level split for DreamBench++ to ensure no subject overlap. During data construction, we generate 3 outputs per query using a sampling temperature of 0.7 and top-p of 0.8, with a maximum of 2 iterations. Additional details regarding dataset splits, training procedures, and ablation studies are provided in the Appendix.",
|
| 753 |
+
"bbox": [
|
| 754 |
+
511,
|
| 755 |
+
138,
|
| 756 |
+
906,
|
| 757 |
+
426
|
| 758 |
+
],
|
| 759 |
+
"page_idx": 4
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "text",
|
| 763 |
+
"text": "4.2. Main Results",
|
| 764 |
+
"text_level": 1,
|
| 765 |
+
"bbox": [
|
| 766 |
+
511,
|
| 767 |
+
439,
|
| 768 |
+
653,
|
| 769 |
+
455
|
| 770 |
+
],
|
| 771 |
+
"page_idx": 4
|
| 772 |
+
},
|
| 773 |
+
{
|
| 774 |
+
"type": "text",
|
| 775 |
+
"text": "In this section, we seek to answer the following questions: a) How much the ImageGen-CoT improves model's performance (via prompting)? b) To what extent does the performance of the model improve after fine-tuning with the ImageGen-CoT dataset? c) Can we invest more time in inference time to improve the performance? Finally, to better demonstrate the effectiveness of our method, we present visible comparison results.",
|
| 776 |
+
"bbox": [
|
| 777 |
+
511,
|
| 778 |
+
463,
|
| 779 |
+
906,
|
| 780 |
+
583
|
| 781 |
+
],
|
| 782 |
+
"page_idx": 4
|
| 783 |
+
},
|
| 784 |
+
{
|
| 785 |
+
"type": "text",
|
| 786 |
+
"text": "Question 1: How much the ImageGen-CoT (via prompt) improves model's performance.",
|
| 787 |
+
"text_level": 1,
|
| 788 |
+
"bbox": [
|
| 789 |
+
511,
|
| 790 |
+
592,
|
| 791 |
+
905,
|
| 792 |
+
622
|
| 793 |
+
],
|
| 794 |
+
"page_idx": 4
|
| 795 |
+
},
|
| 796 |
+
{
|
| 797 |
+
"type": "text",
|
| 798 |
+
"text": "To verify the effectiveness of ImageGen-CoT (via prompt), we compare the model's performance with and without generating ImageGen-CoT before ImageGen via prompt on CoBSAT [40] and Dreambench++ [20]. Since CoBSAT includes 10 tasks, we calculate the average score to represent overall performance. Similarly, for Dreambench++, we compute the average score across its tasks.",
|
| 799 |
+
"bbox": [
|
| 800 |
+
511,
|
| 801 |
+
625,
|
| 802 |
+
905,
|
| 803 |
+
729
|
| 804 |
+
],
|
| 805 |
+
"page_idx": 4
|
| 806 |
+
},
|
| 807 |
+
{
|
| 808 |
+
"type": "text",
|
| 809 |
+
"text": "Results As shown in Tables S-1 and S-2, integrating ImageGen-CoT through prompting yields consistent improvements across benchmarks. On CoBSAT, SEEDLLaMA's average score improves from 0.254 to 0.283 $(+11.4\\%)$ relative gain), while SEED-X shows a more substantial improvement from 0.349 to 0.439 $(+25.8\\%)$ . The trend persists on Dreambench++, where SEED-X achieves a $84.6\\%$ relative improvement $(0.188 \\rightarrow 0.347)$ compared to its baseline. These results highlight the effectiveness of incorporating ImageGen-CoT in enhancing model performance. However, the SEED performance on Dreambench",
|
| 810 |
+
"bbox": [
|
| 811 |
+
511,
|
| 812 |
+
734,
|
| 813 |
+
908,
|
| 814 |
+
902
|
| 815 |
+
],
|
| 816 |
+
"page_idx": 4
|
| 817 |
+
},
|
| 818 |
+
{
|
| 819 |
+
"type": "page_number",
|
| 820 |
+
"text": "5",
|
| 821 |
+
"bbox": [
|
| 822 |
+
493,
|
| 823 |
+
924,
|
| 824 |
+
504,
|
| 825 |
+
936
|
| 826 |
+
],
|
| 827 |
+
"page_idx": 4
|
| 828 |
+
},
|
| 829 |
+
{
|
| 830 |
+
"type": "table",
|
| 831 |
+
"img_path": "images/dd4a9f39eb5cffc9ac89b40b4a6ae56ca2aeae095d2a72c65acc833f5e267fe7.jpg",
|
| 832 |
+
"table_caption": [
|
| 833 |
+
"Table 1. Main results on CoBSAT benchmark. \"FT w/ GT Image\" denotes fine-tuning with ground truth images, while \"FT w/ ImageGen-CoT\" represents fine-tuning with our ImageGen-CoT dataset. The results demonstrate that ImageGen-CoT significantly improves model performance, with relative improvements over baseline model shown in red."
|
| 834 |
+
],
|
| 835 |
+
"table_footnote": [],
|
| 836 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"5\">Object-Inference Task</td><td colspan=\"5\">Attribute-Inference Task</td><td rowspan=\"2\">Avg.↑</td></tr><tr><td>Color-I</td><td>Bkg-I</td><td>Style-I</td><td>Action-I</td><td>Texture-I</td><td>Color-II</td><td>Bkg-II</td><td>Style-II</td><td>Action-II</td><td>Texture-II</td></tr><tr><td>SEED-LLaMA</td><td>.616</td><td>.216</td><td>.272</td><td>.592</td><td>.112</td><td>.088</td><td>.168</td><td>.192</td><td>.220</td><td>.056</td><td>.254</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.700</td><td>.276</td><td>.300</td><td>.408</td><td>.084</td><td>.176</td><td>.292</td><td>.272</td><td>.192</td><td>.132</td><td>.283</td></tr><tr><td>+ FT w/ GT Image</td><td>.632</td><td>.272</td><td>.352</td><td>.540</td><td>.128</td><td>.164</td><td>.200</td><td>.256</td><td>.172</td><td>.112</td><td>.283</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.620</td><td>.368</td><td>.384</td><td>.424</td><td>.060</td><td>.192</td><td>.288</td><td>.208</td><td>.216</td><td>.148</td><td>.291 ↑14.6%</td></tr><tr><td>SEED-X</td><td>.796</td><td>.412</td><td>.316</td><td>.596</td><td>.240</td><td>.176</td><td>.344</td><td>.260</td><td>.252</td><td>.104</td><td>.349</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.724</td><td>.440</td><td>.660</td><td>.784</td><td>.216</td><td>.312</td><td>.472</td><td>.228</td><td>.320</td><td>.240</td><td>.439</td></tr><tr><td>+ FT w/ GT Image</td><td>.936</td><td>.712</td><td>.896</td><td>.860</td><td>.468</td><td>.280</td><td>.324</td><td>.388</td><td>.636</td><td>.424</td><td>.592</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.884</td><td>.692</td><td>.928</td><td>.936</td><td>.420</td><td>.504</td><td>.612</td><td>.660</td><td>.524</td><td>.424</td><td>.658 ↑88.5%</td></tr></table>",
|
| 837 |
+
"bbox": [
|
| 838 |
+
93,
|
| 839 |
+
154,
|
| 840 |
+
903,
|
| 841 |
+
295
|
| 842 |
+
],
|
| 843 |
+
"page_idx": 5
|
| 844 |
+
},
|
| 845 |
+
{
|
| 846 |
+
"type": "table",
|
| 847 |
+
"img_path": "images/4ce40582d1e122b1072bee9b9b18025cbb8207f92e138d4f1c11b5e5fab24183.jpg",
|
| 848 |
+
"table_caption": [
|
| 849 |
+
"Table 2. Evaluation results on Dreambench++ benchmark. CP refers to concept preservation and PF refers to prompt following metrics. \"FT\" stands for fine-tuning. The relative gains over baseline model are shown in red."
|
| 850 |
+
],
|
| 851 |
+
"table_footnote": [],
|
| 852 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"5\">Concept Preservation</td><td colspan=\"4\">Prompt Following</td><td rowspan=\"2\">CP-PF↑</td></tr><tr><td>Animal</td><td>Human</td><td>Object</td><td>Style</td><td>Overall</td><td>Photorealistic</td><td>Style</td><td>Imaginative</td><td>Overall</td></tr><tr><td>SEED-LLaMA</td><td>.436</td><td>.315</td><td>.288</td><td>.381</td><td>.358</td><td>.306</td><td>.202</td><td>.154</td><td>.218</td><td>.078</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.390</td><td>.241</td><td>.262</td><td>.346</td><td>.317</td><td>.291</td><td>.211</td><td>.170</td><td>.222</td><td>.078</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.399</td><td>.290</td><td>.271</td><td>.318</td><td>.325</td><td>.348</td><td>.355</td><td>.210</td><td>.310</td><td>.101 ↑29.5%</td></tr><tr><td>SEED-X</td><td>.647</td><td>.420</td><td>.526</td><td>.571</td><td>.559</td><td>.346</td><td>.342</td><td>.303</td><td>.337</td><td>.188</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.547</td><td>.293</td><td>.369</td><td>.424</td><td>.427</td><td>.862</td><td>.775</td><td>.737</td><td>.817</td><td>.347</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.549</td><td>.410</td><td>.403</td><td>.432</td><td>.458</td><td>.922</td><td>.851</td><td>.846</td><td>.881</td><td>.403 ↑114.4%</td></tr></table>",
|
| 853 |
+
"bbox": [
|
| 854 |
+
93,
|
| 855 |
+
349,
|
| 856 |
+
903,
|
| 857 |
+
459
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 5
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "text",
|
| 863 |
+
"text": "remains unchanged. This is attributed to its limited comprehension capabilities, which result in unreasonable and disorganized ImageGen-CoT outputs. To address this, we fine-tune the model using our collected ImageGen-CoT datasets, enabling higher-quality generation. More details are provided below.",
|
| 864 |
+
"bbox": [
|
| 865 |
+
88,
|
| 866 |
+
483,
|
| 867 |
+
480,
|
| 868 |
+
575
|
| 869 |
+
],
|
| 870 |
+
"page_idx": 5
|
| 871 |
+
},
|
| 872 |
+
{
|
| 873 |
+
"type": "text",
|
| 874 |
+
"text": "Question 2: To what extent does the performance of the model improve after fine-tuning with the ImageGen-CoT dataset?",
|
| 875 |
+
"text_level": 1,
|
| 876 |
+
"bbox": [
|
| 877 |
+
88,
|
| 878 |
+
585,
|
| 879 |
+
483,
|
| 880 |
+
631
|
| 881 |
+
],
|
| 882 |
+
"page_idx": 5
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"type": "text",
|
| 886 |
+
"text": "To further enhance model performance, we fine-tuned both SEED-LLaMA [8] and SEED-X [9] on the ImageGen-CoT dataset, which was collected using an automatic dataset construction pipeline. The ImageGen-CoT dataset consists of two components: the first part focuses on teaching the model how to generate ImageGen-CoT text, while the second part trains the model to generate images based on the generated ImageGen-CoT text. As described in sec.3.3, our primary goal is to improve the model's capabilities in generating high-quality ImageGen-CoT. To this end, we fine-tune the models using Part I of the ImageGen-CoT Dataset by default. We compare the performance of our fine-tuned model with its version using ImageGen-CoT (via prompt) and the standard version.",
|
| 887 |
+
"bbox": [
|
| 888 |
+
88,
|
| 889 |
+
636,
|
| 890 |
+
482,
|
| 891 |
+
848
|
| 892 |
+
],
|
| 893 |
+
"page_idx": 5
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "text",
|
| 897 |
+
"text": "Results As shown in Table S-1, SEED-LLaMA and SEED-X fine-tuned with ImageGen-CoT Dataset achieve improvements of $+2.8\\%$ $(0.283 \\rightarrow 0.291)$ and $+49.9\\%$ $(0.439 \\rightarrow$",
|
| 898 |
+
"bbox": [
|
| 899 |
+
88,
|
| 900 |
+
854,
|
| 901 |
+
482,
|
| 902 |
+
900
|
| 903 |
+
],
|
| 904 |
+
"page_idx": 5
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "text",
|
| 908 |
+
"text": "0.658), compared to generating ImageGen-CoT via prompting, respectively. What's more, they even outperform themselves fine-tuned with GT Images by $+2.8\\%$ ( $0.283 \\rightarrow 0.291$ ) and $+11.1\\%$ ( $0.592 \\rightarrow 0.658$ ). Additionally, on the Dreambench++ benchmark, SEED-LLaMA fine-tuned with ImageGen-CoT Dataset shows an improvement of $+29.5\\%$ ( $0.078 \\rightarrow 0.101$ ) in CP-PF score, while SEED-X achieves a $+16.1\\%$ gain ( $0.347 \\rightarrow 0.403$ ). These strong results on COBSAT and Dreambench++ underscore the effectiveness and generalizability of our collected ImageGen-CoT dataset in enhancing model reasoning and understanding abilities.",
|
| 909 |
+
"bbox": [
|
| 910 |
+
511,
|
| 911 |
+
483,
|
| 912 |
+
906,
|
| 913 |
+
650
|
| 914 |
+
],
|
| 915 |
+
"page_idx": 5
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "text",
|
| 919 |
+
"text": "Question 3: Can we invest more time in inference time to improve the performance?",
|
| 920 |
+
"text_level": 1,
|
| 921 |
+
"bbox": [
|
| 922 |
+
511,
|
| 923 |
+
657,
|
| 924 |
+
903,
|
| 925 |
+
688
|
| 926 |
+
],
|
| 927 |
+
"page_idx": 5
|
| 928 |
+
},
|
| 929 |
+
{
|
| 930 |
+
"type": "text",
|
| 931 |
+
"text": "To further enhance model performance, we explore various test-time scaling strategies. We implement a Best-of-N approach where the model generates multiple image variations, with ground-truth metric evaluation (pass@N). As a baseline approach, we first experiment with the vanilla SEED-X model, generating multiple images by varying the seed values. We then investigate three advanced scaling strategies using SEED-X fine-tuned with ImageGen-CoT dataset: (1) Multi-Chain Scaling, which generates multiple distinct ImageGen-CoT chains, with each chain producing an image; (2) Single-Chain Scaling, which produces multiple image variations from a single ImageGen-CoT chain; and (3) Hybrid Scaling, a novel approach that combines the strengths of both strategies by first generating multiple",
|
| 932 |
+
"bbox": [
|
| 933 |
+
511,
|
| 934 |
+
689,
|
| 935 |
+
906,
|
| 936 |
+
900
|
| 937 |
+
],
|
| 938 |
+
"page_idx": 5
|
| 939 |
+
},
|
| 940 |
+
{
|
| 941 |
+
"type": "page_number",
|
| 942 |
+
"text": "6",
|
| 943 |
+
"bbox": [
|
| 944 |
+
493,
|
| 945 |
+
925,
|
| 946 |
+
504,
|
| 947 |
+
935
|
| 948 |
+
],
|
| 949 |
+
"page_idx": 5
|
| 950 |
+
},
|
| 951 |
+
{
|
| 952 |
+
"type": "image",
|
| 953 |
+
"img_path": "images/6b6ca563beaddcdb9e0eb60126cf473a71e71afeac6a0871c81570ab8354fd06.jpg",
|
| 954 |
+
"image_caption": [
|
| 955 |
+
"Figure 4. Test-time scaling strategies comparison. We conducted a comprehensive evaluation of three distinct scaling strategies: Multi-Chain Scaling, Single-Chain Scaling, and Hybrid Scaling, examining their performance across varying numbers of generated outputs (N=2,4,8,16). The experimental results are presented in two figures, with the left figure showing results on CoBSAT and the right figure displaying results on Dreambench++. The red numbers indicate the performance improvements achieved by Hybrid Scaling compared to Single-Chain Scaling."
|
| 956 |
+
],
|
| 957 |
+
"image_footnote": [],
|
| 958 |
+
"bbox": [
|
| 959 |
+
91,
|
| 960 |
+
88,
|
| 961 |
+
496,
|
| 962 |
+
268
|
| 963 |
+
],
|
| 964 |
+
"page_idx": 6
|
| 965 |
+
},
|
| 966 |
+
{
|
| 967 |
+
"type": "image",
|
| 968 |
+
"img_path": "images/6cb6022f4bc84e265543a17f0b413eac0e260e17d7769cda2a2e017211ede746.jpg",
|
| 969 |
+
"image_caption": [],
|
| 970 |
+
"image_footnote": [],
|
| 971 |
+
"bbox": [
|
| 972 |
+
498,
|
| 973 |
+
88,
|
| 974 |
+
903,
|
| 975 |
+
268
|
| 976 |
+
],
|
| 977 |
+
"page_idx": 6
|
| 978 |
+
},
|
| 979 |
+
{
|
| 980 |
+
"type": "image",
|
| 981 |
+
"img_path": "images/1130e9b0e4cc6e5fd16f36d90ec1bbd962a97b1190b9b5b4ef99853b1c73f60f.jpg",
|
| 982 |
+
"image_caption": [
|
| 983 |
+
"Figure 5. Qualitative Results. Comparison of generation results on COBSAT (top) and Dreambench+ (bottom) using baseline SEED-X, SEED-X with ImageGen-CoT prompting, and SEED-X fine-tuned with ImageGen-CoT dataset."
|
| 984 |
+
],
|
| 985 |
+
"image_footnote": [],
|
| 986 |
+
"bbox": [
|
| 987 |
+
94,
|
| 988 |
+
357,
|
| 989 |
+
901,
|
| 990 |
+
559
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 6
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "ImageGen-CoT chains and then producing multiple image variations for each chain. For each paradigm, we systematically evaluate scalability by generating 2, 4, 8, and 16 outputs. For Hybrid Scaling, we implement specific configurations: Hybrid@16 uses 4 ImageGen-CoT chains with 4 images per chain; Hybrid@8 explores two alternatives (2 chains $\\times$ 4 images or 4 chains $\\times$ 2 images); Hybrid@4 employs 2 chains $\\times$ 2 images; and Hybrid@2 tests either 2 chains $\\times$ 1 image or 1 chain $\\times$ 2 images. Due to the significant scale difference, we visualize the latter strategy here.",
|
| 997 |
+
"bbox": [
|
| 998 |
+
88,
|
| 999 |
+
614,
|
| 1000 |
+
485,
|
| 1001 |
+
767
|
| 1002 |
+
],
|
| 1003 |
+
"page_idx": 6
|
| 1004 |
+
},
|
| 1005 |
+
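For reference, the budget splits enumerated above written out as data; the (chains, images-per-chain) factorizations come from the text, and the constant name is illustrative.

```python
# (num_chains, images_per_chain) factorizations for each budget N, as
# enumerated in the text; Hybrid@8 and Hybrid@2 each have two variants.
HYBRID_CONFIGS = {
    16: [(4, 4)],
    8:  [(2, 4), (4, 2)],
    4:  [(2, 2)],
    2:  [(2, 1), (1, 2)],
}
```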
{
|
| 1006 |
+
"type": "text",
|
| 1007 |
+
"text": "Results",
|
| 1008 |
+
"text_level": 1,
|
| 1009 |
+
"bbox": [
|
| 1010 |
+
89,
|
| 1011 |
+
773,
|
| 1012 |
+
148,
|
| 1013 |
+
787
|
| 1014 |
+
],
|
| 1015 |
+
"page_idx": 6
|
| 1016 |
+
},
|
| 1017 |
+
{
|
| 1018 |
+
"type": "text",
|
| 1019 |
+
"text": "As shown in Figure 4, our experiments reveal three key insights. First, the Vanilla SEED-X@16 baseline (0.67 on CobSAT, 0.312 on Dreambench++) underperforms even the simplest scaling strategies (e.g., 0.747 on CobSAT@2), highlighting the necessity of ImageGen-CoT integration. Second, Multi-Chain Scaling matches Single-Chain Scaling in performance, proving that generating diverse reason",
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
89,
|
| 1022 |
+
794,
|
| 1023 |
+
483,
|
| 1024 |
+
902
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 6
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "text",
|
| 1030 |
+
"text": "ing paths is as effective as varying outputs from a single chain. Finally, Hybrid Scaling consistently achieves the highest scores across benchmarks. At $N = 16$ , Hybrid Scaling improves CobSAT performance to 0.909 (1.9% over Single-Chain) and Dreambench++ to 0.543 (0.8% higher than Single-Chain). The integration of ImageGen-CoT enables effective bidirectional scaling across both comprehension and generation dimensions. This dual-axis scalability suggests new pathways for optimizing MLLM performance in complex multimodal tasks.",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
511,
|
| 1033 |
+
614,
|
| 1034 |
+
906,
|
| 1035 |
+
767
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 6
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "text",
|
| 1041 |
+
"text": "Qualitative Results We further validate the effectiveness of our proposed methods through visualization. Figures 5 showcase the generation results from SEED-X under different configurations: baseline SEED-X, SEED-X with ImageGen-CoT (via prompting), and SEED-X fine-tuned with the ImageGen-CoT dataset. As shown in the top of Figure 5, baseline SEED-X (b) generates a basic book shape but misses the implicit \"lace\" attribute. With ImageGen-",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
511,
|
| 1044 |
+
779,
|
| 1045 |
+
908,
|
| 1046 |
+
902
|
| 1047 |
+
],
|
| 1048 |
+
"page_idx": 6
|
| 1049 |
+
},
|
| 1050 |
+
{
|
| 1051 |
+
"type": "page_number",
|
| 1052 |
+
"text": "7",
|
| 1053 |
+
"bbox": [
|
| 1054 |
+
493,
|
| 1055 |
+
924,
|
| 1056 |
+
504,
|
| 1057 |
+
935
|
| 1058 |
+
],
|
| 1059 |
+
"page_idx": 6
|
| 1060 |
+
},
|
| 1061 |
+
{
|
| 1062 |
+
"type": "table",
|
| 1063 |
+
"img_path": "images/f57b59f67e2fec8052c50eb25c8462f972805cfec083f77c97a07cf9c817c10c.jpg",
|
| 1064 |
+
"table_caption": [
|
| 1065 |
+
"Table 3. We presents a comprehensive analysis of model performance on the COBSAT benchmark. Each model is evaluated in two generation modes: image generation (Img) and text generation (Txt)."
|
| 1066 |
+
],
|
| 1067 |
+
"table_footnote": [],
|
| 1068 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">Gen Mode</td><td colspan=\"5\">Object Inference Task</td><td colspan=\"5\">Attribute Inference Task</td><td rowspan=\"2\">Avg.↑</td></tr><tr><td>Img</td><td>Txt</td><td>Color</td><td>Bkg</td><td>Style</td><td>Action</td><td>Texture</td><td>Color</td><td>Bkg</td><td>Style</td><td>Action</td><td>Texture</td></tr><tr><td rowspan=\"2\">SEED-X</td><td>✓</td><td>-</td><td>.796</td><td>.412</td><td>.316</td><td>.596</td><td>.240</td><td>.176</td><td>.344</td><td>.260</td><td>.252</td><td>.104</td><td>.349</td></tr><tr><td>-</td><td>✓</td><td>.440</td><td>.388</td><td>.096</td><td>.080</td><td>.060</td><td>.116</td><td>.080</td><td>.180</td><td>.164</td><td>.132</td><td>.174</td></tr><tr><td rowspan=\"2\">+ ImageGen-CoT (via Prompt)</td><td>✓</td><td>-</td><td>.724</td><td>.440</td><td>.660</td><td>.784</td><td>.216</td><td>.312</td><td>.472</td><td>.228</td><td>.320</td><td>.240</td><td>.439</td></tr><tr><td>-</td><td>✓</td><td>.744</td><td>.212</td><td>.648</td><td>.476</td><td>.388</td><td>.356</td><td>.780</td><td>.292</td><td>.540</td><td>.136</td><td>.457</td></tr><tr><td rowspan=\"2\">+ FT w/ ImageGen-CoT Dataset</td><td>✓</td><td>-</td><td>.884</td><td>.692</td><td>.928</td><td>.936</td><td>.420</td><td>.504</td><td>.612</td><td>.660</td><td>.524</td><td>.424</td><td>.658 ↑88.5%</td></tr><tr><td>-</td><td>✓</td><td>.984</td><td>.568</td><td>.968</td><td>1.00</td><td>.640</td><td>.516</td><td>.984</td><td>.592</td><td>.712</td><td>.628</td><td>.760 ↑117.8%</td></tr></table>",
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
94,
|
| 1071 |
+
128,
|
| 1072 |
+
903,
|
| 1073 |
+
253
|
| 1074 |
+
],
|
| 1075 |
+
"page_idx": 7
|
| 1076 |
+
},
|
| 1077 |
+
{
|
| 1078 |
+
"type": "text",
|
| 1079 |
+
"text": "CoT prompting (c), the model's weak comprehension leads to poor ImageGen-CoT quality and even degraded generation quality. After fine-tuning with ImageGen-CoT dataset (d), with help of ImageGen-CoT, the model first successfully infers the shared attribute \"lace\" in CoT text and then generates the correct image - a book made of lace. Similarly, as shown in the bottom of Figure 5, baseline SEED-X (b) only generates a simple egg with an open mouth, ignoring key requirements like \"on stone\", \"in garden\", and similar expression (sad with upturned closed mouth). With ImageGen-CoT prompting (c), while the egg is placed on stone, it lacks both the required facial expression and garden environment. After fine-tuning (d), the model successfully understands all task requirements and generates a complete scene with an egg properly placed on a stone in a garden setting, maintaining similar facial features to the input. These qualitative results visually demonstrate the effectiveness of ImageGen-CoT and its corresponding dataset in enhancing model comprehension and generation capability, particularly in handling complex tasks that require attention to detail and scene understanding.",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
89,
|
| 1082 |
+
277,
|
| 1083 |
+
485,
|
| 1084 |
+
599
|
| 1085 |
+
],
|
| 1086 |
+
"page_idx": 7
|
| 1087 |
+
},
|
| 1088 |
+
{
|
| 1089 |
+
"type": "text",
|
| 1090 |
+
"text": "5. Further Analysis",
|
| 1091 |
+
"text_level": 1,
|
| 1092 |
+
"bbox": [
|
| 1093 |
+
89,
|
| 1094 |
+
608,
|
| 1095 |
+
259,
|
| 1096 |
+
627
|
| 1097 |
+
],
|
| 1098 |
+
"page_idx": 7
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "text",
|
| 1102 |
+
"text": "5.1. The principles behind ImageGen-CoT contribute to enhancing the model's performance.",
|
| 1103 |
+
"text_level": 1,
|
| 1104 |
+
"bbox": [
|
| 1105 |
+
89,
|
| 1106 |
+
632,
|
| 1107 |
+
482,
|
| 1108 |
+
665
|
| 1109 |
+
],
|
| 1110 |
+
"page_idx": 7
|
| 1111 |
+
},
|
| 1112 |
+
{
|
| 1113 |
+
"type": "text",
|
| 1114 |
+
"text": "As described above, our proposed method, ImageGen-CoT, significantly enhances model performance on T2I-ICL tasks. To better understand why ImageGen-CoT improves performance, we hypothesize that 'a better understanding leads to better generation.' Specifically, we believe that ImageGen-CoT enhances the comprehension capabilities of Unified-MLLMs. To quantitatively assess the model's comprehension ability, we have the model generate a text description for the next image, as indicated by the 'Gen_mode' label in Table 3. Then we conduct a series of experiments to validate this hypothesis.",
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
89,
|
| 1117 |
+
670,
|
| 1118 |
+
482,
|
| 1119 |
+
835
|
| 1120 |
+
],
|
| 1121 |
+
"page_idx": 7
|
| 1122 |
+
},
|
| 1123 |
+
{
|
| 1124 |
+
"type": "text",
|
| 1125 |
+
"text": "Results: The results in Table 3 demonstrate that integrating ImageGen-CoT significantly enhances model comprehension capabilities. When applied via prompting, SEED-X's text generation mode (Txt) achieves substantial gains, with",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
89,
|
| 1128 |
+
839,
|
| 1129 |
+
483,
|
| 1130 |
+
901
|
| 1131 |
+
],
|
| 1132 |
+
"page_idx": 7
|
| 1133 |
+
},
|
| 1134 |
+
{
|
| 1135 |
+
"type": "text",
|
| 1136 |
+
"text": "the average score improving from 0.174 to 0.457. Finetuning with the ImageGen-CoT dataset further amplifies this advantage, elevating the text mode to a remarkable average score of 0.760 (vs. SEED-X's baseline of 0.174). Notably, enhanced comprehension also improves image generation (Img): SEED-X with ImageGen-CoT via prompt raises the average score from 0.349 to 0.439, while finetuning further boosts it to 0.658. This aligns with our hypothesis: \"a better understanding leads to better generation.\"",
|
| 1137 |
+
"bbox": [
|
| 1138 |
+
511,
|
| 1139 |
+
279,
|
| 1140 |
+
906,
|
| 1141 |
+
429
|
| 1142 |
+
],
|
| 1143 |
+
"page_idx": 7
|
| 1144 |
+
},
|
| 1145 |
+
{
|
| 1146 |
+
"type": "text",
|
| 1147 |
+
"text": "5.2. Main obstacles in T2I-ICL",
|
| 1148 |
+
"text_level": 1,
|
| 1149 |
+
"bbox": [
|
| 1150 |
+
511,
|
| 1151 |
+
446,
|
| 1152 |
+
756,
|
| 1153 |
+
460
|
| 1154 |
+
],
|
| 1155 |
+
"page_idx": 7
|
| 1156 |
+
},
|
| 1157 |
+
{
|
| 1158 |
+
"type": "text",
|
| 1159 |
+
"text": "In this section we further discuss the main obstacles in T2I-ICL. We identify two primary challenges: First, as shown in Table 3, SEED-X's text generation mode (Txt) demonstrates relatively low performance scores (0.174), highlighting its difficulties in comprehending complex T2I-ICL instructions. Second, the image generation capabilities remain a bottleneck - notably, SEED-X fine-tuned with ImageGen-CoT dataset shows lower performance in image generation mode compared to text mode, indicating that while understanding may improve, translating this understanding into accurate image generation remains challenging.",
|
| 1160 |
+
"bbox": [
|
| 1161 |
+
511,
|
| 1162 |
+
468,
|
| 1163 |
+
908,
|
| 1164 |
+
650
|
| 1165 |
+
],
|
| 1166 |
+
"page_idx": 7
|
| 1167 |
+
},
|
| 1168 |
+
{
|
| 1169 |
+
"type": "text",
|
| 1170 |
+
"text": "6. Conclusion",
|
| 1171 |
+
"text_level": 1,
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
511,
|
| 1174 |
+
662,
|
| 1175 |
+
633,
|
| 1176 |
+
679
|
| 1177 |
+
],
|
| 1178 |
+
"page_idx": 7
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "text",
|
| 1182 |
+
"text": "In this work, we propose a novel framework that enhances Unified MLLMs' performance on T2I-ICL tasks by incorporating CoT reasoning before ImageGen. To further improve their performance, we develop an automatic pipeline to curate high-quality ImageGen-CoT datasets and fine-tune these models. Our extensive experiments demonstrate that our method significantly improves model performance, with SEED-X achieving up to $80\\%$ gains on T2I-ICL tasks after fine-tuning. We further explore test-time scaling strategies and propose a hybrid approach that combines multiple reasoning chains with diverse image generation. Our work establishes a novel paradigm for enhancing MLLMs' capabilities in handling complex multimodal generation tasks.",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
511,
|
| 1185 |
+
688,
|
| 1186 |
+
908,
|
| 1187 |
+
885
|
| 1188 |
+
],
|
| 1189 |
+
"page_idx": 7
|
| 1190 |
+
},
|
| 1191 |
+
{
|
| 1192 |
+
"type": "page_number",
|
| 1193 |
+
"text": "8",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
493,
|
| 1196 |
+
925,
|
| 1197 |
+
504,
|
| 1198 |
+
935
|
| 1199 |
+
],
|
| 1200 |
+
"page_idx": 7
|
| 1201 |
+
},
|
| 1202 |
+
{
|
| 1203 |
+
"type": "text",
|
| 1204 |
+
"text": "ImageGen-CoT: Enhancing Text-to-Image In-context Learning with Chain-of-Thought Reasoning",
|
| 1205 |
+
"text_level": 1,
|
| 1206 |
+
"bbox": [
|
| 1207 |
+
156,
|
| 1208 |
+
85,
|
| 1209 |
+
841,
|
| 1210 |
+
130
|
| 1211 |
+
],
|
| 1212 |
+
"page_idx": 8
|
| 1213 |
+
},
|
| 1214 |
+
{
|
| 1215 |
+
"type": "text",
|
| 1216 |
+
"text": "Supplementary Material",
|
| 1217 |
+
"bbox": [
|
| 1218 |
+
380,
|
| 1219 |
+
141,
|
| 1220 |
+
614,
|
| 1221 |
+
162
|
| 1222 |
+
],
|
| 1223 |
+
"page_idx": 8
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "text",
|
| 1227 |
+
"text": "Overview",
|
| 1228 |
+
"text_level": 1,
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
89,
|
| 1231 |
+
178,
|
| 1232 |
+
176,
|
| 1233 |
+
194
|
| 1234 |
+
],
|
| 1235 |
+
"page_idx": 8
|
| 1236 |
+
},
|
| 1237 |
+
{
|
| 1238 |
+
"type": "text",
|
| 1239 |
+
"text": "In this supplementary material, we present more details and more experimental results that are not included in the main paper. The contents include:",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
89,
|
| 1242 |
+
204,
|
| 1243 |
+
482,
|
| 1244 |
+
250
|
| 1245 |
+
],
|
| 1246 |
+
"page_idx": 8
|
| 1247 |
+
},
|
| 1248 |
+
{
|
| 1249 |
+
"type": "list",
|
| 1250 |
+
"sub_type": "text",
|
| 1251 |
+
"list_items": [
|
| 1252 |
+
"- A detailed introduction to CoBSAT [40] and Dreambench++[20] in Sec. S-A.",
|
| 1253 |
+
"Additional details on the experimental setup in Sec. S-B.",
|
| 1254 |
+
"- More experimental results in Sec. S-C.",
|
| 1255 |
+
"- Effectiveness of Iterative Refinement Strategy in Data Construction in Sec. S-D.",
|
| 1256 |
+
"- Automatic Dataset Construction Pipeline On CoBSAT S-E."
|
| 1257 |
+
],
|
| 1258 |
+
"bbox": [
|
| 1259 |
+
89,
|
| 1260 |
+
253,
|
| 1261 |
+
482,
|
| 1262 |
+
434
|
| 1263 |
+
],
|
| 1264 |
+
"page_idx": 8
|
| 1265 |
+
},
|
| 1266 |
+
{
|
| 1267 |
+
"type": "text",
|
| 1268 |
+
"text": "S-A. Dataset Details",
|
| 1269 |
+
"text_level": 1,
|
| 1270 |
+
"bbox": [
|
| 1271 |
+
89,
|
| 1272 |
+
450,
|
| 1273 |
+
264,
|
| 1274 |
+
465
|
| 1275 |
+
],
|
| 1276 |
+
"page_idx": 8
|
| 1277 |
+
},
|
| 1278 |
+
{
|
| 1279 |
+
"type": "text",
|
| 1280 |
+
"text": "CoBSAT: CoBSAT [40] is a comprehensive benchmark dataset designed specifically to evaluate Text-to-Image In-Context Learning (T2I-ICL) capabilities of Multimodal Large Language Models (MLLMs). The dataset consists of ten distinct tasks across five thematic areas, with each task carefully structured to assess different aspects of T2I-ICL performance. The benchmark is organized into two main categories: object-inference tasks and attribute-inference tasks. In object-inference tasks, models must infer the correct object from demonstrations while being given explicit attributes in the text prompt. Conversely, in attribute-inference tasks, models are provided with the object in the text prompt and must infer the appropriate attribute from the demonstrations. This dual structure enables a thorough evaluation of MLLMs' ability to learn and generalize from multimodal in-context examples.",
|
| 1281 |
+
"bbox": [
|
| 1282 |
+
88,
|
| 1283 |
+
477,
|
| 1284 |
+
482,
|
| 1285 |
+
718
|
| 1286 |
+
],
|
| 1287 |
+
"page_idx": 8
|
| 1288 |
+
},
|
| 1289 |
+
{
|
| 1290 |
+
"type": "text",
|
| 1291 |
+
"text": "Dreambench++: Dreambench++ [20] is a comprehensive benchmark for evaluating personalized text-to-image generation models. It features three key advantages: 1) Human-aligned evaluation through carefully designed GPT prompting that achieves over $79\\%$ agreement with human assessments; 2) Fully automated evaluation process that eliminates the need for time-consuming manual evaluation; and 3) A diverse dataset containing 150 images and 1,350 prompts across various categories including animals, humans, objects and styles. The benchmark evaluates two fundamental aspects of personalized image generation: concept preservation and prompt following capabilities.",
|
| 1292 |
+
"bbox": [
|
| 1293 |
+
88,
|
| 1294 |
+
719,
|
| 1295 |
+
482,
|
| 1296 |
+
901
|
| 1297 |
+
],
|
| 1298 |
+
"page_idx": 8
|
| 1299 |
+
},
|
| 1300 |
+
{
|
| 1301 |
+
"type": "text",
|
| 1302 |
+
"text": "S-B. Detailed Experimental Setup",
|
| 1303 |
+
"text_level": 1,
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
511,
|
| 1306 |
+
178,
|
| 1307 |
+
800,
|
| 1308 |
+
196
|
| 1309 |
+
],
|
| 1310 |
+
"page_idx": 8
|
| 1311 |
+
},
|
| 1312 |
+
{
|
| 1313 |
+
"type": "text",
|
| 1314 |
+
"text": "S-B.1. CoBSAT",
|
| 1315 |
+
"text_level": 1,
|
| 1316 |
+
"bbox": [
|
| 1317 |
+
511,
|
| 1318 |
+
202,
|
| 1319 |
+
635,
|
| 1320 |
+
218
|
| 1321 |
+
],
|
| 1322 |
+
"page_idx": 8
|
| 1323 |
+
},
|
| 1324 |
+
{
|
| 1325 |
+
"type": "text",
|
| 1326 |
+
"text": "Data Split. Following CoBSAT's default settings, we split the predefined lists of text inputs $(X)$ and latent variables $(\\Theta)$ into training and testing subsets with a 1:1 ratio, ensuring the test set contains completely unseen prompts and attributes. For training, we generate 300 samples per task by enumerating all possible combinations of $\\theta \\in \\Theta_{\\mathrm{train}}$ and $(x_{n})_{n = 1}^{N + 1}\\in X_{\\mathrm{train}}^{N + 1}$ , resulting in 3,000 training samples across 10 tasks. For evaluation, we randomly sample 250 prompts per task from $\\theta \\in \\Theta_{\\mathrm{test}}$ and $(x_{n})_{n = 1}^{N + 1}\\in X_{\\mathrm{test}}^{N + 1}$ , yielding a total of 2,500 test samples.",
|
| 1327 |
+
"bbox": [
|
| 1328 |
+
511,
|
| 1329 |
+
224,
|
| 1330 |
+
906,
|
| 1331 |
+
376
|
| 1332 |
+
],
|
| 1333 |
+
"page_idx": 8
|
| 1334 |
+
},
|
| 1335 |
+
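A sketch of the split-and-enumerate procedure, assuming the text inputs and latent variables are plain Python lists per task; the 1:1 split, the 300 training samples per task, and the 250 test prompts per task follow the text, while the enumerate-then-subsample step is an assumption about how 300 samples are drawn from all combinations.

```python
import itertools
import random

# 1:1 split of latent variables (thetas) and text inputs (xs), so the test
# set contains only unseen prompts and attributes.
def split_half(items):
    mid = len(items) // 2
    return items[:mid], items[mid:]

def build_task_samples(thetas, xs, n_shots, n_train=300, n_test=250):
    thetas_tr, thetas_te = split_half(thetas)
    xs_tr, xs_te = split_half(xs)
    # Training: enumerate (theta, x_1..x_{N+1}) combinations, keep 300.
    train_pool = [(t, seq) for t in thetas_tr
                  for seq in itertools.product(xs_tr, repeat=n_shots + 1)]
    train = random.sample(train_pool, n_train)
    # Testing: randomly sample 250 prompts from the held-out halves.
    test = [(random.choice(thetas_te),
             tuple(random.choices(xs_te, k=n_shots + 1)))
            for _ in range(n_test)]
    return train, test
```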
{
|
| 1336 |
+
"type": "text",
|
| 1337 |
+
"text": "Training Strategy. For model training, we fine-tune both SEED-LLaMA and SEED-X using LoRA. Specifically, SEED-LLaMA is fine-tuned with rank=64, $\\alpha = 16$ , learning rate=1e-4 for 1 epoch, while SEED-X uses rank=64, $\\alpha = 64$ , learning rate=1e-4 for 1 epoch.",
|
| 1338 |
+
"bbox": [
|
| 1339 |
+
511,
|
| 1340 |
+
376,
|
| 1341 |
+
905,
|
| 1342 |
+
452
|
| 1343 |
+
],
|
| 1344 |
+
"page_idx": 8
|
| 1345 |
+
},
|
| 1346 |
+
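These hyperparameters map directly onto a standard PEFT LoRA configuration, sketched below; the dropout value and `target_modules` list are assumptions (the text does not specify which projection layers are adapted).

```python
from peft import LoraConfig, get_peft_model

# LoRA settings from the text: SEED-LLaMA uses rank=64, alpha=16;
# SEED-X uses rank=64, alpha=64; both train with lr=1e-4 for 1 epoch.
seed_llama_lora = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.05,                    # assumed; not stated in the text
    target_modules=["q_proj", "v_proj"],  # assumed attention projections
)
seed_x_lora = LoraConfig(r=64, lora_alpha=64)

# model = get_peft_model(base_model, seed_llama_lora)  # base_model: the unified MLLM
```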
{
|
| 1347 |
+
"type": "text",
|
| 1348 |
+
"text": "S-B.2. Dreambench++",
|
| 1349 |
+
"text_level": 1,
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
511,
|
| 1352 |
+
460,
|
| 1353 |
+
687,
|
| 1354 |
+
476
|
| 1355 |
+
],
|
| 1356 |
+
"page_idx": 8
|
| 1357 |
+
},
|
| 1358 |
+
{
|
| 1359 |
+
"type": "text",
|
| 1360 |
+
"text": "Data Split. To prevent subject overlap in evaluation, we split the dataset by subjects, with $60\\%$ subjects (90 subjects, resulting in 810 samples) for training and $40\\%$ subjects (60 subjects, resulting in 540 samples) for testing.",
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
511,
|
| 1363 |
+
483,
|
| 1364 |
+
905,
|
| 1365 |
+
544
|
| 1366 |
+
],
|
| 1367 |
+
"page_idx": 8
|
| 1368 |
+
},
|
| 1369 |
+
{
|
| 1370 |
+
"type": "text",
|
| 1371 |
+
"text": "Training Strategy. For Dreambench++, SEED-LLaMA is fine-tuned using LoRA with rank=64, $\\alpha = 16$ , learning rate=1e-4 for 5 epochs, while SEED-X uses rank=64, $\\alpha = 64$ , learning rate=1e-4 for 1 epoch.",
|
| 1372 |
+
"bbox": [
|
| 1373 |
+
511,
|
| 1374 |
+
544,
|
| 1375 |
+
905,
|
| 1376 |
+
604
|
| 1377 |
+
],
|
| 1378 |
+
"page_idx": 8
|
| 1379 |
+
},
|
| 1380 |
+
{
|
| 1381 |
+
"type": "text",
|
| 1382 |
+
"text": "S-C. More experimental results",
|
| 1383 |
+
"text_level": 1,
|
| 1384 |
+
"bbox": [
|
| 1385 |
+
511,
|
| 1386 |
+
618,
|
| 1387 |
+
781,
|
| 1388 |
+
635
|
| 1389 |
+
],
|
| 1390 |
+
"page_idx": 8
|
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "text",
|
| 1394 |
+
"text": "As described in the main paper, the ImageGen-CoT dataset comprises two distinct components. The first component focuses on training the model to generate ImageGen-CoT text, while the second component teaches the model to generate images based on the generated ImageGen-CoT text. While our main paper primarily focused on training using Part I of the dataset, here we extend our experiments by utilizing the complete dataset for comprehensive evaluation. As presented in Tables S-1 and S-2, we conducted comprehensive experiments using both parts of the ImageGen-CoT dataset. On the CoBSAT benchmark, SEED-LLaMA fine-tuned with the complete ImageGen-CoT dataset achieved a significant performance gain of $+36.6\\%$ $(0.254\\rightarrow 0.347)$ compared to the baseline model. Similarly, SEED-X demonstrated remarkable improvement with a $+79.4\\%$ increase $(0.349\\rightarrow 0.626)$ over its baseline performance. For the Dreambench++ bench",
|
| 1395 |
+
"bbox": [
|
| 1396 |
+
511,
|
| 1397 |
+
643,
|
| 1398 |
+
906,
|
| 1399 |
+
900
|
| 1400 |
+
],
|
| 1401 |
+
"page_idx": 8
|
| 1402 |
+
},
|
| 1403 |
+
{
|
| 1404 |
+
"type": "page_number",
|
| 1405 |
+
"text": "9",
|
| 1406 |
+
"bbox": [
|
| 1407 |
+
493,
|
| 1408 |
+
924,
|
| 1409 |
+
504,
|
| 1410 |
+
935
|
| 1411 |
+
],
|
| 1412 |
+
"page_idx": 8
|
| 1413 |
+
},
|
| 1414 |
+
{
|
| 1415 |
+
"type": "table",
|
| 1416 |
+
"img_path": "images/f84e309ad26374b6ad481f54a07db5e5c50b3f2215fc1be0f14eceb83297c52b.jpg",
|
| 1417 |
+
"table_caption": [
|
| 1418 |
+
"Table S-1. Main results on CoBSAT benchmark. \"FT w/ GT Image\" denotes fine-tuning with ground truth images, while \"FT w/ ImageGen-CoT\" represents fine-tuning with our ImageGen-CoT dataset. The results demonstrate that ImageGen-CoT significantly improves model performance, with relative improvements over baseline model shown in red."
|
| 1419 |
+
],
|
| 1420 |
+
"table_footnote": [],
|
| 1421 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"5\">Object-Inference Task</td><td colspan=\"5\">Attribute-Inference Task</td><td rowspan=\"2\">Avg.↑</td></tr><tr><td>Color-I</td><td>Bkg-I</td><td>Style-I</td><td>Action-I</td><td>Texture-I</td><td>Color-II</td><td>Bkg-II</td><td>Style-II</td><td>Action-II</td><td>Texture-II</td></tr><tr><td>SEED-LLaMA</td><td>.616</td><td>.216</td><td>.272</td><td>.592</td><td>.112</td><td>.088</td><td>.168</td><td>.192</td><td>.220</td><td>.056</td><td>.254</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.700</td><td>.276</td><td>.300</td><td>.408</td><td>.084</td><td>.176</td><td>.292</td><td>.272</td><td>.192</td><td>.132</td><td>.283</td></tr><tr><td>+ FT w/ GT Image</td><td>.632</td><td>.272</td><td>.352</td><td>.540</td><td>.128</td><td>.164</td><td>.200</td><td>.256</td><td>.172</td><td>.112</td><td>.283</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.620</td><td>.368</td><td>.384</td><td>.424</td><td>.060</td><td>.192</td><td>.288</td><td>.208</td><td>.216</td><td>.148</td><td>.291</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.716</td><td>.432</td><td>.436</td><td>.420</td><td>.200</td><td>.168</td><td>.380</td><td>.256</td><td>.216</td><td>.248</td><td>.347 ↑36.6%</td></tr><tr><td>SEED-X</td><td>.796</td><td>.412</td><td>.316</td><td>.596</td><td>.240</td><td>.176</td><td>.344</td><td>.260</td><td>.252</td><td>.104</td><td>.349</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.724</td><td>.440</td><td>.660</td><td>.784</td><td>.216</td><td>.312</td><td>.472</td><td>.228</td><td>.320</td><td>.240</td><td>.439</td></tr><tr><td>+ FT w/ GT Image</td><td>.936</td><td>.712</td><td>.896</td><td>.860</td><td>.468</td><td>.280</td><td>.324</td><td>.388</td><td>.636</td><td>.424</td><td>.592</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.884</td><td>.692</td><td>.928</td><td>.936</td><td>.420</td><td>.504</td><td>.612</td><td>.660</td><td>.524</td><td>.424</td><td>.658</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.832</td><td>.596</td><td>.840</td><td>.892</td><td>.484</td><td>.384</td><td>.548</td><td>.572</td><td>.608</td><td>.500</td><td>.626 ↑79.4%</td></tr></table>",
"bbox": [93, 154, 903, 308],
"page_idx": 9
},
{
"type": "table",
"img_path": "images/e699551feb743e13e45a3eb5c8a3d7996c9f974b127037fa874e3fdf51976429.jpg",
"table_caption": [
"Table S-2. Evaluation results on Dreambench++ benchmark. CP refers to concept preservation and PF refers to prompt following metrics. \"FT\" stands for fine-tuning. The relative gains over baseline model are shown in red."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"5\">Concept Preservation</td><td colspan=\"4\">Prompt Following</td><td rowspan=\"2\">CP-PF↑</td></tr><tr><td>Animal</td><td>Human</td><td>Object</td><td>Style</td><td>Overall</td><td>Photorealistic</td><td>Style</td><td>Imaginative</td><td>Overall</td></tr><tr><td>SEED-LLaMA</td><td>.436</td><td>.315</td><td>.288</td><td>.381</td><td>.358</td><td>.306</td><td>.202</td><td>.154</td><td>.218</td><td>.078</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.390</td><td>.241</td><td>.262</td><td>.346</td><td>.317</td><td>.291</td><td>.211</td><td>.170</td><td>.222</td><td>.078</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.399</td><td>.290</td><td>.271</td><td>.318</td><td>.325</td><td>.348</td><td>.355</td><td>.210</td><td>.310</td><td>.101</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.414</td><td>.269</td><td>.243</td><td>.328</td><td>.319</td><td>.408</td><td>.317</td><td>.199</td><td>.334</td><td>.107 ↑37.2%</td></tr><tr><td>SEED-X</td><td>.647</td><td>.420</td><td>.526</td><td>.571</td><td>.559</td><td>.346</td><td>.342</td><td>.303</td><td>.337</td><td>.188</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.547</td><td>.293</td><td>.369</td><td>.424</td><td>.427</td><td>.862</td><td>.775</td><td>.737</td><td>.817</td><td>.347</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.549</td><td>.410</td><td>.403</td><td>.432</td><td>.458</td><td>.922</td><td>.851</td><td>.846</td><td>.881</td><td>.403</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.511</td><td>.358</td><td>.424</td><td>.303</td><td>.421</td><td>.926</td><td>.910</td><td>.870</td><td>.906</td><td>.384 ↑104.2%</td></tr></table>",
"bbox": [93, 363, 903, 488],
"page_idx": 9
},
{
"type": "text",
"text": "mark, training with the complete dataset resulted in even more pronounced improvements. SEED-LLaMA showed a $+37.2\\%$ gain $(0.078 \\rightarrow 0.107)$ in CP-PF score, while SEED-X achieved a substantial $+104.2\\%$ improvement $(0.188 \\rightarrow 0.384)$. These comprehensive results demonstrate that utilizing the complete ImageGen-CoT dataset can still significantly improve model performance.",
"bbox": [89, 512, 483, 619],
"page_idx": 9
},
{
"type": "text",
"text": "S-D. Effectiveness of Iterative Refinement Strategy in Data Construction",
"text_level": 1,
"bbox": [89, 652, 483, 690],
"page_idx": 9
},
{
"type": "text",
"text": "We evaluate the effectiveness of our iterative refinement strategy on both CoBSAT and Dreambench++ datasets. As demonstrated in Table S-3, the proposed strategy yields consistent improvements across all evaluation metrics. Specifically, on the CoBSAT dataset, our method achieves improvements of $1.1\\%$, $2.9\\%$, and $2.0\\%$ in object inference, attribute inference, and overall score, respectively. For Dreambench++, the refinement strategy enhances prompt following (PF) by $0.9\\%$ and concept preservation (CP) by $4.7\\%$, resulting in a substantial $4.7\\%$ improvement in the combined PF*CP metric. These comprehensive results validate that our iterative refinement approach significantly enhances the quality of the constructed dataset.",
"bbox": [89, 704, 483, 900],
"page_idx": 9
},
{
"type": "table",
"img_path": "images/c4f384ea2210118644fde12e079ab913386437215efaf0782535d95e4110e403.jpg",
"table_caption": [
"Table S-3. Performance comparison of data construction with and without iterative refinement. (a) Results on CoBSAT."
],
"table_footnote": [],
"table_body": "<table><tr><td>Method</td><td>Object</td><td>Attribute</td><td>Overall</td></tr><tr><td>w/o Iterative Refine</td><td>0.782</td><td>0.704</td><td>0.743</td></tr><tr><td>w/ Iterative Refine</td><td>0.793</td><td>0.733</td><td>0.763</td></tr></table>",
"bbox": [529, 549, 890, 614],
"page_idx": 9
},
{
"type": "table",
"img_path": "images/f0975a3eeaee4f1643c029b36a87e6b1a629824ea5d3055537e7b7b1d95df7e7.jpg",
"table_caption": [
"(b) Results on Dreambench++"
],
"table_footnote": [],
"table_body": "<table><tr><td>Method</td><td>PF</td><td>CP</td><td>PF*CP</td></tr><tr><td>w/o Iterative Refine</td><td>0.937</td><td>0.470</td><td>0.442</td></tr><tr><td>w/ Iterative Refine</td><td>0.946</td><td>0.517</td><td>0.489</td></tr></table>",
"bbox": [545, 628, 877, 679],
"page_idx": 9
},
{
"type": "text",
"text": "S-E. Automatic Dataset Construction Pipeline on CoBSAT",
"text_level": 1,
"bbox": [511, 705, 906, 739],
"page_idx": 9
},
{
"type": "text",
"text": "On CoBSAT, we initially adopted the same method as DreamBench++. However, we found that this self-boosting approach underperformed due to the inherent complexity of CoBSAT, which requires the model to infer implicit visual-semantic relationships, posing a significant challenge to the model's reasoning capability. To address this challenge, we sampled multiple text prompts from the MLLM and selected the best one using the self-consistency method. However, vanilla self-consistency cannot be directly applied to CoBSAT. Self-consistency is commonly used in mathematical",
"bbox": [511, 750, 906, 900],
"page_idx": 9
},
{
"type": "page_number",
"text": "10",
"bbox": [490, 924, 508, 936],
"page_idx": 9
},
{
"type": "text",
"text": "problem solving, where text answers are precise (e.g., numbers or options) and consistency can be directly evaluated using string matching. In contrast, CoBSAT involves long and complex text prompts, making direct string-based consistency evaluation infeasible.",
"bbox": [89, 90, 480, 165],
"page_idx": 10
},
{
"type": "text",
"text": "The pipeline proceeds as follows: We first sample multiple chains of thought (CoT) from the MLLM. These CoTs are then used, along with the input sequence context, to generate multiple text prompts. Formally, let the CoT sampled from the MLLM be denoted as $\\cot_t^i$, where $i = 0, 1, \\dots, M - 1$, and $M$ is the number of sampled CoTs. Each CoT, combined with the input sequence $x$, is used to construct a corresponding text prompt $p_t^i$ as:",
"bbox": [89, 167, 482, 287],
"page_idx": 10
},
{
"type": "equation",
"text": "\n$$\np_{t}^{i} = \\mathcal{F}\\left(\\cot_{t}^{i}, x\\right), \\quad i = 0, 1, \\dots, M - 1, \\tag{3}\n$$\n",
"text_format": "latex",
"bbox": [151, 301, 482, 320],
"page_idx": 10
},
{
"type": "text",
"text": "where $\\mathcal{F}$ denotes the operation of generating a text prompt from the CoT and the input sequence context.",
"bbox": [89, 327, 482, 356],
"page_idx": 10
},
{
"type": "text",
"text": "Next, we convert each text prompt into a vector representation using a text embedding model $\\mathcal{E}$:",
"bbox": [89, 357, 482, 387],
"page_idx": 10
},
{
"type": "equation",
"text": "\n$$\nv_{t}^{i} = \\mathcal{E}\\left(p_{t}^{i}\\right), \\quad i = 0, 1, \\dots, M - 1, \\tag{4}\n$$\n",
"text_format": "latex",
"bbox": [166, 401, 480, 420],
"page_idx": 10
},
{
"type": "text",
"text": "where $v_{t}^{i}\\in \\mathbb{R}^{d}$ is the embedding vector of the $i$-th prompt.",
"bbox": [89, 426, 482, 455],
"page_idx": 10
},
{
"type": "text",
"text": "The similarity $S_{ij}$ between two prompts is then measured using the inner product of their vector representations:",
"bbox": [89, 457, 483, 487],
"page_idx": 10
},
{
"type": "equation",
"text": "\n$$\nS_{ij} = \\left\\langle v_{t}^{i}, v_{t}^{j} \\right\\rangle = v_{t}^{i} \\cdot v_{t}^{j}, \\tag{5}\n$$\n",
"text_format": "latex",
"bbox": [204, 501, 482, 520],
"page_idx": 10
},
{
"type": "text",
"text": "where $\\langle \\cdot ,\\cdot \\rangle$ denotes the inner product.",
"bbox": [109, 526, 361, 541],
"page_idx": 10
},
{
"type": "text",
"text": "The average similarity for each prompt $p_t^i$ is computed as:",
"bbox": [89, 542, 482, 571],
"page_idx": 10
},
{
"type": "equation",
"text": "\n$$\n\\bar{S}_{i} = \\frac{1}{M - 1}\\sum_{\\substack{j = 0\\\\ j\\neq i}}^{M - 1}S_{ij}. \\tag{6}\n$$\n",
"text_format": "latex",
"bbox": [209, 584, 482, 636],
"page_idx": 10
},
{
"type": "text",
"text": "Finally, the prompt $p_t^{i^*}$ with the highest average similarity is selected as the best candidate:",
"bbox": [89, 643, 482, 675],
"page_idx": 10
},
{
"type": "equation",
"text": "\n$$\ni^{*} = \\arg \\max_{i} \\bar{S}_{i}. \\tag{7}\n$$\n",
"text_format": "latex",
"bbox": [227, 690, 482, 712],
"page_idx": 10
},
{
"type": "text",
"text": "The selected prompt $p_t^{i^*}$ is then used to generate the image, which is considered the best image. Simultaneously, its corresponding CoT is also identified as the best CoT. The CoT text and the generated image are then concatenated to form the ImageGen-CoT dataset.",
"bbox": [89, 720, 482, 791],
"page_idx": 10
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [91, 819, 187, 835],
"page_idx": 10
},
{
"type": "ref_text",
"text": "[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances",
"bbox": [98, 844, 482, 900],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"in neural information processing systems, 35:23716-23736, 2022. 2, 3",
"[2] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023. 3",
"[3] Tom B Brown. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020. 2",
"[4] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. Science China Information Sciences, 67(12):220101, 2024. 3",
"[5] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023. 1, 2, 3",
"[6] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. 3",
"[7] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 3",
"[8] Yuying Ge, Sijie Zhao, Ziyun Zeng, Yixiao Ge, Chen Li, Xintao Wang, and Ying Shan. Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218, 2023. 1, 2, 3, 4, 5, 6",
"[9] Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024. 1, 2, 5, 6",
"[10] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 3",
"[11] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. 3",
"[12] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 3",
"[13] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. 3, 4, 5",
"[14] Chunyuan Li, Zhe Gan, Zhengyuan Yang, Jianwei Yang, Linjie Li, Lijuan Wang, Jianfeng Gao, et al. Multimodal"
],
"bbox": [514, 92, 903, 900],
"page_idx": 10
},
{
"type": "page_number",
"text": "11",
"bbox": [490, 924, 506, 936],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"foundation models: From specialists to general-purpose assistants. Foundations and Trends® in Computer Graphics and Vision, 16(1-2):1-214, 2024. 1, 3",
"[15] Hao Li, Changyao Tian, Jie Shao, Xizhou Zhu, Zhaokai Wang, Jinguo Zhu, Wenhan Dou, Xiaogang Wang, Hongsheng Li, Lewei Lu, et al. Synergen-vl: Towards synergistic image understanding and generation with vision experts and token folding. arXiv preprint arXiv:2412.09604, 2024. 2, 3",
"[16] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 3",
"[17] Haoyu Lu, Wen Liu, Bo Zhang, Bingxuan Wang, Kai Dong, Bo Liu, Jingxiang Sun, Tongzheng Ren, Zhuoshu Li, Hao Yang, et al. Deepseek-vl: towards real-world vision-language understanding. arXiv preprint arXiv:2403.05525, 2024. 3",
"[18] Jiasen Lu, Christopher Clark, Sangho Lee, Zichen Zhang, Savya Khosla, Ryan Marten, Derek Hoiem, and Aniruddha Kembhavi. Unified-io 2: Scaling autoregressive multimodal models with vision language audio and action. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26439-26455, 2024. 1, 2, 3",
"[19] Jae Wan Park, Sang Hyun Park, Jun Young Koh, Junha Lee, and Min Song. Cat: Contrastive adapter training for personalized image generation. arXiv preprint arXiv:2404.07554, 2024. 3",
"[20] Yuang Peng, Yuxin Cui, Haomiao Tang, Zekun Qi, Runpei Dong, Jing Bai, Chunrui Han, Zheng Ge, Xiangyu Zhang, and Shu-Tao Xia. Dreambench++: A human-aligned benchmark for personalized image generation. arXiv preprint arXiv:2406.16855, 2024. 3, 5, 9",
"[21] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 3",
"[22] Liao Qu, Huichao Zhang, Yiheng Liu, Xu Wang, Yi Jiang, Yiming Gao, Hu Ye, Daniel K Du, Zehuan Yuan, and Xinglong Wu. Tokenflow: Unified image tokenizer for multimodal understanding and generation. arXiv preprint arXiv:2412.03069, 2024. 2, 3",
"[23] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3",
"[24] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. 3",
"[25] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 3"
],
"bbox": [91, 90, 482, 898],
"page_idx": 11
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[26] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. 3",
"[27] Kihyuk Sohn, Lu Jiang, Jarred Barber, Kimin Lee, Nataniel Ruiz, Dilip Krishnan, Huiwen Chang, Yuanzhen Li, Irfan Essa, Michael Rubinstein, et al. Styledrop: Text-to-image synthesis of any style. Advances in Neural Information Processing Systems, 36, 2024. 3",
"[28] Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14398-14409, 2024. 1, 3",
"[29] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 2, 3",
"[30] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. 2, 3",
"[31] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021. 2",
"[32] Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, and Lijuan Wang. Git: A generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100, 2022. 3",
"[33] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, and Jifeng Dai. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 5",
"[34] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1, 2, 3, 4",
"[35] Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024. 2, 3",
"[36] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 3",
"[37] Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Yumao Lu, Zicheng Liu, and Lijuan Wang. An empirical study of gpt-3 for few-shot knowledge-based vqa. In Pro"
],
"bbox": [516, 90, 905, 901],
"page_idx": 11
},
{
"type": "page_number",
"text": "12",
"bbox": [490, 924, 508, 936],
"page_idx": 11
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"ceedings of the AAAI conference on artificial intelligence, pages 3081-3089, 2022. 2",
"[38] Zhengyuan Yang, Linjie Li, Kevin Lin, Jianfeng Wang, Chung-Ching Lin, Zicheng Liu, and Lijuan Wang. The dawn of lmms: Preliminary explorations with gpt-4v(ision). arXiv preprint arXiv:2309.17421, 2023. 3",
"[39] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. 3",
"[40] Yuchen Zeng, Wonjun Kang, Yicong Chen, Hyung Il Koo, and Kangwook Lee. Can mllms perform text-to-image in-context learning? arXiv preprint arXiv:2402.01293, 2024. 2, 5, 9",
"[41] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 3"
],
"bbox": [91, 90, 482, 359],
"page_idx": 12
},
{
"type": "page_number",
"text": "13",
"bbox": [490, 924, 506, 936],
"page_idx": 12
}
]
data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_model.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2503_19xxx/2503.19312/616f0c27-2aa3-4fd4-9ad8-f11a1fb51b2d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0cd4b43591cc9cc83ff4315d760d0000538dfe97dc40666aab293a2c4c63927e
size 1944532
data/2025/2503_19xxx/2503.19312/full.md
ADDED
@@ -0,0 +1,362 @@
# ImageGen-CoT: Enhancing Text-to-Image In-context Learning with Chain-of-Thought Reasoning

Jiaqi Liao$^{1\dagger}$, Zhengyuan Yang$^{1}$, Linjie Li$^{1}$, Dianqi Li, Kevin Lin$^{1}$, Yu Cheng$^{2}$, Lijuan Wang$^{1\boxtimes}$
$^{1}$Microsoft $^{2}$The Chinese University of Hong Kong


Figure 1. Comparisons between SEED-X and SEED-X FT w/ ImageGen-CoT dataset. Our method forces the model to generate a thought process before image generation. The top row shows SEED-X failing to infer the 'Leather' style, generating only a box, while our approach enables SEED-X to recognize the leather style and produce the intended leather box. The bottom row shows SEED-X failing to capture the unkempt fur and the cloud, while our method successfully recognizes these key elements.

# Abstract

In this work, we study the problem of Text-to-Image In-Context Learning (T2I-ICL). While Unified Multimodal LLMs (MLLMs) have advanced rapidly in recent years, they struggle with contextual reasoning in T2I-ICL scenarios. To address this limitation, we propose a novel framework that incorporates a thought process called ImageGen-CoT prior to image generation. To avoid generating unstructured, ineffective reasoning steps, we develop an automatic pipeline to curate a high-quality ImageGen-CoT dataset. We then fine-tune MLLMs using this dataset to enhance their contextual reasoning capabilities. To further enhance performance, we explore test-time scale-up strategies and propose a novel hybrid scaling approach. This approach first generates multiple ImageGen-CoT chains and then produces multiple images for each chain via sampling. Extensive experiments demonstrate the effectiveness of our proposed method. Notably, fine-tuning with the ImageGen-CoT dataset leads to a substantial $80\%$ performance gain for SEED-X on T2I-ICL tasks. See our project page at https://ImageGen-CoT.github.io/. Code and model weights will be open-sourced.

# 1. Introduction

Human intelligence excels at learning novel concepts through contextual observation and adapting to new inputs. When presented with a series of interleaved text-image examples—such as "a leather-bound book", followed by "a leather apple"—and then asked to generate an image for the query "a box," humans intuitively infer the implicit pattern of "leather" and apply it to the new query, resulting in "a leather box". This reasoning ability to learn novel concepts from multimodal contexts underpins creative problem-solving. Existing unified Multimodal Large Language Models (unified MLLMs) [5, 8, 9, 14, 18, 28, 34] have demonstrated remarkable capabilities in multimodal understanding and generation within a single model architecture. Given their ability to process and generate across modalities similar to human cognition, it is natural to investigate whether these models can exhibit reasoning capabilities comparable to those of humans. To evaluate this,
we adopt the Text-to-Image In-Context Learning (T2I-ICL) task [40], which requires models to process interleaved text-image inputs and generate coherent outputs by learning from multimodal contexts (Figure 1). Despite the impressive capabilities of unified MLLMs, our experiments reveal that they struggle to replicate this reasoning capability, often failing to grasp contextual relationships or preserve compositional consistency in T2I-ICL tasks.

To overcome these challenges, building upon the demonstrated success of CoT prompting in enhancing complex task processing for LLMs, we propose a novel framework that involves a structured thought process called ImageGen-CoT prior to image generation. Our key insight is that explicitly generating reasoning steps before image synthesis helps unified MLLMs better understand multimodal contexts and produce more coherent outputs. However, these models often produce disorganized and incoherent thought processes, leading to suboptimal performance. To address these limitations, we first propose an automated dataset construction pipeline to generate ImageGen-CoT datasets, where each sample consists of a pair of ImageGen-CoT and a corresponding image. The pipeline comprises three main stages: 1) collecting T2I-ICL instructions, 2) using MLLMs to generate step-by-step reasoning (ImageGen-CoT), and 3) producing image descriptions via MLLMs for diffusion models to generate images. To further enhance the dataset quality, we employ an iterative refinement process: the model first generates multiple text prompts and corresponding images, selects the best one, critiques the generated image, and iteratively refines the prompt until the maximum number of rounds is reached. Then, we fine-tune the model using this dataset, which significantly enhances the image generation capabilities of unified MLLMs in T2I-ICL tasks.

Despite the strong performance, T2I-ICL tasks' complexity leaves room for improvement. Inspired by NLP's Best-of-N paradigm, we explore three test-time scaling strategies: 1. Multi-Chain: Generate multiple ImageGen-CoT chains, each producing one image; 2. Single-Chain: Create multiple image variants from one ImageGen-CoT; 3. Hybrid: Combine both methods, using multiple reasoning chains with multiple image variants per chain. Our empirical studies reveal two critical insights: (1) instead of changing seeds, generating multiple ImageGen-CoTs via high-temperature LLM decoding achieves performance similar to seed-based scaling; (2) ImageGen-CoT enables bidirectional expansion, either generating multiple instances of ImageGen-CoT or modifying seeds to create diverse images, which outperforms single-dimension scaling and opens new pathways for performance optimization in complex multimodal tasks.
To evaluate the effectiveness of our method, we experiment with leading Unified MLLMs. These models can be categorized into two types based on their visual representa

Figure 2. Performance comparison on CoBSAT and DreamBench++ benchmarks. Our method significantly improves SEED-X's performance through progressive enhancements: adding ImageGen-CoT, fine-tuning with the ImageGen-CoT dataset, and applying test-time scaling strategies.



tions: discrete visual tokens [8, 18, 22, 29, 34] and continuous visual embeddings [5, 15, 30, 35]. We select SEED-LLaMA [8] as a representative of the discrete approach and SEED-X [9] for the continuous approach, considering their open-source availability and support for interleaved text-image input. Extensive experiments demonstrate the effectiveness of our method. Specifically, as shown in Figure 2, SEED-X FT with ImageGen-CoT improves by $89\%$ and $114\%$ on CoBSAT and DreamBench++. With the scaling strategy, it further achieves 0.909 and 0.543, respectively.
Our contributions can be summarized as follows:
1. We propose a novel framework that generates a thought process (called ImageGen-CoT) to enhance the performance of unified MLLMs in T2I-ICL tasks.
2. We construct high-quality ImageGen-CoT datasets for fine-tuning unified MLLMs through an automatic dataset construction pipeline.
3. We explore Best-of-N test-time scaling paradigms and propose a hybrid scaling approach that first generates multiple ImageGen-CoT chains and then generates multiple image variations per chain.
# 2. Related Work
# 2.1. In-Context Learning
Large language models (LLMs) [3] have exhibited exceptional capabilities for text in-context learning (T-ICL). This ability allows LLMs to adapt to new tasks by observing a few illustrative examples provided as context, without requiring parameter updates. These models demonstrate remarkable performance on various tasks, with extension to multimodal models [1, 31, 37]. With the development of image generation, recent studies have proposed text-to-image in-context learning (T2I-ICL). For instance, CoBSAT [40] is the first benchmark designed to evaluate a
model's T2I-ICL (Text-to-Image In-Context Learning) generation capabilities. This includes assessing the model's ability to rapidly adapt to tasks given the in-context demonstrations, which are key aspects of T2I-ICL. Emu2 [28] also evaluates models' T2I-ICL capabilities through subject customization in DreamBench [25], where the model needs to bind visual concepts from reference images to generate customized outputs. In this work, following previous studies, we validate our approach's improvement on the T2I-ICL task using CoBSAT and DreamBench++ [20].
# 2.2. Text-to-Image Generation
Text-to-Image (T2I) generation [21, 23, 24, 26, 39] aims to generate images based on a user's textual description. With the development of T2I diffusion models, such as DALL-E 3 [2], SD3 [6], and FLUX.1-Schnell [13], users can now generate high-quality and vivid images directly from text descriptions. Building on this success, there is an increasing demand for models to generate customized content, such as specific subjects, styles, or attributes tailored to individual user needs. Consequently, a variety of methods have emerged to address the challenge of subject-customized generation. These methods [7, 12, 19, 25, 27] typically rely on fine-tuning techniques, such as LoRA [11] or contrastive learning [10], to specialize a general T2I model for subject customization. However, these methods require the collection of subject-specific datasets and involve time-consuming retraining for each new user request. This makes them resource-intensive, limiting their ability to generalize quickly to new needs. To address these challenges, researchers [28] train EMU2 on multimodal sequences, leveraging its inherent ICL ability to quickly bind visual concepts from the context. Despite these advancements, their performances remain limited. In this work, we explore how introducing a thought process prior to image generation, called ImageGen-CoT, can significantly enhance their performance on the T2I-ICL task.
# 2.3. Unified Multimodal Language Models
Recent years have witnessed remarkable progress in multimodal AI across two key domains: understanding and generation. In understanding, Large Vision-Language Models (LVLMs) [1, 4, 14, 16, 17, 32, 36, 38, 41] have achieved impressive capabilities in complex visual-textual reasoning tasks. Meanwhile, in generation, Text-to-Image diffusion models [2, 6, 13] have advanced to produce photorealistic images that can rival professional artists' work. Given these developments, researchers have been exploring ways to unify multimodal understanding and generation capabilities within a single model architecture. These models can be categorized into two approaches based on their visual representations: discrete visual tokens [8, 18, 22, 29, 34] and continuous visual embeddings [5, 15, 30, 35]. Discrete ap
proaches leverage VQ-VAE to tokenize images into discrete tokens, enabling training and inference similar to language processing. In contrast, continuous approaches generate latent embeddings that are subsequently processed through diffusion models for image synthesis.
# 3. Method
In this section, we present our ImageGen-CoT framework in detail. First, we introduce the formulation of ImageGen-CoT. (Sec.3.1). Second, we describe our automated pipeline for collecting high-quality ImageGen-CoT datasets (Sec.3.2). Third, we provide a detailed formulation of the dataset and the loss function used to fine-tune the model with the collected dataset (Sec.3.3). Finally, we explore various strategies to enhance model performance during inference, culminating in a novel hybrid scaling approach that addresses both contextual comprehension and generation challenges (Sec.3.4).
# 3.1. Formulation of ImageGen-CoT

As described above, T2I-ICL tasks require models to have a high level of comprehension. To enhance the model's capacity, we propose a new framework that generates a Chain-of-Thought, which we call ImageGen-CoT, before performing image generation. We initially expected models to simultaneously output both the ImageGen-CoT reasoning chain and the corresponding image in a single forward pass.

In practice, however, we observe that models frequently fail to generate images even when explicitly prompted to first generate ImageGen-CoT followed by the image output. As illustrated in Figure 3, to ensure reliable image generation, we develop a two-stage inference protocol. The first stage involves prompting the model to generate the ImageGen-CoT reasoning chain $R$. In the second stage, we combine the original input $X$ with the generated ImageGen-CoT $R$, along with a mandatory image generation token $\langle \mathrm{image} \rangle$, to guarantee the production of the target image $I$. This process can be formally expressed as:

$$
\begin{array}{l}
\text{Stage 1: } R = \mathcal{M}(X \oplus \text{instruction}) \\
\text{Stage 2: } I = \mathcal{M}(X \oplus R \oplus \langle \text{image} \rangle)
\end{array} \tag{1}
$$
where $\mathcal{M}$ denotes the unified MLLMs, and $\oplus$ represents the concatenation operation.
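
To make the two-stage protocol concrete, a minimal sketch is shown below. It is illustrative only: `model.generate`, its `modality` argument, and `COT_INSTRUCTION` are hypothetical names, not the actual SEED-LLaMA or SEED-X interface.

```python
# Minimal sketch of the two-stage inference protocol in Eq. (1).
# `model.generate` and `modality` are hypothetical; real unified-MLLM APIs differ.
COT_INSTRUCTION = (
    "Reason step by step about the implicit pattern in the examples "
    "before describing the image to generate."
)

def two_stage_generate(model, context):
    # Stage 1: generate the ImageGen-CoT reasoning chain R from X + instruction.
    cot = model.generate(context + [COT_INSTRUCTION], modality="text")
    # Stage 2: concatenate X, R, and a mandatory <image> token so that the
    # model is forced to decode the target image I.
    image = model.generate(context + [cot, "<image>"], modality="image")
    return cot, image
```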
# 3.2. Dataset Construction
Due to the limitations of some unified MLLMs in generating well-structured ImageGen-CoT, which leads to suboptimal performance, we propose an automated pipeline to collect an ImageGen-CoT dataset and fine-tune the model using this dataset.
To collect high-quality ImageGen-CoT datasets, we first establish an instruction pool by collecting instructions from

Figure 3. Main Pipeline. (a) Data Collection Pipeline: An automated iterative process where the MLLM acts as Generator, Selector, Critic, and Refiner to produce high-quality ImageGen-CoT (reasoning chains) and aligned images. (b) Training Pipeline: Fine-tuning unified MLLMs on the collected ImageGen-CoT dataset to enhance contextual reasoning and image generation. (c) Test-Time Scaling: Strategies for performance improvement via hybrid scaling during inference.

existing training datasets in T2I-ICL tasks. Second, we propose an automatic dataset construction pipeline. As illustrated in Figure 3, our pipeline proceeds as follows: In the initial stage, we let the MLLM act as a Generator to generate N outputs, each consisting of an ImageGen-CoT and a prompt for the next image, which are then used by the T2I model [13] to generate N images. Then, the MLLM acts as a Selector to select the best image from the N candidates. After that, if the selected image meets our quality threshold or reaches the maximum iteration limit, the pipeline terminates and outputs the corresponding ImageGen-CoT and image pair. Otherwise, we let the MLLM act as a Critic to write a critique of the selected image, assessing how well it matches the T2I-ICL prompt. Finally, the MLLM acts as a Refiner to refine the prompt based on the critique, and the process iterates until the termination condition is met. Based on the collected responses, we then construct our ImageGen-CoT dataset as follows:

$$
\mathcal{D}_{\text{ImageGen-CoT}} = \left\{ \left( T_{i}^{*}, I_{i}^{*} \right) \right\}_{i=1}^{n} \tag{2}
$$

where $(T_{i}^{*}, I_{i}^{*})$ represents the $i$-th high-quality pair selected by our pipeline: $T_{i}^{*}$ is the ImageGen-CoT that successfully guided the generation, $I_{i}^{*}$ is the corresponding generated image that meets our quality standards, and $n$ is the total number of collected pairs in the dataset.
# 3.3. Training Pipeline
After constructing the dataset, we explore the training paradigm to fine-tune Unified MLLMs using our collected dataset. In this section, we detail the process of training Unified MLLMs with the ImageGen-CoT dataset, focusing on data formulation and training objectives.
To maintain consistency between the training and inference stages, we divide the ImageGen-CoT dataset into two splits:

(1) $[X, p_{\mathrm{cot}}] \to \mathrm{ImageGen-CoT}$, which generates the ImageGen-CoT;
(2) $[X, \text{ImageGen-CoT}, p_{\text{image}}] \to \text{image}$, which generates the final image.

When training with dataset split 1, since the model only generates ImageGen-CoT text, we apply the standard language-modeling loss ($lm\_loss$), formulated as follows:

$$
lm\_loss = -\frac{1}{N} \sum_{i=1}^{N} \log P\left(y_{i} \mid y_{<i}, X\right)
$$
where $y_{i}$ is the $i$ -th token in the ImageGen-CoT text, $y_{<i}$ represents the preceding tokens, $X$ is the input, and $N$ is the total number of tokens in the ImageGen-CoT sequence.
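
As a reference, this objective takes a few lines of PyTorch. It is a minimal sketch, assuming `logits` come from the unified MLLM and that positions belonging to the input $X$ are masked with -100 in `target_ids`, so only ImageGen-CoT tokens contribute to the loss.

```python
import torch.nn.functional as F

# Minimal sketch of lm_loss: next-token cross-entropy over ImageGen-CoT tokens.
# Positions belonging to the input X are assumed to be masked with -100.
def lm_loss(logits, target_ids):
    # logits: (batch, seq_len, vocab); target_ids: (batch, seq_len)
    return F.cross_entropy(
        logits[:, :-1].reshape(-1, logits.size(-1)),  # predict y_i from y_<i and X
        target_ids[:, 1:].reshape(-1),                # targets shifted by one
        ignore_index=-100,
    )
```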

For training with dataset split 2, there is no uniform training loss, as different Unified MLLMs utilize varying visual representations (e.g., discrete visual tokens [8, 34]
or continuous visual embeddings [9]). For models using discrete visual tokens, the same loss as language modeling (lm_loss) is applied. For models using continuous visual embeddings, we apply the mse_loss between the generated and target visual embeddings, formulated as:
$$
mse\_loss = \|\hat{z} - z\|^{2}
$$
where $\hat{z}$ is the generated visual embedding and $z$ is the corresponding target visual embedding. In this study, our primary objective is to enhance the model's capability to generate accurate ImageGen-CoT. So, by default, we utilize data split 1 for fine-tuning Unified MLLMs, with more results presented in the Appendix.
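
For completeness, the continuous-embedding objective is equally short; the sketch below assumes predicted and target embedding tensors of the same shape.

```python
import torch

# Minimal sketch of mse_loss for continuous-visual-embedding models:
# regress the generated visual embeddings onto the target embeddings.
def mse_loss(pred_embeds: torch.Tensor, target_embeds: torch.Tensor) -> torch.Tensor:
    return torch.mean((pred_embeds - target_embeds) ** 2)
```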

# 3.4. Test-Time Scale-Up

Though fine-tuning with the ImageGen-CoT dataset significantly improves model performance on T2I-ICL tasks, substantial room for improvement remains. Inspired by test-time scaling methods in NLP, we explore whether investing more computation during inference can further enhance performance. We first investigate a conventional paradigm: using SEED-X as the base model, we generate multiple images by varying the seed value and filter the outputs via a ground-truth verifier aligned with the Pass@N metric. However, we observe that even with $\mathrm{N} = 16$, this approach underperforms SEED-X fine-tuned with the ImageGen-CoT dataset.

This observation motivates our exploration of test-time scaling in the context of ImageGen-CoT, which we approach through three distinct strategies: 1. Single-Chain Scaling: this approach generates one ImageGen-CoT chain and produces multiple image variants by varying the seed values. 2. Multi-Chain Scaling: similar to NLP's "Best-of-N" sampling, we generate multiple ImageGen-CoT chains through high-temperature LLM decoding; each chain produces a unique image, potentially capturing different aspects of the contextual requirements. 3. Hybrid Scaling: to address the dual challenges of contextual comprehension and generation in T2I-ICL tasks, we propose a hybrid approach that combines the strengths of both strategies. As illustrated in Figure 3, this method first generates multiple ImageGen-CoT chains and then creates multiple image variations for each chain. Our extensive experiments further reveal the effectiveness of this hybrid scaling strategy: the integration of ImageGen-CoT enables effective bidirectional scaling across both comprehension and generation dimensions. This dual-axis scalability opens new pathways for optimizing MLLM performance in complex multimodal tasks.
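
The hybrid strategy amounts to a small search over both axes. The snippet below is illustrative only: `sample_cot`, `render_image`, and `score` are hypothetical helpers standing in for high-temperature CoT decoding, seed-varied image generation, and the Best-of-N verifier.

```python
# Minimal sketch of hybrid test-time scaling: N = n_chains * images_per_chain.
# `sample_cot`, `render_image`, and `score` are hypothetical helpers.
def hybrid_scaling(model, context, score, n_chains=4, images_per_chain=4):
    best_image, best_score = None, float("-inf")
    for _ in range(n_chains):                      # comprehension axis
        cot = model.sample_cot(context, temperature=0.7)
        for seed in range(images_per_chain):       # generation axis
            image = model.render_image(context, cot, seed=seed)
            s = score(image)
            if s > best_score:
                best_image, best_score = image, s
    return best_image
```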
# 4. Experiments

# 4.1. Implementation Details
To validate the effectiveness of our ImageGen-CoT framework and dataset, we conduct experiments on two T2I-ICL benchmarks: CoBSAT [40] and DreamBench++ [20]. We employ SEED-LLaMA [8] and SEED-X [9] as our base Unified MLLMs for both ImageGen-CoT reasoning and image generation. For the dataset construction pipeline, we utilize different configurations: on DreamBench++, InternVL2.5-78B-MPO-AWQ [33] serves as the Generator, Selector, Critic, and Refiner, while for CoBSAT, we implement a self-consistency selector method with other components remaining the same. FLUX.1-schnell [13] is selected as the base T2I model for both benchmarks. We maintain CoBSAT's original split strategy, while implementing an image-level split for DreamBench++ to ensure no subject overlap. During data construction, we generate 3 outputs per query using a sampling temperature of 0.7 and top-p of 0.8, with a maximum of 2 iterations. Additional details regarding dataset splits, training procedures, and ablation studies are provided in the Appendix.
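
The self-consistency selector used for CoBSAT can be sketched as follows: it scores each candidate prompt by its average embedding similarity to the other candidates and keeps the most consistent one, following the procedure detailed in the supplementary material. Here `embed` is a hypothetical text-embedding function.

```python
import numpy as np

# Minimal sketch of the self-consistency selector: embed the M candidate
# prompts, compute pairwise inner-product similarities, and return the index
# of the prompt with the highest average similarity to all the others.
def select_by_consistency(prompts, embed):
    V = np.stack([embed(p) for p in prompts])   # (M, d) prompt embeddings
    S = V @ V.T                                 # pairwise similarities S_ij
    np.fill_diagonal(S, 0.0)                    # drop the j == i terms
    avg = S.sum(axis=1) / (len(prompts) - 1)    # average similarity per prompt
    return int(np.argmax(avg))                  # index of the best candidate
```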
# 4.2. Main Results

In this section, we seek to answer the following questions: a) How much does ImageGen-CoT (via prompting) improve the model's performance? b) To what extent does the model's performance improve after fine-tuning with the ImageGen-CoT dataset? c) Can we invest more compute at inference time to improve performance? Finally, to better demonstrate the effectiveness of our method, we present visual comparison results.

# Question 1: How much does ImageGen-CoT (via prompt) improve the model's performance?

To verify the effectiveness of ImageGen-CoT (via prompt), we compare the model's performance with and without generating ImageGen-CoT before image generation on CoBSAT [40] and Dreambench++ [20]. Since CoBSAT includes 10 tasks, we calculate the average score to represent overall performance. Similarly, for Dreambench++, we compute the average score across its tasks.

Results As shown in Tables 1 and 2, integrating ImageGen-CoT through prompting yields consistent improvements across benchmarks. On CoBSAT, SEED-LLaMA's average score improves from 0.254 to 0.283 (a $+11.4\%$ relative gain), while SEED-X shows a more substantial improvement from 0.349 to 0.439 ($+25.8\%$). The trend persists on Dreambench++, where SEED-X achieves an $84.6\%$ relative improvement $(0.188 \rightarrow 0.347)$ over its baseline. These results highlight the effectiveness of incorporating ImageGen-CoT in enhancing model performance. However, SEED-LLaMA's performance on Dreambench++
Table 1. Main results on CoBSAT benchmark. "FT w/ GT Image" denotes fine-tuning with ground truth images, while "FT w/ ImageGen-CoT" represents fine-tuning with our ImageGen-CoT dataset. The results demonstrate that ImageGen-CoT significantly improves model performance, with relative improvements over baseline model shown in red.
<table><tr><td rowspan="2">Method</td><td colspan="5">Object-Inference Task</td><td colspan="5">Attribute-Inference Task</td><td rowspan="2">Avg.↑</td></tr><tr><td>Color-I</td><td>Bkg-I</td><td>Style-I</td><td>Action-I</td><td>Texture-I</td><td>Color-II</td><td>Bkg-II</td><td>Style-II</td><td>Action-II</td><td>Texture-II</td></tr><tr><td>SEED-LLaMA</td><td>.616</td><td>.216</td><td>.272</td><td>.592</td><td>.112</td><td>.088</td><td>.168</td><td>.192</td><td>.220</td><td>.056</td><td>.254</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.700</td><td>.276</td><td>.300</td><td>.408</td><td>.084</td><td>.176</td><td>.292</td><td>.272</td><td>.192</td><td>.132</td><td>.283</td></tr><tr><td>+ FT w/ GT Image</td><td>.632</td><td>.272</td><td>.352</td><td>.540</td><td>.128</td><td>.164</td><td>.200</td><td>.256</td><td>.172</td><td>.112</td><td>.283</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.620</td><td>.368</td><td>.384</td><td>.424</td><td>.060</td><td>.192</td><td>.288</td><td>.208</td><td>.216</td><td>.148</td><td>.291 ↑14.6%</td></tr><tr><td>SEED-X</td><td>.796</td><td>.412</td><td>.316</td><td>.596</td><td>.240</td><td>.176</td><td>.344</td><td>.260</td><td>.252</td><td>.104</td><td>.349</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.724</td><td>.440</td><td>.660</td><td>.784</td><td>.216</td><td>.312</td><td>.472</td><td>.228</td><td>.320</td><td>.240</td><td>.439</td></tr><tr><td>+ FT w/ GT Image</td><td>.936</td><td>.712</td><td>.896</td><td>.860</td><td>.468</td><td>.280</td><td>.324</td><td>.388</td><td>.636</td><td>.424</td><td>.592</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.884</td><td>.692</td><td>.928</td><td>.936</td><td>.420</td><td>.504</td><td>.612</td><td>.660</td><td>.524</td><td>.424</td><td>.658 ↑88.5%</td></tr></table>
Table 2. Evaluation results on Dreambench++ benchmark. CP refers to concept preservation and PF refers to prompt following metrics. "FT" stands for fine-tuning. The relative gains over baseline model are shown in red.
<table><tr><td rowspan="2">Method</td><td colspan="5">Concept Preservation</td><td colspan="4">Prompt Following</td><td rowspan="2">CP-PF↑</td></tr><tr><td>Animal</td><td>Human</td><td>Object</td><td>Style</td><td>Overall</td><td>Photorealistic</td><td>Style</td><td>Imaginative</td><td>Overall</td></tr><tr><td>SEED-LLaMA</td><td>.436</td><td>.315</td><td>.288</td><td>.381</td><td>.358</td><td>.306</td><td>.202</td><td>.154</td><td>.218</td><td>.078</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.390</td><td>.241</td><td>.262</td><td>.346</td><td>.317</td><td>.291</td><td>.211</td><td>.170</td><td>.222</td><td>.078</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.399</td><td>.290</td><td>.271</td><td>.318</td><td>.325</td><td>.348</td><td>.355</td><td>.210</td><td>.310</td><td>.101 ↑29.5%</td></tr><tr><td>SEED-X</td><td>.647</td><td>.420</td><td>.526</td><td>.571</td><td>.559</td><td>.346</td><td>.342</td><td>.303</td><td>.337</td><td>.188</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.547</td><td>.293</td><td>.369</td><td>.424</td><td>.427</td><td>.862</td><td>.775</td><td>.737</td><td>.817</td><td>.347</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset</td><td>.549</td><td>.410</td><td>.403</td><td>.432</td><td>.458</td><td>.922</td><td>.851</td><td>.846</td><td>.881</td><td>.403 ↑114.4%</td></tr></table>
remains unchanged. This is attributed to its limited comprehension capabilities, which result in unreasonable and disorganized ImageGen-CoT outputs. To address this, we fine-tune the model using our collected ImageGen-CoT datasets, enabling higher-quality generation. More details are provided below.
# Question 2: To what extent does the performance of the model improve after fine-tuning with the ImageGen-CoT dataset?

To further enhance model performance, we fine-tuned both SEED-LLaMA [8] and SEED-X [9] on the ImageGen-CoT dataset, which was collected using our automatic dataset construction pipeline. The ImageGen-CoT dataset consists of two components: the first part focuses on teaching the model how to generate ImageGen-CoT text, while the second part trains the model to generate images based on the generated ImageGen-CoT text. As described in Sec. 3.3, our primary goal is to improve the model's capabilities in generating high-quality ImageGen-CoT. To this end, we fine-tune the models using Part I of the ImageGen-CoT dataset by default. We compare the performance of our fine-tuned model with its version using ImageGen-CoT (via prompt) and the standard version.

Results As shown in Table 1, SEED-LLaMA and SEED-X fine-tuned with the ImageGen-CoT dataset achieve improvements of $+2.8\%$ $(0.283 \rightarrow 0.291)$ and $+49.9\%$ $(0.439 \rightarrow$
0.658), respectively, compared to generating ImageGen-CoT via prompting. Moreover, they even outperform their counterparts fine-tuned with GT images, by $+2.8\%$ ($0.283 \rightarrow 0.291$) and $+11.1\%$ ($0.592 \rightarrow 0.658$). Additionally, on the Dreambench++ benchmark, SEED-LLaMA fine-tuned with the ImageGen-CoT dataset shows an improvement of $+29.5\%$ ($0.078 \rightarrow 0.101$) in CP-PF score, while SEED-X achieves a $+16.1\%$ gain ($0.347 \rightarrow 0.403$). These strong results on CoBSAT and Dreambench++ underscore the effectiveness and generalizability of our collected ImageGen-CoT dataset in enhancing model reasoning and understanding abilities.
|
| 162 |
+
|
| 163 |
+
# Question 3: Can we invest more inference-time compute to improve performance?
|
| 164 |
+
|
| 165 |
+
To further enhance model performance, we explore various test-time scaling strategies. We adopt a Best-of-N approach in which the model generates multiple outputs and the best one is selected by ground-truth metric evaluation (pass@N). As a baseline, we first experiment with the vanilla SEED-X model, generating multiple images by varying the random seed. We then investigate three advanced scaling strategies using SEED-X fine-tuned with the ImageGen-CoT dataset: (1) Multi-Chain Scaling, which generates multiple distinct ImageGen-CoT chains, with each chain producing one image; (2) Single-Chain Scaling, which produces multiple image variations from a single ImageGen-CoT chain; and (3) Hybrid Scaling, a novel approach that combines the strengths of both strategies by first generating multiple
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
Figure 4. Test-time scaling strategies comparison. We evaluate three scaling strategies: Multi-Chain Scaling, Single-Chain Scaling, and Hybrid Scaling, across varying numbers of generated outputs (N=2, 4, 8, 16). Results on CoBSAT are shown on the left and results on Dreambench++ on the right. The red numbers indicate the performance improvements achieved by Hybrid Scaling over Single-Chain Scaling.
|
| 169 |
+
|
| 170 |
+

|
| 171 |
+
|
| 172 |
+

|
| 173 |
+
Figure 5. Qualitative Results. Comparison of generation results on CoBSAT (top) and Dreambench++ (bottom) using baseline SEED-X, SEED-X with ImageGen-CoT prompting, and SEED-X fine-tuned with the ImageGen-CoT dataset.
|
| 174 |
+
|
| 175 |
+
ImageGen-CoT chains and then producing multiple image variations for each chain. For each paradigm, we systematically evaluate scalability by generating 2, 4, 8, and 16 outputs. For Hybrid Scaling, we implement specific configurations: Hybrid@16 uses 4 ImageGen-CoT chains with 4 images per chain; Hybrid@8 explores two alternatives (2 chains $\times$ 4 images or 4 chains $\times$ 2 images); Hybrid@4 employs 2 chains $\times$ 2 images; and Hybrid@2 tests either 2 chains $\times$ 1 image or 1 chain $\times$ 2 images. Due to the significant scale difference, we visualize only the latter configuration here.
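As a rough illustration of how these configurations compose, here is a hedged sketch of Hybrid Scaling as Best-of-N selection; `generate_cot`, `generate_image`, and `score` are hypothetical stand-ins for the model's sampling calls and the ground-truth metric, not real APIs.

```python
# Hypothetical sketch of Hybrid Scaling (Best-of-N with pass@N-style selection).
def hybrid_scaling(context, n_chains, images_per_chain,
                   generate_cot, generate_image, score):
    candidates = []
    for _ in range(n_chains):                 # e.g. 4 chains for Hybrid@16
        cot = generate_cot(context)           # one distinct ImageGen-CoT chain
        for _ in range(images_per_chain):     # e.g. 4 images per chain
            candidates.append(generate_image(context, cot))
    return max(candidates, key=score)         # keep the best of N outputs
```

Setting `n_chains=1` recovers Single-Chain Scaling, and `images_per_chain=1` recovers Multi-Chain Scaling.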
|
| 176 |
+
|
| 177 |
+
# Results
|
| 178 |
+
|
| 179 |
+
As shown in Figure 4, our experiments reveal three key insights. First, the vanilla SEED-X@16 baseline (0.67 on CoBSAT, 0.312 on Dreambench++) underperforms even the simplest scaling strategies (e.g., 0.747 on CoBSAT@2), highlighting the necessity of ImageGen-CoT integration. Second, Multi-Chain Scaling matches Single-Chain Scaling in performance, showing that generating diverse reasoning
|
| 180 |
+
|
| 181 |
+
paths is as effective as varying outputs from a single chain. Finally, Hybrid Scaling consistently achieves the highest scores across benchmarks. At $N = 16$, Hybrid Scaling improves CoBSAT performance to 0.909 (1.9% over Single-Chain) and Dreambench++ to 0.543 (0.8% higher than Single-Chain). The integration of ImageGen-CoT enables effective bidirectional scaling across both the comprehension and generation dimensions. This dual-axis scalability suggests new pathways for optimizing MLLM performance in complex multimodal tasks.
|
| 182 |
+
|
| 183 |
+
Qualitative Results We further validate the effectiveness of our proposed methods through visualization. Figure 5 showcases the generation results from SEED-X under different configurations: baseline SEED-X, SEED-X with ImageGen-CoT (via prompting), and SEED-X fine-tuned with the ImageGen-CoT dataset. As shown in the top of Figure 5, baseline SEED-X (b) generates a basic book shape but misses the implicit "lace" attribute. With ImageGen-CoT
|
| 184 |
+
|
| 185 |
+
Table 3. A comprehensive analysis of model performance on the CoBSAT benchmark. Each model is evaluated in two generation modes: image generation (Img) and text generation (Txt).
|
| 186 |
+
|
| 187 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Gen Mode</td><td colspan="5">Object Inference Task</td><td colspan="5">Attribute Inference Task</td><td rowspan="2">Avg.↑</td></tr><tr><td>Img</td><td>Txt</td><td>Color</td><td>Bkg</td><td>Style</td><td>Action</td><td>Texture</td><td>Color</td><td>Bkg</td><td>Style</td><td>Action</td><td>Texture</td></tr><tr><td rowspan="2">SEED-X</td><td>✓</td><td>-</td><td>.796</td><td>.412</td><td>.316</td><td>.596</td><td>.240</td><td>.176</td><td>.344</td><td>.260</td><td>.252</td><td>.104</td><td>.349</td></tr><tr><td>-</td><td>✓</td><td>.440</td><td>.388</td><td>.096</td><td>.080</td><td>.060</td><td>.116</td><td>.080</td><td>.180</td><td>.164</td><td>.132</td><td>.174</td></tr><tr><td rowspan="2">+ ImageGen-CoT (via Prompt)</td><td>✓</td><td>-</td><td>.724</td><td>.440</td><td>.660</td><td>.784</td><td>.216</td><td>.312</td><td>.472</td><td>.228</td><td>.320</td><td>.240</td><td>.439</td></tr><tr><td>-</td><td>✓</td><td>.744</td><td>.212</td><td>.648</td><td>.476</td><td>.388</td><td>.356</td><td>.780</td><td>.292</td><td>.540</td><td>.136</td><td>.457</td></tr><tr><td rowspan="2">+ FT w/ ImageGen-CoT Dataset</td><td>✓</td><td>-</td><td>.884</td><td>.692</td><td>.928</td><td>.936</td><td>.420</td><td>.504</td><td>.612</td><td>.660</td><td>.524</td><td>.424</td><td>.658 ↑88.5%</td></tr><tr><td>-</td><td>✓</td><td>.984</td><td>.568</td><td>.968</td><td>1.00</td><td>.640</td><td>.516</td><td>.984</td><td>.592</td><td>.712</td><td>.628</td><td>.760 ↑117.8%</td></tr></table>
|
| 188 |
+
|
| 189 |
+
prompting (c), the model's weak comprehension leads to poor ImageGen-CoT quality and even degraded generation quality. After fine-tuning with the ImageGen-CoT dataset (d), with the help of ImageGen-CoT, the model first successfully infers the shared attribute "lace" in the CoT text and then generates the correct image: a book made of lace. Similarly, as shown in the bottom of Figure 5, baseline SEED-X (b) only generates a simple egg with an open mouth, ignoring key requirements such as "on stone", "in garden", and a similar expression (sad, with an upturned closed mouth). With ImageGen-CoT prompting (c), while the egg is placed on stone, the output lacks both the required facial expression and the garden environment. After fine-tuning (d), the model successfully understands all task requirements and generates a complete scene with an egg properly placed on a stone in a garden setting, maintaining facial features similar to the input. These qualitative results visually demonstrate the effectiveness of ImageGen-CoT and its corresponding dataset in enhancing model comprehension and generation capability, particularly for complex tasks that require attention to detail and scene understanding.
|
| 190 |
+
|
| 191 |
+
# 5. Further Analysis
|
| 192 |
+
|
| 193 |
+
# 5.1. Why ImageGen-CoT enhances the model's performance
|
| 194 |
+
|
| 195 |
+
As described above, our proposed method, ImageGen-CoT, significantly enhances model performance on T2I-ICL tasks. To better understand why, we hypothesize that "a better understanding leads to better generation." Specifically, we believe that ImageGen-CoT enhances the comprehension capabilities of Unified-MLLMs. To quantitatively assess the model's comprehension ability, we have the model generate a text description of the next image, as indicated by the "Gen Mode" columns in Table 3. We then conduct a series of experiments to validate this hypothesis.
|
| 196 |
+
|
| 197 |
+
Results: The results in Table 3 demonstrate that integrating ImageGen-CoT significantly enhances model comprehension capabilities. When applied via prompting, SEED-X's text generation mode (Txt) achieves substantial gains, with
|
| 198 |
+
|
| 199 |
+
the average score improving from 0.174 to 0.457. Fine-tuning with the ImageGen-CoT dataset further amplifies this advantage, elevating the text mode to a remarkable average score of 0.760 (vs. SEED-X's baseline of 0.174). Notably, enhanced comprehension also improves image generation (Img): SEED-X with ImageGen-CoT via prompt raises the average score from 0.349 to 0.439, while fine-tuning further boosts it to 0.658. This aligns with our hypothesis: "a better understanding leads to better generation."
|
| 200 |
+
|
| 201 |
+
# 5.2. Main obstacles in T2I-ICL
|
| 202 |
+
|
| 203 |
+
In this section, we further discuss the main obstacles in T2I-ICL. We identify two primary challenges. First, as shown in Table 3, SEED-X's text generation mode (Txt) yields relatively low scores (0.174), highlighting its difficulty in comprehending complex T2I-ICL instructions. Second, image generation capabilities remain a bottleneck: notably, SEED-X fine-tuned with the ImageGen-CoT dataset scores lower in image generation mode than in text mode, indicating that even when understanding improves, translating that understanding into accurate image generation remains challenging.
|
| 204 |
+
|
| 205 |
+
# 6. Conclusion
|
| 206 |
+
|
| 207 |
+
In this work, we propose a novel framework that enhances Unified MLLMs' performance on T2I-ICL tasks by incorporating CoT reasoning before image generation. To further improve their performance, we develop an automatic pipeline to curate high-quality ImageGen-CoT datasets and fine-tune these models. Our extensive experiments demonstrate that our method significantly improves model performance, with SEED-X achieving up to $80\%$ gains on T2I-ICL tasks after fine-tuning. We further explore test-time scaling strategies and propose a hybrid approach that combines multiple reasoning chains with diverse image generation. Our work establishes a novel paradigm for enhancing MLLMs' capabilities in complex multimodal generation tasks.
|
| 208 |
+
|
| 209 |
+
# ImageGen-CoT: Enhancing Text-to-Image In-context Learning with Chain-of-Thought Reasoning
|
| 210 |
+
|
| 211 |
+
Supplementary Material
|
| 212 |
+
|
| 213 |
+
# Overview
|
| 214 |
+
|
| 215 |
+
In this supplementary material, we present additional details and experimental results not included in the main paper. The contents include:
|
| 216 |
+
|
| 217 |
+
- A detailed introduction to CoBSAT [40] and Dreambench++ [20] in Sec. S-A.
|
| 218 |
+
- Additional details on the experimental setup in Sec. S-B.
|
| 219 |
+
- More experimental results in Sec. S-C.
|
| 220 |
+
- Effectiveness of Iterative Refinement Strategy in Data Construction in Sec. S-D.
|
| 221 |
+
- The automatic dataset construction pipeline on CoBSAT in Sec. S-E.
|
| 222 |
+
|
| 223 |
+
# S-A. Dataset Details
|
| 224 |
+
|
| 225 |
+
CoBSAT: CoBSAT [40] is a comprehensive benchmark dataset designed specifically to evaluate Text-to-Image In-Context Learning (T2I-ICL) capabilities of Multimodal Large Language Models (MLLMs). The dataset consists of ten distinct tasks across five thematic areas, with each task carefully structured to assess different aspects of T2I-ICL performance. The benchmark is organized into two main categories: object-inference tasks and attribute-inference tasks. In object-inference tasks, models must infer the correct object from demonstrations while being given explicit attributes in the text prompt. Conversely, in attribute-inference tasks, models are provided with the object in the text prompt and must infer the appropriate attribute from the demonstrations. This dual structure enables a thorough evaluation of MLLMs' ability to learn and generalize from multimodal in-context examples.
|
| 226 |
+
|
| 227 |
+
Dreambench++: Dreambench++ [20] is a comprehensive benchmark for evaluating personalized text-to-image generation models. It features three key advantages: 1) human-aligned evaluation through carefully designed GPT prompting that achieves over $79\%$ agreement with human assessments; 2) a fully automated evaluation process that eliminates time-consuming manual evaluation; and 3) a diverse dataset containing 150 images and 1,350 prompts across various categories, including animals, humans, objects, and styles. The benchmark evaluates two fundamental aspects of personalized image generation: concept preservation and prompt following.
|
| 228 |
+
|
| 229 |
+
# S-B. Detailed Experimental Setup
|
| 230 |
+
|
| 231 |
+
# S-B.1. CoBSAT
|
| 232 |
+
|
| 233 |
+
Data Split. Following CoBSAT's default settings, we split the predefined lists of text inputs $(X)$ and latent variables $(\Theta)$ into training and testing subsets with a 1:1 ratio, ensuring the test set contains completely unseen prompts and attributes. For training, we generate 300 samples per task by enumerating all possible combinations of $\theta \in \Theta_{\mathrm{train}}$ and $(x_{n})_{n = 1}^{N + 1}\in X_{\mathrm{train}}^{N + 1}$ , resulting in 3,000 training samples across 10 tasks. For evaluation, we randomly sample 250 prompts per task from $\theta \in \Theta_{\mathrm{test}}$ and $(x_{n})_{n = 1}^{N + 1}\in X_{\mathrm{test}}^{N + 1}$ , yielding a total of 2,500 test samples.
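The enumeration and sampling above can be sketched as follows; `theta_vals` and `x_vals` stand in for the split lists $\Theta$ and $X$, and the function is our assumption about the procedure rather than the benchmark's actual code.

```python
import itertools
import random

# Assumed sketch of the CoBSAT sampling described above; not the official code.
def build_samples(theta_vals, x_vals, n_demos, n_samples, seed=0):
    # Each sample pairs a latent variable theta with N+1 text inputs
    # (N in-context demonstrations plus one query).
    pool = list(itertools.product(
        theta_vals, itertools.product(x_vals, repeat=n_demos + 1)))
    random.Random(seed).shuffle(pool)
    return pool[:n_samples]   # 300 per task for training, 250 for testing
```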
|
| 234 |
+
|
| 235 |
+
Training Strategy. For model training, we fine-tune both SEED-LLaMA and SEED-X using LoRA. Specifically, SEED-LLaMA is fine-tuned with rank=64, $\alpha = 16$ , learning rate=1e-4 for 1 epoch, while SEED-X uses rank=64, $\alpha = 64$ , learning rate=1e-4 for 1 epoch.
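For reference, a minimal LoRA setup with these hyper-parameters might look like the sketch below, using the Hugging Face `peft` library; the `target_modules` list and dropout value are assumptions, since the paper does not specify them.

```python
from peft import LoraConfig, get_peft_model

# Minimal sketch mirroring the stated hyper-parameters (SEED-LLaMA: rank=64,
# alpha=16; SEED-X: rank=64, alpha=64). target_modules and dropout are
# assumed -- the projection names depend on each backbone's implementation.
def add_lora(base_model, lora_alpha=16):
    config = LoraConfig(
        r=64,
        lora_alpha=lora_alpha,
        lora_dropout=0.05,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    )
    return get_peft_model(base_model, config)

# Fine-tune the returned model with learning rate 1e-4 for 1 epoch, as above.
```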
|
| 236 |
+
|
| 237 |
+
# S-B.2. Dreambench++
|
| 238 |
+
|
| 239 |
+
Data Split. To prevent subject overlap in evaluation, we split the dataset by subject, with $60\%$ of subjects (90 subjects, 810 samples) for training and $40\%$ (60 subjects, 540 samples) for testing.
|
| 240 |
+
|
| 241 |
+
Training Strategy. For Dreambench++, SEED-LLaMA is fine-tuned using LoRA with rank=64, $\alpha = 16$ , learning rate=1e-4 for 5 epochs, while SEED-X uses rank=64, $\alpha = 64$ , learning rate=1e-4 for 1 epoch.
|
| 242 |
+
|
| 243 |
+
# S-C. More experimental results
|
| 244 |
+
|
| 245 |
+
As described in the main paper, the ImageGen-CoT dataset comprises two distinct components. The first component trains the model to generate ImageGen-CoT text, while the second teaches the model to generate images based on the generated ImageGen-CoT text. While our main paper primarily focused on training with Part I of the dataset, here we extend our experiments by utilizing the complete dataset. As presented in Tables S-1 and S-2, we conducted comprehensive experiments using both parts of the ImageGen-CoT dataset. On the CoBSAT benchmark, SEED-LLaMA fine-tuned with the complete ImageGen-CoT dataset achieved a significant performance gain of $+36.6\%$ $(0.254\rightarrow 0.347)$ over the baseline model. Similarly, SEED-X demonstrated a remarkable $+79.4\%$ improvement $(0.349\rightarrow 0.626)$ over its baseline. For the Dreambench++ benchmark,
|
| 246 |
+
|
| 247 |
+
Table S-1. Main results on the CoBSAT benchmark. "FT w/ GT Image" denotes fine-tuning with ground-truth images, while "FT w/ ImageGen-CoT" represents fine-tuning with our ImageGen-CoT dataset. The results demonstrate that ImageGen-CoT significantly improves model performance, with relative improvements over the baseline model shown in red.
|
| 248 |
+
|
| 249 |
+
<table><tr><td rowspan="2">Method</td><td colspan="5">Object-Inference Task</td><td colspan="5">Attribute-Inference Task</td><td rowspan="2">Avg.↑</td></tr><tr><td>Color-I</td><td>Bkg-I</td><td>Style-I</td><td>Action-I</td><td>Texture-I</td><td>Color-II</td><td>Bkg-II</td><td>Style-II</td><td>Action-II</td><td>Texture-II</td></tr><tr><td>SEED-LLaMA</td><td>.616</td><td>.216</td><td>.272</td><td>.592</td><td>.112</td><td>.088</td><td>.168</td><td>.192</td><td>.220</td><td>.056</td><td>.254</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.700</td><td>.276</td><td>.300</td><td>.408</td><td>.084</td><td>.176</td><td>.292</td><td>.272</td><td>.192</td><td>.132</td><td>.283</td></tr><tr><td>+ FT w/ GT Image</td><td>.632</td><td>.272</td><td>.352</td><td>.540</td><td>.128</td><td>.164</td><td>.200</td><td>.256</td><td>.172</td><td>.112</td><td>.283</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.620</td><td>.368</td><td>.384</td><td>.424</td><td>.060</td><td>.192</td><td>.288</td><td>.208</td><td>.216</td><td>.148</td><td>.291</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.716</td><td>.432</td><td>.436</td><td>.420</td><td>.200</td><td>.168</td><td>.380</td><td>.256</td><td>.216</td><td>.248</td><td>.347 ↑36.6%</td></tr><tr><td>SEED-X</td><td>.796</td><td>.412</td><td>.316</td><td>.596</td><td>.240</td><td>.176</td><td>.344</td><td>.260</td><td>.252</td><td>.104</td><td>.349</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.724</td><td>.440</td><td>.660</td><td>.784</td><td>.216</td><td>.312</td><td>.472</td><td>.228</td><td>.320</td><td>.240</td><td>.439</td></tr><tr><td>+ FT w/ GT Image</td><td>.936</td><td>.712</td><td>.896</td><td>.860</td><td>.468</td><td>.280</td><td>.324</td><td>.388</td><td>.636</td><td>.424</td><td>.592</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.884</td><td>.692</td><td>.928</td><td>.936</td><td>.420</td><td>.504</td><td>.612</td><td>.660</td><td>.524</td><td>.424</td><td>.658</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.832</td><td>.596</td><td>.840</td><td>.892</td><td>.484</td><td>.384</td><td>.548</td><td>.572</td><td>.608</td><td>.500</td><td>.626 ↑79.4%</td></tr></table>
|
| 250 |
+
|
| 251 |
+
Table S-2. Evaluation results on the Dreambench++ benchmark. CP refers to concept preservation and PF to prompt following. "FT" stands for fine-tuning. Relative gains over the baseline model are shown in red.
|
| 252 |
+
|
| 253 |
+
<table><tr><td rowspan="2">Method</td><td colspan="5">Concept Preservation</td><td colspan="4">Prompt Following</td><td rowspan="2">CP-PF↑</td></tr><tr><td>Animal</td><td>Human</td><td>Object</td><td>Style</td><td>Overall</td><td>Photorealistic</td><td>Style</td><td>Imaginative</td><td>Overall</td></tr><tr><td>SEED-LLaMA</td><td>.436</td><td>.315</td><td>.288</td><td>.381</td><td>.358</td><td>.306</td><td>.202</td><td>.154</td><td>.218</td><td>.078</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.390</td><td>.241</td><td>.262</td><td>.346</td><td>.317</td><td>.291</td><td>.211</td><td>.170</td><td>.222</td><td>.078</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.399</td><td>.290</td><td>.271</td><td>.318</td><td>.325</td><td>.348</td><td>.355</td><td>.210</td><td>.310</td><td>.101</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.414</td><td>.269</td><td>.243</td><td>.328</td><td>.319</td><td>.408</td><td>.317</td><td>.199</td><td>.334</td><td>.107 ↑37.2%</td></tr><tr><td>SEED-X</td><td>.647</td><td>.420</td><td>.526</td><td>.571</td><td>.559</td><td>.346</td><td>.342</td><td>.303</td><td>.337</td><td>.188</td></tr><tr><td>+ ImageGen-CoT (via Prompt)</td><td>.547</td><td>.293</td><td>.369</td><td>.424</td><td>.427</td><td>.862</td><td>.775</td><td>.737</td><td>.817</td><td>.347</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (Part1)</td><td>.549</td><td>.410</td><td>.403</td><td>.432</td><td>.458</td><td>.922</td><td>.851</td><td>.846</td><td>.881</td><td>.403</td></tr><tr><td>+ FT w/ ImageGen-CoT Dataset (All Part)</td><td>.511</td><td>.358</td><td>.424</td><td>.303</td><td>.421</td><td>.926</td><td>.910</td><td>.870</td><td>.906</td><td>.384 ↑104.2%</td></tr></table>
|
| 254 |
+
|
| 255 |
+
training with the complete dataset resulted in even more pronounced improvements. SEED-LLaMA showed a $+37.2\%$ gain $(0.078 \rightarrow 0.107)$ in CP-PF score, while SEED-X achieved a substantial $+104.2\%$ improvement $(0.188 \rightarrow 0.384)$. These results demonstrate that utilizing the complete ImageGen-CoT dataset also significantly improves model performance.
|
| 256 |
+
|
| 257 |
+
# S-D. Effectiveness of Iterative Refinement Strategy in Data Construction
|
| 258 |
+
|
| 259 |
+
We evaluate the effectiveness of our iterative refinement strategy on both the CoBSAT and Dreambench++ datasets. As demonstrated in Table S-3, the proposed strategy yields consistent improvements across all evaluation metrics. Specifically, on the CoBSAT dataset, our method improves object inference, attribute inference, and the overall score by 1.1, 2.9, and 2.0 points, respectively. For Dreambench++, the refinement strategy enhances prompt following (PF) by 0.9 points and concept preservation (CP) by 4.7 points, resulting in a substantial 4.7-point improvement in the combined PF*CP metric. These results validate that our iterative refinement approach significantly enhances the quality of the constructed dataset.
|
| 260 |
+
|
| 261 |
+
Table S-3. Performance comparison of data construction with and without iterative refinement.
|
| 262 |
+
|
| 263 |
+
(a) Results on CoBSAT

<table><tr><td>Method</td><td>Object</td><td>Attribute</td><td>Overall</td></tr><tr><td>w/o Iterative Refine</td><td>0.782</td><td>0.704</td><td>0.743</td></tr><tr><td>w/ Iterative Refine</td><td>0.793</td><td>0.733</td><td>0.763</td></tr></table>
|
| 264 |
+
|
| 265 |
+
(b) Results on Dreambench++
|
| 266 |
+
|
| 267 |
+
<table><tr><td>Method</td><td>PF</td><td>CP</td><td>PF*CP</td></tr><tr><td>w/o Iterative Refine</td><td>0.937</td><td>0.470</td><td>0.442</td></tr><tr><td>w/ Iterative Refine</td><td>0.946</td><td>0.517</td><td>0.489</td></tr></table>
|
| 268 |
+
|
| 269 |
+
# S-E. Automatic Dataset Construction Pipeline On CoBSAT
|
| 270 |
+
|
| 271 |
+
On CoBSAT, we initially adopted the same method as on DreamBench++. However, we found that the self-boosting approach underperformed due to the inherent complexity of CoBSAT, which requires the model to infer implicit visual-semantic relationships, posing a significant challenge to the model's reasoning capability. To address this, we instead sample multiple text prompts from the MLLM and select the best one in the spirit of self-consistency. However, standard self-consistency cannot be applied directly to CoBSAT. Self-consistency is commonly used in mathematical
|
| 272 |
+
|
| 273 |
+
problem solving, where text answers are precise (e.g., numbers or options) and consistency can be directly evaluated using string matching. In contrast, CoBSAT involves long and complex text prompts, making direct string-based consistency evaluation infeasible.
|
| 274 |
+
|
| 275 |
+
The pipeline proceeds as follows: we first sample multiple chains of thought (CoT) from the MLLM. These CoTs are then used, along with the input sequence context, to generate multiple text prompts. Formally, let the CoTs sampled from the MLLM be denoted as $\mathrm{CoT}_t^i$, where $i = 0,1,\dots ,M - 1$ and $M$ is the number of sampled CoTs. Each CoT, combined with the input sequence $x$, is used to construct a corresponding text prompt $p_t^i$ as:
|
| 276 |
+
|
| 277 |
+
$$
|
| 278 |
+
p_{t}^{i} = \mathcal{F}\left(\mathrm{CoT}_{t}^{i}, x\right), \quad i = 0, 1, \dots, M - 1, \tag{3}
|
| 279 |
+
$$
|
| 280 |
+
|
| 281 |
+
where $\mathcal{F}$ denotes the operation of generating a text prompt from the CoT and the input sequence context.
|
| 282 |
+
|
| 283 |
+
Next, we convert each text prompt into a vector representation using a text embedding model $\mathcal{E}$ :
|
| 284 |
+
|
| 285 |
+
$$
|
| 286 |
+
v_{t}^{i} = \mathcal{E}\left(p_{t}^{i}\right), \quad i = 0, 1, \dots, M - 1, \tag{4}
|
| 287 |
+
$$
|
| 288 |
+
|
| 289 |
+
where $v_{t}^{i}\in \mathbb{R}^{d}$ is the embedding vector of the $i$ -th prompt.
|
| 290 |
+
|
| 291 |
+
The similarity $S_{ij}$ between two prompts is then measured using the inner product of their vector representations:
|
| 292 |
+
|
| 293 |
+
$$
|
| 294 |
+
S_{ij} = \left\langle v_{t}^{i}, v_{t}^{j} \right\rangle = v_{t}^{i} \cdot v_{t}^{j}, \tag{5}
|
| 295 |
+
$$
|
| 296 |
+
|
| 297 |
+
where $\langle \cdot ,\cdot \rangle$ denotes the inner product.
|
| 298 |
+
|
| 299 |
+
The average similarity for each prompt $p_t^i$ is computed as:
|
| 300 |
+
|
| 301 |
+
$$
|
| 302 |
+
\bar{S}_{i} = \frac{1}{M - 1}\sum_{\substack{j = 0\\ j\neq i}}^{M - 1}S_{ij}. \tag{6}
|
| 303 |
+
$$
|
| 304 |
+
|
| 305 |
+
Finally, the prompt $p_t^{i^*}$ with the highest average similarity is selected as the best candidate:
|
| 306 |
+
|
| 307 |
+
$$
|
| 308 |
+
i^{*} = \arg\max_{i} \bar{S}_{i}. \tag{7}
|
| 309 |
+
$$
|
| 310 |
+
|
| 311 |
+
The selected prompt $p_t^{i^*}$ is then used to generate the image, which is taken as the best image; its corresponding CoT is likewise identified as the best CoT. The CoT text and the generated image are then concatenated to form the ImageGen-CoT dataset.
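The selection procedure in Eqs. (3)-(7) reduces to a few lines of NumPy; in the sketch below, `embed` is a stand-in for the text embedding model $\mathcal{E}$ and `prompts` are the $M$ candidate prompts, so the function is an illustration rather than the pipeline's actual code.

```python
import numpy as np

# Sketch of the self-consistency selection in Eqs. (3)-(7).
def select_best_prompt(prompts, embed):
    V = np.stack([embed(p) for p in prompts])     # Eq. (4): v^i = E(p^i)
    S = V @ V.T                                   # Eq. (5): inner-product similarity
    M = len(prompts)
    avg = (S.sum(axis=1) - np.diag(S)) / (M - 1)  # Eq. (6): average over j != i
    return int(np.argmax(avg))                    # Eq. (7): index of the best prompt
```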
|
| 312 |
+
|
| 313 |
+
# References
|
| 314 |
+
|
| 315 |
+
[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances
|
| 316 |
+
|
| 317 |
+
in neural information processing systems, 35:23716-23736, 2022. 2, 3
|
| 318 |
+
[2] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023. 3
|
| 319 |
+
[3] Tom B Brown. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020. 2
|
| 320 |
+
[4] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. Science China Information Sciences, 67(12):220101, 2024. 3
|
| 321 |
+
[5] Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499, 2023. 1, 2, 3
|
| 322 |
+
[6] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. 3
|
| 323 |
+
[7] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 3
|
| 324 |
+
[8] Yuying Ge, Sijie Zhao, Ziyun Zeng, Yixiao Ge, Chen Li, Xintao Wang, and Ying Shan. Making llama see and draw with seed tokenizer. arXiv preprint arXiv:2310.01218, 2023. 1, 2, 3, 4, 5, 6
|
| 325 |
+
[9] Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024. 1, 2, 5, 6
|
| 326 |
+
[10] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 3
|
| 327 |
+
[11] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. 3
|
| 328 |
+
[12] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 3
|
| 329 |
+
[13] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. 3, 4, 5
|
| 330 |
+
[14] Chunyuan Li, Zhe Gan, Zhengyuan Yang, Jianwei Yang, Linjie Li, Lijuan Wang, Jianfeng Gao, et al. Multimodal
|
| 331 |
+
|
| 332 |
+
foundation models: From specialists to general-purpose assistants. Foundations and Trends® in Computer Graphics and Vision, 16(1-2):1-214, 2024. 1, 3
|
| 333 |
+
[15] Hao Li, Changyao Tian, Jie Shao, Xizhou Zhu, Zhaokai Wang, Jinguo Zhu, Wenhan Dou, Xiaogang Wang, Hongsheng Li, Lewei Lu, et al. Synergen-vl: Towards synergistic image understanding and generation with vision experts and token folding. arXiv preprint arXiv:2412.09604, 2024. 2, 3
|
| 334 |
+
[16] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 3
|
| 335 |
+
[17] Haoyu Lu, Wen Liu, Bo Zhang, Bingxuan Wang, Kai Dong, Bo Liu, Jingxiang Sun, Tongzheng Ren, Zhuoshu Li, Hao Yang, et al. Deepseek-vl: towards real-world vision-language understanding. arXiv preprint arXiv:2403.05525, 2024. 3
|
| 336 |
+
[18] Jiasen Lu, Christopher Clark, Sangho Lee, Zichen Zhang, Savya Khosla, Ryan Marten, Derek Hoiem, and Aniruddha Kembhavi. Unified-io 2: Scaling autoregressive multimodal models with vision language audio and action. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26439-26455, 2024. 1, 2, 3
|
| 337 |
+
[19] Jae Wan Park, Sang Hyun Park, Jun Young Koh, Junha Lee, and Min Song. Cat: Contrastive adapter training for personalized image generation. arXiv preprint arXiv:2404.07554, 2024. 3
|
| 338 |
+
[20] Yuang Peng, Yuxin Cui, Haomiao Tang, Zekun Qi, Runpei Dong, Jing Bai, Chunrui Han, Zheng Ge, Xiangyu Zhang, and Shu-Tao Xia. Dreambench++: A human-aligned benchmark for personalized image generation. arXiv preprint arXiv:2406.16855, 2024. 3, 5, 9
|
| 339 |
+
[21] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 3
|
| 340 |
+
[22] Liao Qu, Huichao Zhang, Yiheng Liu, Xu Wang, Yi Jiang, Yiming Gao, Hu Ye, Daniel K Du, Zehuan Yuan, and Xinglong Wu. Tokenflow: Unified image tokenizer for multimodal understanding and generation. arXiv preprint arXiv:2412.03069, 2024. 2, 3
|
| 341 |
+
[23] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3
|
| 342 |
+
[24] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695, 2022. 3
|
| 343 |
+
[25] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 3
|
| 344 |
+
|
| 345 |
+
[26] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022. 3
|
| 346 |
+
[27] Kihyuk Sohn, Lu Jiang, Jarred Barber, Kimin Lee, Nataniel Ruiz, Dilip Krishnan, Huiwen Chang, Yuanzhen Li, Irfan Essa, Michael Rubinstein, et al. Styledrop: Text-to-image synthesis of any style. Advances in Neural Information Processing Systems, 36, 2024. 3
|
| 347 |
+
[28] Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14398-14409, 2024. 1, 3
|
| 348 |
+
[29] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024. 2, 3
|
| 349 |
+
[30] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. 2, 3
|
| 350 |
+
[31] Maria Tsimpoukelli, Jacob L Menick, Serkan Cabi, SM Eslami, Oriol Vinyals, and Felix Hill. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021. 2
|
| 351 |
+
[32] Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, and Lijuan Wang. Git: A generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100, 2022. 3
|
| 352 |
+
[33] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, and Jifeng Dai. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. 5
|
| 353 |
+
[34] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1, 2, 3, 4
|
| 354 |
+
[35] Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024. 2, 3
|
| 355 |
+
[36] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. 3
|
| 356 |
+
[37] Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Yumao Lu, Zicheng Liu, and Lijuan Wang. An empirical study of gpt-3 for few-shot knowledge-based vqa. In Proceedings
|
| 357 |
+
|
| 358 |
+
of the AAAI conference on artificial intelligence, pages 3081-3089, 2022. 2
|
| 359 |
+
[38] Zhengyuan Yang, Linjie Li, Kevin Lin, Jianfeng Wang, Chung-Ching Lin, Zicheng Liu, and Lijuan Wang. The dawn of LMMs: Preliminary explorations with GPT-4V(ision). arXiv preprint arXiv:2309.17421, 2023. 3
|
| 360 |
+
[39] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. 3
|
| 361 |
+
[40] Yuchen Zeng, Wonjun Kang, Yicong Chen, Hyung Il Koo, and Kangwook Lee. Can mllms perform text-to-image in-context learning? arXiv preprint arXiv:2402.01293, 2024. 2, 5, 9
|
| 362 |
+
[41] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 3
|
data/2025/2503_19xxx/2503.19312/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b99f761c0c83e62b320809eadb6e85bd9c8004f98ed4b94c1ee83e0c7086e1bb
|
| 3 |
+
size 691529
|
data/2025/2503_19xxx/2503.19312/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_19xxx/2503.19325/4614b9a9-a2b0-4e28-a1af-5ad0467d40ad_content_list.json
ADDED
|
@@ -0,0 +1,1760 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Long-Context Autoregressive Video Modeling with Next-Frame Prediction",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
106,
|
| 8 |
+
65,
|
| 9 |
+
890,
|
| 10 |
+
131
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Yuchao Gu, Weijia Mao, Mike Zheng Shou",
|
| 17 |
+
"bbox": [
|
| 18 |
+
323,
|
| 19 |
+
150,
|
| 20 |
+
666,
|
| 21 |
+
167
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—Long-context video modeling is essential for enabling generative models to function as world simulators, as they must maintain temporal coherence over extended time spans. However, most existing models are trained on short clips, limiting their ability to capture long-range dependencies, even with test-time extrapolation. While training directly on long videos is a natural solution, the rapid growth of vision tokens makes it computationally prohibitive. To support exploring efficient long-context video modeling, we first establish a strong autoregressive baseline called Frame AutoRegressive (FAR). FAR models temporal dependencies between continuous frames, converges faster than video diffusion transformers, and outperforms token-level autoregressive models. Based on this baseline, we observe context redundancy in video autoregression. Nearby frames are critical for maintaining temporal consistency, whereas distant frames primarily serve as context memory. To eliminate this redundancy, we propose the long short-term context modeling using asymmetric patchify kernels, which apply large kernels to distant frames to reduce redundant tokens, and standard kernels to local frames to preserve fine-grained detail. This significantly reduces the training cost of long videos. Our method achieves state-of-the-art results on both short and long video generation, providing an effective baseline for long-context autoregressive video modeling. The code is released at https://github.com/showlab/FAR.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
104,
|
| 30 |
+
191,
|
| 31 |
+
892,
|
| 32 |
+
349
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Index Terms—Video Generation, Autoregressive Video Modeling, Diffusion Model.",
|
| 39 |
+
"bbox": [
|
| 40 |
+
104,
|
| 41 |
+
361,
|
| 42 |
+
589,
|
| 43 |
+
376
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "1 INTRODUCTION",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
73,
|
| 53 |
+
445,
|
| 54 |
+
228,
|
| 55 |
+
459
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "Long-context video modeling is essential for advancing video generative models toward real-world simulation [1]. However, current state-of-the-art video generative models (e.g., Wan [2], Cosmos [3]) fall short in this aspect. These models are typically trained on short video clips; for example, Wan and Cosmos are trained on approximately 5-second video segments. These methods effectively capture short-term temporal consistency, such as object or human motion. However, they fail to maintain long-term consistency, like memorizing the observed environments, which requires learning from long video observations.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
71,
|
| 64 |
+
469,
|
| 65 |
+
490,
|
| 66 |
+
631
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Learning from long videos presents inherent challenges, primarily due to the prohibitive computational cost associated with processing the large number of vision tokens. As a result, many previous efforts have focused on test-time long video generation, employing training-free approaches to produce extended video sequences [4], [5]. However, while these methods can generate visually plausible long videos, they fail to effectively leverage long-range context. To truly capture long-range dependencies, it is essential to train or fine-tune models directly on long videos. However, this remains computationally expensive. Concurrent efforts, such as direct long-context tuning [6], face high computational costs, while test-time training [7] typically requires specialized architectural designs and incurs additional inference overhead. Therefore, an efficient framework for long-video training is urgently needed to enable effective long-context video modeling.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
71,
|
| 75 |
+
631,
|
| 76 |
+
490,
|
| 77 |
+
878
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "image",
|
| 83 |
+
"img_path": "images/407f8f881cb01be6f2a10cb4ea3baf51df7e02aa19b3443a7c1f7173421c2185.jpg",
|
| 84 |
+
"image_caption": [
|
| 85 |
+
"Fig. 1: Evaluation on Long Video Prediction. FAR better exploits long video contexts and achieves accurate prediction."
|
| 86 |
+
],
|
| 87 |
+
"image_footnote": [],
|
| 88 |
+
"bbox": [
|
| 89 |
+
509,
|
| 90 |
+
445,
|
| 91 |
+
916,
|
| 92 |
+
676
|
| 93 |
+
],
|
| 94 |
+
"page_idx": 0
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"type": "text",
|
| 98 |
+
"text": "To investigate this problem, we first establish a baseline for video autoregressive modeling, namely the Frame AutoRegressive (FAR) model. Unlike token-based autoregressive models (Token-AR), FAR operates in a continuous latent space, capturing causal dependencies between frames while still allowing full attention modeling within each frame. FAR is trained using a frame-wise flow matching objective with autoregressive context. As a hybrid AR-Diffusion model, FAR also encounters a gap between the observed context during training and inference. This is a common issue in similar models (e.g., [8], [9], [10]). To address this problem, we propose training FAR with stochastic clean context, which enables the model to leverage clean context signals during training and thus reduces bias",
|
| 99 |
+
"bbox": [
|
| 100 |
+
501,
|
| 101 |
+
737,
|
| 102 |
+
921,
|
| 103 |
+
941
|
| 104 |
+
],
|
| 105 |
+
"page_idx": 0
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"type": "page_number",
|
| 109 |
+
"text": "1",
|
| 110 |
+
"bbox": [
|
| 111 |
+
911,
|
| 112 |
+
32,
|
| 113 |
+
919,
|
| 114 |
+
42
|
| 115 |
+
],
|
| 116 |
+
"page_idx": 0
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"type": "aside_text",
|
| 120 |
+
"text": "arXiv:2503.19325v3 [cs.CV] 18 May 2025",
|
| 121 |
+
"bbox": [
|
| 122 |
+
19,
|
| 123 |
+
253,
|
| 124 |
+
58,
|
| 125 |
+
707
|
| 126 |
+
],
|
| 127 |
+
"page_idx": 0
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"type": "page_footnote",
|
| 131 |
+
"text": "- Corresponding author: Mike Zheng Shou (E-mail: mikeshou@nus.edu.sg)",
|
| 132 |
+
"bbox": [
|
| 133 |
+
71,
|
| 134 |
+
900,
|
| 135 |
+
488,
|
| 136 |
+
912
|
| 137 |
+
],
|
| 138 |
+
"page_idx": 0
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"type": "page_footnote",
|
| 142 |
+
"text": "- Yuchao Gu, Weijia Mao and Mike Zheng Shou are with the Showlab, National University of Singapore.",
|
| 143 |
+
"bbox": [
|
| 144 |
+
71,
|
| 145 |
+
912,
|
| 146 |
+
490,
|
| 147 |
+
936
|
| 148 |
+
],
|
| 149 |
+
"page_idx": 0
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"type": "text",
|
| 153 |
+
"text": "at inference time. We demonstrate that FAR trained with stochastic clean context achieves better performance than video diffusion transformers and Token-AR, establishing itself as a strong autoregressive video generation baseline.",
|
| 154 |
+
"bbox": [
|
| 155 |
+
71,
|
| 156 |
+
53,
|
| 157 |
+
491,
|
| 158 |
+
112
|
| 159 |
+
],
|
| 160 |
+
"page_idx": 1
|
| 161 |
+
},
|
| 162 |
+
{
|
| 163 |
+
"type": "text",
|
| 164 |
+
"text": "Building on FAR, we observe context redundancy in video autoregressive modeling. Specifically, the current frame relies more heavily on nearby frames to capture local motion consistency, while distant frames primarily function as context memory. To exploit this property, we introduce long short-term context modeling with asymmetric patchify kernels. In this approach, we apply a large patchify kernel to distant context frames in order to compress redundant tokens, while using the standard patchify kernel on nearby context frames to preserve fine-grained temporal consistency. This method significantly reduces training costs on long videos and leads to notable improvements in FAR's long-context modeling capability, as demonstrated in action-conditioned long video prediction.",
|
| 165 |
+
"bbox": [
|
| 166 |
+
71,
|
| 167 |
+
112,
|
| 168 |
+
491,
|
| 169 |
+
316
|
| 170 |
+
],
|
| 171 |
+
"page_idx": 1
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"type": "text",
|
| 175 |
+
"text": "Our contributions are summarized as follows:",
|
| 176 |
+
"bbox": [
|
| 177 |
+
96,
|
| 178 |
+
316,
|
| 179 |
+
419,
|
| 180 |
+
330
|
| 181 |
+
],
|
| 182 |
+
"page_idx": 1
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"type": "list",
|
| 186 |
+
"sub_type": "text",
|
| 187 |
+
"list_items": [
|
| 188 |
+
"1) We introduce FAR, an strong autoregressive video generation baseline, combined with stochastic clean context to bridge the training-inference gap in observed context.",
|
| 189 |
+
"2) Building on FAR, we observe context redundancy in video autoregressive modeling and propose long short-term context modeling with asymmetric patchify kernels to substantially reduce long-video training costs.",
|
| 190 |
+
"3) FAR achieves state-of-the-art performance in both short- and long-video modeling."
|
| 191 |
+
],
|
| 192 |
+
"bbox": [
|
| 193 |
+
94,
|
| 194 |
+
339,
|
| 195 |
+
491,
|
| 196 |
+
501
|
| 197 |
+
],
|
| 198 |
+
"page_idx": 1
|
| 199 |
+
},
{
    "type": "text",
    "text": "2 RELATED WORK",
    "text_level": 1,
    "bbox": [73, 522, 241, 537],
    "page_idx": 1
},
{
    "type": "text",
    "text": "2.1 Video Generation",
    "text_level": 1,
    "bbox": [73, 542, 245, 556],
    "page_idx": 1
},
{
    "type": "text",
    "text": "Video Diffusion Models. Recent advances in video generation have led to the scaling of video diffusion transformers [1], [11], [12] for text-to-video generation, resulting in superior visual quality. Pretrained text-to-video models are subsequently fine-tuned to incorporate images as conditions for image-to-video generation [11], [13], [14]. The trained image-to-video models can be utilized for autoregressive long-video generation using a sliding window [4], [15], but their ability to leverage visual context is limited by the sliding window's size. In this work, we show that FAR achieves better convergence than video diffusion transformers for short-video generation while naturally supporting variable-length visual context.",
    "bbox": [71, 561, 491, 752],
    "page_idx": 1
},
{
    "type": "text",
    "text": "Token Autoregressive Models. Video generation based on token autoregressive models (i.e., Token AR) aims to follow the successful paradigm of large language models. These models typically quantize continuous frames into discrete tokens [16], [17] and learn the causal dependencies between tokens using language models [18], [19]. While they achieve plausible performance, their generation quality remains inferior to that of video diffusion transformers due to information loss from vector quantization. Additionally, unidirectional visual token modeling may be suboptimal [20]. Subsequent studies have explored continuous tokens [21] without vector quantization but have not demonstrated their effectiveness in video generation. In this work, we",
    "bbox": [71, 752, 491, 941],
    "page_idx": 1
},
{
    "type": "table",
    "img_path": "images/89bb930095678af6ce70bb44d4f06a52e93eeeb73dbc68b815a5cd7534142d47.jpg",
    "table_caption": [
        "TABLE 1: Model Variants of FAR. We follow the model size configurations of DiT [39] and SiT [40]."
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td>Models</td><td>#Layers</td><td>Hidden Size</td><td>MLP</td><td>#Heads</td><td>Params</td></tr><tr><td>FAR-B</td><td>12</td><td>768</td><td>3072</td><td>12</td><td>130M</td></tr><tr><td>FAR-M</td><td>12</td><td>1024</td><td>4096</td><td>16</td><td>230M</td></tr><tr><td>FAR-L</td><td>24</td><td>1024</td><td>4096</td><td>16</td><td>457M</td></tr><tr><td>FAR-XL</td><td>28</td><td>1152</td><td>4608</td><td>18</td><td>674M</td></tr><tr><td>FAR-B-Long</td><td>12</td><td>768</td><td>3072</td><td>12</td><td>158M</td></tr><tr><td>FAR-M-Long</td><td>12</td><td>1024</td><td>4096</td><td>16</td><td>280M</td></tr></table>",
    "bbox": [506, 79, 923, 178],
    "page_idx": 1
},
{
    "type": "text",
    "text": "show that FAR can learn causal dependencies from continuous frames and achieve better performance than Token AR in both short- and long-video modeling.",
    "bbox": [501, 203, 921, 247],
    "page_idx": 1
},
{
    "type": "text",
    "text": "Hybrid AR-Diffusion Models. To leverage the strengths of both continuous latent spaces and autoregressive modeling, recent studies [10], [22], [23] have explored hybrid AR-Diffusion models. These models typically employ a diffusion objective for image-level modeling with autoregressive contexts. Hybrid AR-Diffusion models are widely applicable to both visual [8]-[10] and language generation [24], [25]. Recent research has also applied this paradigm to frame-level autoregressive modeling [8], [9] for video generation. However, these models suffer from a training-inference discrepancy in the observed context. Some studies [26], [27] have attempted to mitigate this issue by maintaining a clean copy of the noised sequence during training, but this approach doubles the training cost. Among these methods, FAR efficiently addresses the training-inference gap through the proposed stochastic clean context, demonstrating superior performance in long-context video modeling.",
    "bbox": [501, 247, 923, 496],
    "page_idx": 1
},
{
    "type": "text",
    "text": "2.2 Long-Context Language Modeling",
    "text_level": 1,
    "bbox": [504, 516, 799, 532],
    "page_idx": 1
},
{
    "type": "text",
    "text": "Long-context language modeling typically follows two main approaches: test-time extrapolation and direct fine-tuning on long sequences. In the test-time extrapolation setting, many studies explore extrapolatable positional embeddings [28]-[30]. While these methods enable inference on longer sequences, they often underperform compared to models trained directly on long-text corpora. To reduce the computational cost, recent work [31], [32] has proposed efficient long-sequence fine-tuning strategies for large language models. However, training on long video sequences poses greater challenges, as vision tokens grow much faster than language tokens with increasing context length. To address this, we propose a long short-term context modeling approach using asymmetric patchify kernels, which effectively reduces context redundancy during long-video training.",
    "bbox": [501, 536, 923, 757],
    "page_idx": 1
},
{
    "type": "text",
    "text": "2.3 Long-Context Video Modeling",
    "text_level": 1,
    "bbox": [504, 776, 767, 792],
    "page_idx": 1
},
{
    "type": "text",
    "text": "Recent advancements in video generation models have enabled their use as interactive world simulators [33]-[35], which require the ability to exploit long-range context and memorize the observed environment. However, existing video diffusion transformers lack an effective mechanism to utilize long-range context. Although early work [36]-[38] has explored long-video prediction, it has been limited in visual quality and long-range consistency. In this work, we introduce FAR, an efficient framework for long-context autoregressive video modeling.",
    "bbox": [501, 796, 923, 944],
    "page_idx": 1
},
{
    "type": "page_number",
    "text": "2",
    "bbox": [911, 32, 921, 42],
    "page_idx": 1
},
{
    "type": "image",
    "img_path": "images/2cc9046ca16b42232bbbc5c8698c2029abf6a78515dd071c251e129ab937afce.jpg",
    "image_caption": [
        "Fig. 2: Illustration of FAR's Training and Inference Pipeline. In short-video training, a portion of frames is randomly replaced with clean context frames, marked with a unique timestep embedding (e.g., -1) beyond the flow-matching scheduler. In long-video training, we adopt long short-term context modeling. A long-term context window with aggressive patchification is adopted to reduce redundant vision tokens, while a short-term context window is used to model fine-grained temporal consistency."
    ],
    "image_footnote": [],
    "bbox": [76, 55, 919, 290],
    "page_idx": 2
},
{
    "type": "image",
    "img_path": "images/254d4a4e258f17265e5082c5b5307e499cd02df0b57a2bcf8b83c7c845eb8f7b.jpg",
    "image_caption": [
        "(a) Short-Video Training"
    ],
    "image_footnote": [],
    "bbox": [75, 381, 272, 535],
    "page_idx": 2
},
{
    "type": "image",
    "img_path": "images/eb548ba01705d7659df2974f82b7d6edfa09881c002719e3f9fd85aeb3258de7.jpg",
    "image_caption": [
        "(b) Long-Context Training"
    ],
    "image_footnote": [],
    "bbox": [295, 382, 488, 535],
    "page_idx": 2
},
{
    "type": "text",
    "text": "3 PRELIMINARY",
    "text_level": 1,
    "bbox": [73, 631, 218, 645],
    "page_idx": 2
},
{
    "type": "text",
    "text": "3.1 Flow Matching",
    "text_level": 1,
    "bbox": [71, 656, 223, 671],
    "page_idx": 2
},
{
    "type": "text",
    "text": "Flow Matching [41]-[43] is a simple alternative objective for training diffusion models. Rather than modeling the reverse process with stochastic differential equations, Flow Matching learns a continuous vector field that deterministically connects the two distributions.",
    "bbox": [71, 676, 490, 750],
    "page_idx": 2
},
{
    "type": "text",
    "text": "Specifically, given a data sample $x_0 \\sim p_{\\mathrm{data}}(x)$ and a noise sample $x_1 \\sim \\mathcal{N}(0, I)$, we construct a continuous trajectory connecting them via linear interpolation:",
    "bbox": [71, 751, 488, 796],
    "page_idx": 2
},
{
    "type": "equation",
    "text": "\n$$\nx(t) = (1 - t) x_0 + t x_1, \\quad t \\in [0, 1]. \\tag{1}\n$$\n",
    "text_format": "latex",
    "bbox": [158, 806, 488, 824],
    "page_idx": 2
},
{
    "type": "text",
    "text": "This formulation implies a constant velocity:",
    "bbox": [73, 834, 387, 849],
    "page_idx": 2
},
{
    "type": "equation",
    "text": "\n$$\n\\frac{dx(t)}{dt} = v^{*} = x_1 - x_0. \\tag{2}\n$$\n",
    "text_format": "latex",
    "bbox": [200, 859, 488, 888],
    "page_idx": 2
},
{
    "type": "text",
    "text": "To enable the model to learn the optimal transport between the data and noise distributions, we introduce a learnable time-dependent velocity field $v_{\\theta}(x,t)$. During training, a",
    "bbox": [71, 898, 491, 944],
    "page_idx": 2
},
{
    "type": "image",
    "img_path": "images/5dc2bddfca9c5824b2994683fc34d35ac8e76ae47ba5f49fcee40fa0094a44ed.jpg",
    "image_caption": [
        "(a) FAR without Stochastic Clean Context"
    ],
    "image_footnote": [],
    "bbox": [514, 382, 921, 472],
    "page_idx": 2
},
{
    "type": "image",
    "img_path": "images/3c397b2a61d17cb680ab4fc9396dee0477bfe989d184c7908fccbdd6357cb840.jpg",
    "image_caption": [
        "Fig. 3: Visualization of Attention Mask. FAR enables full attention within a frame while maintaining causality at the frame level. In long-context training, we adopt aggressive patchification for long-term context frames to reduce tokens.",
        "(b) FAR with Stochastic Clean Context (Ours)",
        "Fig. 4: Effect of Stochastic Clean Context. This technique eliminates the training-inference gap in the observed context."
    ],
    "image_footnote": [],
    "bbox": [514, 484, 921, 563],
    "page_idx": 2
},
{
    "type": "text",
    "text": "random time $t \\sim U(0,1)$ is sampled, and the model is optimized by minimizing the following objective:",
    "bbox": [503, 628, 921, 659],
    "page_idx": 2
},
{
    "type": "equation",
    "text": "\n$$\n\\mathcal{L}(\\theta) = \\mathbb{E}_{x_0, x_1, t} \\left[ \\| v_{\\theta}(x(t), t) - v^{*} \\|^2 \\right]. \\tag{3}\n$$\n",
    "text_format": "latex",
    "bbox": [581, 667, 921, 693],
    "page_idx": 2
},
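The flow-matching recipe in Eqs. 1-3 is compact enough to sketch directly. The following is a minimal illustrative PyTorch sketch of the objective as described above, not the paper's released code; `velocity_model` is a placeholder for any network $v_\theta(x, t)$.

```python
import torch

def flow_matching_loss(velocity_model, x0):
    """Flow-matching objective of Eqs. 1-3 (illustrative sketch)."""
    x1 = torch.randn_like(x0)                             # noise sample x1 ~ N(0, I)
    t = torch.rand(x0.shape[0], *([1] * (x0.dim() - 1)))  # t ~ U(0, 1), broadcastable
    xt = (1 - t) * x0 + t * x1                            # Eq. 1: linear interpolation
    v_star = x1 - x0                                      # Eq. 2: constant target velocity
    v_pred = velocity_model(xt, t.flatten())              # learnable field v_theta(x, t)
    return ((v_pred - v_star) ** 2).mean()                # Eq. 3
```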
{
    "type": "text",
    "text": "3.2 Autoregressive Models",
    "text_level": 1,
    "bbox": [504, 715, 718, 731],
    "page_idx": 2
},
{
    "type": "text",
    "text": "Autoregressive models are a class of probabilistic models where each element in a sequence is conditioned on its preceding elements, denoted as the context. Formally, given a sequence of tokens $(x_1, x_2, \\ldots, x_n)$, an autoregressive model assumes that each token $x_i$ is generated based on its previous tokens $(x_1, x_2, \\ldots, x_{i-1})$. The generative process can be expressed as a factorization of the joint probability:",
    "bbox": [501, 738, 921, 842],
    "page_idx": 2
},
{
    "type": "equation",
    "text": "\n$$\np(x_1, x_2, \\dots, x_n) = \\prod_{i=1}^{n} p(x_i \\mid x_1, x_2, \\dots, x_{i-1}). \\tag{4}\n$$\n",
    "text_format": "latex",
    "bbox": [552, 851, 921, 888],
    "page_idx": 2
},
{
    "type": "text",
    "text": "By modeling each token conditioned on its preceding tokens, autoregressive models naturally capture the sequential dependencies inherent in data.",
    "bbox": [503, 898, 921, 941],
    "page_idx": 2
},
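The generative process implied by Eq. 4 can be written as a tiny loop. This is a generic sketch, not FAR-specific code; `p_next` is a hypothetical sampler for $p(x_i \mid x_1, \ldots, x_{i-1})$ supplied by the caller.

```python
def autoregressive_sample(p_next, n, prefix=()):
    """Draw each element conditioned on all previously generated ones (Eq. 4)."""
    seq = list(prefix)
    while len(seq) < n:
        seq.append(p_next(seq))  # x_i ~ p(. | x_1, ..., x_{i-1})
    return seq
```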
{
    "type": "page_number",
    "text": "3",
    "bbox": [911, 32, 921, 42],
    "page_idx": 2
},
{
    "type": "image",
    "img_path": "images/ec873bf468a87a77ff90e21ae5ea32c0dfe51542d1b459c00095bd98d6467055.jpg",
    "image_caption": [
        "Fig. 5: Comparison of FAR and video diffusion transformer. FAR achieves better convergence than video diffusion transformer in unconditional video generation on UCF-101."
    ],
    "image_footnote": [],
    "bbox": [81, 54, 486, 244],
    "page_idx": 3
},
{
    "type": "text",
    "text": "4 FAR",
    "text_level": 1,
    "bbox": [73, 319, 148, 335],
    "page_idx": 3
},
{
    "type": "text",
    "text": "We first present the FAR framework in Sec. 4.1, followed by training challenges and solutions in Sec. 4.2. Sec. 4.3 analyzes the design for long-context video modeling, and Sec. 4.4 introduces the KV cache for faster inference.",
    "bbox": [71, 339, 491, 398],
    "page_idx": 3
},
{
    "type": "text",
    "text": "4.1 Framework Overview",
    "text_level": 1,
    "bbox": [73, 411, 272, 425],
    "page_idx": 3
},
{
    "type": "text",
    "text": "Architecture. As shown in Fig. 2 (a), FAR is built upon the diffusion transformer [39], [40]. We adopt the model configuration of DiT [39] and Latte [44], as listed in Table. 1. The key architectural difference between FAR and video diffusion transformers (e.g., Latte [44]) lies in the attention mechanism. As shown in Fig. 3(a), for each frame, we apply causal attention at the frame level while maintaining full attention within each frame. We adopt this causal spatiotemporal attention for all layers, instead of the interleaved spatial and temporal attention used in Latte. In FAR, image generation and image-conditioned video generation are jointly learned thanks to the causal mask, whereas the video diffusion transformer [44] requires additional image-video co-training.",
    "bbox": [71, 429, 490, 633],
    "page_idx": 3
},
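The frame-level causal / intra-frame full attention pattern described above reduces to a simple block mask. A minimal PyTorch sketch, assuming a flat token layout of `num_frames * tokens_per_frame`; this illustrates the pattern of Fig. 3(a), not the authors' implementation.

```python
import torch

def frame_causal_mask(num_frames, tokens_per_frame):
    """Full attention within a frame, causal attention across frames.
    Returns a boolean mask where True marks allowed query-key pairs."""
    frame_id = torch.arange(num_frames).repeat_interleave(tokens_per_frame)
    # a token may attend to any token whose frame index is not later than its own
    return frame_id.unsqueeze(1) >= frame_id.unsqueeze(0)

mask = frame_causal_mask(num_frames=3, tokens_per_frame=4)  # (12, 12) boolean mask
```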
{
    "type": "text",
    "text": "Basic Training Pipeline. The training pipeline of FAR is illustrated in Fig. 2 (a). Given a video sequence $\\mathbf{X}$, we first employ a pretrained VAE to compress it into the latent space $\\mathbf{Z} \\in \\mathbb{R}^{T \\times H \\times W}$, where $T$, $H$, and $W$ denote the number of frames, height, and width of the latent features, respectively. Note that although we primarily adopt an image VAE in this work, FAR can also be trained with a video VAE since our autoregressive unit is the latent frame. Following diffusion forcing [8], we independently sample a timestep for each frame. We then interpolate between the clean latent and the sampled noise using Eq. 1 and apply the flow matching objective in Eq. 3 for learning. The key difference between FAR and image flow matching is that we adopt causal spatiotemporal attention, allowing each frame to access previous context frames during denoising.",
    "bbox": [71, 633, 491, 867],
    "page_idx": 3
},
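The per-frame timestep sampling is the step that distinguishes this pipeline from image flow matching. A short sketch under the stated setup (latents of shape `(T, C, H, W)`); it mirrors the described diffusion-forcing noising, not the exact released code.

```python
import torch

def noise_frames_independently(z):
    """Each latent frame z[i] gets its own timestep before the Eq. 1 interpolation."""
    t = torch.rand(z.shape[0], 1, 1, 1)   # independent timestep per frame
    noise = torch.randn_like(z)
    zt = (1 - t) * z + t * noise          # Eq. 1 applied frame-wise
    return zt, t, noise
```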
{
    "type": "text",
    "text": "4.2 Short-Video Modeling",
    "text_level": 1,
    "bbox": [73, 878, 277, 895],
    "page_idx": 3
},
{
    "type": "text",
    "text": "Training-Inference Gap in Observed Context. As a hybrid AR-diffusion model, FAR also encounters a training-inference gap in the observed context. As illustrated in",
    "bbox": [71, 898, 491, 944],
    "page_idx": 3
},
{
    "type": "table",
    "img_path": "images/f25aa82351d8eb1aa5b2574998ad4c950d6f5dfdca988012699df81f82db0d50.jpg",
    "table_caption": [
        "TABLE 2: Comparison of Routes for Long-Context Video Modeling. In test-time extrapolation, FAR is trained on short videos and evaluated on long videos using different extrapolation methods."
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td>Method</td><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td></tr><tr><td colspan=\"5\">Test-Time Extrapolation</td></tr><tr><td>Sliding Window</td><td>0.365</td><td>12.3</td><td>0.415</td><td>161</td></tr><tr><td>Naive RoPE Ext.</td><td>0.372</td><td>12.2</td><td>0.397</td><td>396</td></tr><tr><td>RIFLEX [5]</td><td>0.372</td><td>12.2</td><td>0.398</td><td>391</td></tr><tr><td colspan=\"5\">Long-Video Training</td></tr><tr><td>FAR-B-Long</td><td>0.576</td><td>19.3</td><td>0.153</td><td>34</td></tr></table>",
    "bbox": [509, 108, 919, 243],
    "page_idx": 3
},
{
    "type": "text",
    "text": "Fig. 2(a), each clean latent is fused with sampled noise for the flow matching objective, as defined in Eq. 1. Consequently, later frames can only access the noised versions of previous frames during training, which leads to a distribution shift when clean context frames are used during inference.",
    "bbox": [501, 265, 923, 352],
    "page_idx": 3
},
{
    "type": "text",
    "text": "As shown in the example in Fig. 4(a), the training-inference gap in the observed context leads to a distribution shift when inferring with a clean context. Although adding mild noise to the context during inference can help mitigate this effect, it still causes low-level flickering, degrading the quality of the generated video. Recent works [26], [27] attempt to address this issue by maintaining a clean copy of the noised sequence during training. However, this approach doubles the training costs.",
    "bbox": [501, 353, 921, 484],
    "page_idx": 3
},
{
    "type": "text",
    "text": "Our Solution: Stochastic Clean Context. To bridge the gap in observed context, we introduce stochastic clean context for training FAR. As illustrated in Fig. 2(a), we randomly replace a portion of the noised frames with their corresponding clean context and assign them a unique timestep embedding (e.g., -1) beyond the flow-matching timestep scheduler. These clean context frames are excluded from loss computation and are implicitly learned through later frames that use them as context. During inference, this unique timestep embedding guides the model to use clean context effectively. Training FAR with stochastic clean context does not add extra computation and does not conflict with different timestep sampling strategies during training (e.g., logit-normal sampling [45]). It effectively resolves the training-inference discrepancy, as exemplified in Fig. 4(b).",
    "bbox": [501, 484, 921, 702],
    "page_idx": 3
},
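A minimal sketch of stochastic clean context as described above, building on the per-frame noising sketch earlier; `p_clean` is an assumed replacement rate, not a value stated in the paper.

```python
import torch

CLEAN_T = -1.0  # unique timestep tag for clean context frames

def apply_stochastic_clean_context(zt, z_clean, t, p_clean=0.3):
    """Randomly revert some noised frames to their clean latents, tag them
    with timestep -1, and exclude them from the loss."""
    T = zt.shape[0]
    is_clean = (torch.rand(T) < p_clean).view(T, 1, 1, 1)
    zt = torch.where(is_clean, z_clean, zt)
    t = torch.where(is_clean, torch.full_like(t, CLEAN_T), t)
    loss_mask = ~is_clean.view(T)         # clean context frames carry no loss
    return zt, t, loss_mask
```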
{
    "type": "text",
    "text": "FAR vs. Video Diffusion Transformer. FAR and the video diffusion transformer differ only in their training schemes. FAR is trained with independent noise and causal attention, while the video diffusion transformer is trained with uniform noise and full attention. This raises an interesting question: can FAR surpass video diffusion transformers? To explore this, we convert FAR to a video diffusion transformer as a baseline, denoted as Video DiT. We align the training settings to compare the two paradigms. As shown in Fig. 5, FAR achieves better convergence than the Video DiT, demonstrating its potential to become a strong baseline for autoregressive video modeling.",
    "bbox": [501, 702, 921, 878],
    "page_idx": 3
},
{
    "type": "text",
    "text": "4.3 Long-Context Video Modeling",
    "text_level": 1,
    "bbox": [504, 893, 767, 909],
    "page_idx": 3
},
{
    "type": "text",
    "text": "Test-Time Extrapolation vs. Long-Video Training. Two promising approaches to achieving long-context modeling",
    "bbox": [503, 912, 923, 944],
    "page_idx": 3
},
{
    "type": "page_number",
    "text": "4",
    "bbox": [911, 32, 921, 42],
    "page_idx": 3
},
{
    "type": "image",
    "img_path": "images/1a013fee8024c26ba299fdc759acfdaec5a22b3233843d39c08ed8a1f6211c5b.jpg",
    "image_caption": [
        "(a) Token Length Comparison"
    ],
    "image_footnote": [],
    "bbox": [81, 54, 357, 213],
    "page_idx": 4
},
{
    "type": "image",
    "img_path": "images/0757a8d6f34cc849afcc7a3466fc1c4cb29e1319ac291d670b03d924b87893e9.jpg",
    "image_caption": [
        "(b) Training Time Comparison",
        "Fig. 6: Relation between Token Context Length and Vision Context Length. With the proposed long short-term context modeling, the token context length scales more slowly with increasing vision context length compared to uniform context modeling. When training on long videos, the reduced number of tokens leads to significantly lower training costs and memory usage."
    ],
    "image_footnote": [],
    "bbox": [364, 55, 638, 213],
    "page_idx": 4
},
{
    "type": "image",
    "img_path": "images/9ff43a3908da61f011d85e8d969511025a947690847fa33a7f7c7a2ee68463d3.jpg",
    "image_caption": [
        "(c) Training Memory Comparison"
    ],
    "image_footnote": [],
    "bbox": [643, 55, 916, 213],
    "page_idx": 4
},
{
    "type": "image",
    "img_path": "images/3173cd3ff2bbc61ff58061cce68353c49c78a5e32045ff2e31b65f676f1af221.jpg",
    "image_caption": [
        "Fig. 7: KV Cache for Short-Video Modeling in FAR. We add an additional caching step to encode the current decoded frame into the KV cache for autoregressive generation."
    ],
    "image_footnote": [],
    "bbox": [88, 311, 485, 489],
    "page_idx": 4
},
{
    "type": "text",
    "text": "in language modeling are test-time extrapolation [29], [30], [46] and long-sequence fine-tuning [31], [32]. In video modeling, most efforts [4], [5], [15] have focused on test-time extrapolation to generate long videos. However, we question whether test-time extrapolation can effectively solve long-context video modeling. To investigate this, we train FAR on action-conditioned short-video prediction and extend it to long-video prediction using various extrapolation techniques. As summarized in Table. 2, test-time extrapolation results in significantly lower quality than the baseline sliding window approach, leading to poor predictions. Therefore, direct training on long videos may be necessary to achieve effective long-context video modeling.",
    "bbox": [71, 560, 490, 750],
    "page_idx": 4
},
{
    "type": "text",
    "text": "Explosive Token Growth in Long Video Training. Visual data contains redundancy, causing vision tokens to expand much faster than language tokens as context increases. For example, a video sequence of 128 frames requires more than 8K tokens, as illustrated in Fig. 6(a). Consequently, training on long videos becomes computationally prohibitive, as shown in Fig. 6(b, c).",
    "bbox": [71, 751, 488, 853],
    "page_idx": 4
},
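The token growth above is easy to reproduce with back-of-the-envelope arithmetic, assuming 64 tokens per frame (as in Sec. 5.1); the 8-frame short-term window and the 4x4 long-term kernel (16x fewer tokens per frame) used for comparison are assumptions consistent with the figures, not quoted settings.

```python
frames = 128
uniform_tokens = frames * 64                                   # 8192 (>8K) tokens
short_window = 8
long_short = short_window * 64 + (frames - short_window) * 64 // 16
print(uniform_tokens, long_short)                              # 8192 vs 992
```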
{
    "type": "text",
    "text": "Our Solution: Long Short-Term Context Modeling. To address explosive token growth in long-video training, we leverage the concept of context redundancy. Specifically, in video autoregression, the current frame depends more heavily on nearby frames to capture local motion consistency, whereas earlier frames primarily serve as context memory.",
    "bbox": [71, 854, 490, 943],
    "page_idx": 4
},
{
    "type": "text",
    "text": "To eliminate context redundancy, we propose long short-term context modeling with asymmetric patchify kernels. As shown in Fig. 2(b), we maintain a high-resolution short-term context window to capture fine-grained temporal consistency, and a low-resolution long-term context window, where we apply a large patchify kernel to compress context tokens. During training, given that the data has a maximum sequence length of $m$ frames, we fix the short-term context window to $n$ frames and randomly sample the long-term context frames from the range $[0, m - n]$. The attention mask with long short-term context modeling is shown in Fig. 3(b), where the long-term context uses fewer tokens. As demonstrated in Fig. 6(a), this strategy ensures that increasing the vision context length maintains a manageable token context length. With long short-term context modeling, we can significantly reduce the cost and memory usage of long-video training, as shown in Fig. 6(b, c). To prevent interference between long-term and short-term contexts, we adopt separate projection layers for each context, inspired by MM-DiT [45]. This approach results in a slightly larger number of parameters, referred to as FAR-Long in Table. 1.",
    "bbox": [501, 308, 921, 614],
    "page_idx": 4
},
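The asymmetric patchification itself is a space-to-channel fold. A minimal shape-level sketch, assuming latents of shape `(T, C, H, W)` with `H`, `W` divisible by `k_long`; the separate long/short projection layers mentioned above are deliberately omitted.

```python
import torch
import torch.nn.functional as F

def asymmetric_patchify(z, short_window, k_long=4):
    """Frames outside the short-term window are folded with a large k_long
    kernel (fewer, fatter tokens); recent frames keep per-position tokens."""
    long_part, short_part = z[:-short_window], z[-short_window:]
    # (T_long, C*k*k, (H/k)*(W/k)) -> (T_long, tokens, channels)
    long_tokens = F.unfold(long_part, k_long, stride=k_long).transpose(1, 2)
    # (T_short, H*W, C): standard fine-grained tokens
    short_tokens = short_part.flatten(2).transpose(1, 2)
    return long_tokens, short_tokens
```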
{
    "type": "text",
    "text": "4.4 Inference-Time KV Cache",
    "text_level": 1,
    "bbox": [504, 632, 733, 645],
    "page_idx": 4
},
{
    "type": "text",
    "text": "KV Cache for Short-Video Modeling. Due to the autoregressive nature of FAR, we can leverage KV-Cache to accelerate inference. As illustrated in Fig. 7, for each frame, we first use the flow-matching schedule to decode it into the clean latent frame. We then introduce an additional caching step to encode the clean latent frame into the KV cache. As discussed in Sec. 4.2, we use timestep $t = -1$ to denote the clean context frame in the caching step. These KV caches are subsequently used for autoregressive decoding of future frames.",
    "bbox": [501, 650, 921, 795],
    "page_idx": 4
},
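In loop form, the decode-then-cache procedure of Fig. 7 looks as follows. This is a hedged sketch of the described control flow; `decode_frame` and `model.encode_to_cache` are hypothetical interfaces, not the released API.

```python
def generate_with_kv_cache(model, num_frames, decode_frame):
    """After a frame is fully denoised, one extra forward pass at timestep -1
    writes its clean latent into the KV cache for later frames."""
    frames, kv_cache = [], []
    for _ in range(num_frames):
        frame = decode_frame(model, kv_cache)                 # flow-matching sampling
        kv_cache.append(model.encode_to_cache(frame, t=-1))   # caching step
        frames.append(frame)
    return frames
```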
{
    "type": "text",
    "text": "Multi-Level KV Cache for Long-Video Modeling. In long-context video modeling, we employ long short-term context to reduce redundant visual tokens. To accommodate this, we introduce a multi-level KV cache. As illustrated in Fig. 8, frames in the long-term context window are encoded into the L2 cache (4 tokens per frame), while frames in the short-term context window are encoded into the L1 cache. When the frame being decoded exceeds the short-term context window, the earliest frame in the short-term context window is moved to the long-term context window and encoded into the L2",
    "bbox": [501, 796, 921, 941],
    "page_idx": 4
},
{
    "type": "page_number",
    "text": "5",
    "bbox": [911, 32, 921, 42],
    "page_idx": 4
},
{
    "type": "image",
    "img_path": "images/4f64ca6f3f36efa494bea69bee67e46ba0961b8e68256c9b5df99b1e19ff8648.jpg",
    "image_caption": [
        "Fig. 8: Multi-Level KV Cache for Long-Context Video Modeling in FAR. When a frame leaves the short-term context window, we encode it to the L2 cache and re-encode the L1 cache in the window. We then use the encoded KV caches for decoding the current frame. Note that we divide the process into three steps for better illustration, though it can be merged into a single forward pass in implementation."
    ],
    "image_footnote": [],
    "bbox": [83, 55, 367, 236],
    "page_idx": 5
},
{
    "type": "image",
    "img_path": "images/15ca0adaf530399690bf4c9e1102d00dee7629d414819f897867483f14e5889f.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [367, 55, 638, 234],
    "page_idx": 5
},
{
    "type": "image",
    "img_path": "images/c52abf194777d4310babf31d8c4d65c4263b48586d4f0c09529716ecb2c78e00.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [638, 55, 916, 236],
    "page_idx": 5
},
{
    "type": "table",
    "img_path": "images/fe72f5d1b03b31b93b22535da0e568cfebe24430d8d575979fc2304813ff6916.jpg",
    "table_caption": [
        "TABLE 3: Quantitative Comparison of Conditional and Unconditional Video Generation on UCF-101. We follow the evaluation setup of Latte [44]. † denotes FVD reported on 10,000 videos."
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td>Methods</td><td>Type</td><td>Params</td><td>Double Train Cost</td><td>Cond. Gen FVD2048 ↓</td><td>Uncond. Gen FVD2048 ↓</td></tr><tr><td colspan=\"6\">Resolution-128×128</td></tr><tr><td>MAGVITv2-MLM [16]</td><td>Non-AR</td><td>307 M</td><td>X</td><td>58†</td><td>-</td></tr><tr><td>MAGVITv2-AR [16]</td><td>Token-AR</td><td>840 M</td><td>X</td><td>109†</td><td>-</td></tr><tr><td>TATS [48]</td><td>Token-AR</td><td>331 M</td><td>X</td><td>332</td><td>420</td></tr><tr><td>FAR-L (Ours)</td><td>Frame-AR</td><td>457 M</td><td>X</td><td>99 (57†)</td><td>280</td></tr><tr><td colspan=\"6\">Resolution-256×256</td></tr><tr><td>LVDM [49]</td><td>Video-DiT</td><td>437 M</td><td>X</td><td>-</td><td>372</td></tr><tr><td>Latte [44]</td><td>Video-DiT</td><td>674 M</td><td>X</td><td>-</td><td>478</td></tr><tr><td>CogVideo [19]</td><td>Token-AR</td><td>9.4 B</td><td>X</td><td>626</td><td>-</td></tr><tr><td>OmniTokenizer [50]</td><td>Token-AR</td><td>650 M</td><td>X</td><td>191</td><td>-</td></tr><tr><td>ACDIT [26]</td><td>Frame-AR</td><td>677 M</td><td>✓</td><td>111</td><td>-</td></tr><tr><td>MAGI [27]</td><td>Frame-AR</td><td>850 M</td><td>✓</td><td>-</td><td>421</td></tr><tr><td>FAR-L (Ours)</td><td>Frame-AR</td><td>457 M</td><td>X</td><td>113</td><td>303</td></tr><tr><td>FAR-XL (Ours)</td><td>Frame-AR</td><td>674 M</td><td>X</td><td>108</td><td>279</td></tr></table>",
    "bbox": [73, 367, 488, 545],
    "page_idx": 5
},
{
    "type": "text",
    "text": "cache. Since this modifies the cache state, we subsequently re-encode the L1 cache of the frames in the short-term context window. The encoded cache is then used to decode the current frame. Note that in practice, these three steps can be merged into a single forward pass for efficiency.",
    "bbox": [71, 551, 490, 625],
    "page_idx": 5
},
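The eviction-and-refresh step described across the last two paragraphs can be sketched in a few lines. All interfaces here (`model.encode_long_term`, `model.encode_short_term`) are assumed for illustration, not the released API; `l1` holds (frame, kv) pairs.

```python
def update_multilevel_cache(l1, l2, model, short_window):
    """When the short-term window is full, compress its oldest frame into the
    L2 cache, then re-encode the remaining L1 entries against the new state."""
    if len(l1) >= short_window:
        oldest_frame, _ = l1.pop(0)
        l2.append(model.encode_long_term(oldest_frame))   # aggressive patchify, few tokens
        l1[:] = [(f, model.encode_short_term(f, l2)) for f, _ in l1]  # refresh L1
    return l1, l2
```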
{
    "type": "text",
    "text": "5 EXPERIMENT",
    "text_level": 1,
    "bbox": [73, 643, 212, 659],
    "page_idx": 5
},
{
    "type": "text",
    "text": "5.1 Implementation Details",
    "text_level": 1,
    "bbox": [71, 664, 285, 679],
    "page_idx": 5
},
{
    "type": "text",
    "text": "We follow DiT's structure [39] to implement FAR. To compress video latents, we train a series of image DC-AEs [47] on the corresponding datasets, resulting in 64 tokens per frame. All models are trained from scratch without image pretraining. We provide training hyperparameters and evaluation settings in Table. 8.",
    "bbox": [71, 681, 490, 772],
    "page_idx": 5
},
{
    "type": "text",
    "text": "5.2 Quantitative Comparison",
    "text_level": 1,
    "bbox": [71, 789, 302, 804],
    "page_idx": 5
},
{
    "type": "text",
    "text": "5.2.1 Video Generation",
    "text_level": 1,
    "bbox": [73, 806, 250, 821],
    "page_idx": 5
},
{
    "type": "text",
    "text": "Dataset and Evaluation Settings. We benchmark both unconditional and conditional video generation on the UCF-101 dataset [58], which consists of approximately 13,000 videos. Following Latte [44], we use the entire dataset for training. For evaluation, we randomly sample 2,048 videos to compute FVD [59] against the ground-truth videos. For conditional video generation, we set the guidance scale to 2.0 during inference.",
    "bbox": [71, 825, 491, 941],
    "page_idx": 5
},
{
    "type": "text",
    "text": "Main Results. From the results listed in Table 3, we achieve state-of-the-art performance in both unconditional and conditional video generation. Specifically, Latte [44] is based on video diffusion transformer, while OmniTokenizer [50] is based on Token AR. Our method significantly outperforms both. Furthermore, compared to recent frame-autoregressive models [26], [27], which require twice the training cost, FAR achieves superior performance without any additional training cost.",
    "bbox": [503, 313, 924, 446],
    "page_idx": 5
},
{
    "type": "text",
    "text": "5.2.2 Short-Video Prediction",
    "text_level": 1,
    "bbox": [504, 462, 718, 476],
    "page_idx": 5
},
{
    "type": "text",
    "text": "Dataset and Evaluation Settings. We evaluate FAR on the UCF-101 [58] and BAIR [60] datasets, following the evaluation settings in MCVD [51] and ExtDM [52]. We randomly sample 256 videos based on provided context frames, each with 100 different trajectories, and select the best trajectory to compute pixel-wise metrics. For FVD, we report the average over all trajectories.",
    "bbox": [503, 481, 921, 584],
    "page_idx": 5
},
{
    "type": "text",
    "text": "Main Results. We summarize the results in Table. 4. Unlike previous works such as MCVD [51] and ExtDM [52], which introduce complex multi-scale fusion strategies and optical flow, FAR achieves superior results on both datasets without requiring additional design.",
    "bbox": [503, 584, 921, 657],
    "page_idx": 5
},
{
    "type": "text",
    "text": "5.2.3 Long-Video Prediction",
    "text_level": 1,
    "bbox": [504, 674, 714, 689],
    "page_idx": 5
},
{
    "type": "text",
    "text": "Dataset and Evaluation Settings. We benchmark long-context video modeling results on action-conditioned video prediction using the Minecraft and DMLab datasets [36]. The Minecraft dataset contains approximately 200K videos, while the DMLab dataset contains about 40K videos. Each video consists of 300 frames with action annotations. We follow the evaluation setup in TECO [36], which uses 144 observed context frames to predict 156 future frames and compute pixel metrics. Additionally, we compute FVD on 264 generated frames based on 36 context frames.",
    "bbox": [501, 693, 921, 839],
    "page_idx": 5
},
{
    "type": "text",
    "text": "Main Results. We summarize the results in Table. 5. The previous work, TECO [36], adopts aggressive downscaling for all frames to reduce tokens for temporal modeling, creating a trade-off between training efficiency and prediction accuracy. In contrast, FAR employs long short-term context modeling, effectively achieving the lowest prediction error (i.e., LPIPS) without prohibitive computation cost.",
    "bbox": [503, 840, 923, 943],
    "page_idx": 5
},
{
    "type": "page_number",
    "text": "6",
    "bbox": [911, 32, 921, 42],
    "page_idx": 5
},
{
    "type": "text",
    "text": "TABLE 4: Quantitative Comparison on Short Video Prediction. We follow the evaluation setup of MCVD [51] and ExtDM [52], where $c$ denotes the number of context frames and $p$ denotes the number of predicted frames.",
    "bbox": [71, 47, 923, 78],
    "page_idx": 6
},
{
    "type": "table",
    "img_path": "images/e9e6a46811eb8e1269f12c67e0ca9e259b9f796b7157708de5bdbfefc810ca1a.jpg",
    "table_caption": [],
    "table_footnote": [],
    "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Params</td><td colspan=\"4\">c=4, p=12</td></tr><tr><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td></tr><tr><td>RaMViD [53]</td><td>235 M</td><td>0.639</td><td>21.37</td><td>0.090</td><td>396.7</td></tr><tr><td>LFDM [54]</td><td>108 M</td><td>0.627</td><td>20.92</td><td>0.098</td><td>698.2</td></tr><tr><td>MCVD-cp [51]</td><td>565 M</td><td>0.658</td><td>21.82</td><td>0.088</td><td>468.1</td></tr><tr><td>ExtDM-K2 [52]</td><td>119 M</td><td>0.754</td><td>23.89</td><td>0.056</td><td>394.1</td></tr><tr><td>FAR-B (Ours)</td><td>130 M</td><td>0.818</td><td>25.64</td><td>0.037</td><td>194.1</td></tr></table>",
    "bbox": [73, 79, 416, 165],
    "page_idx": 6
},
{
    "type": "table",
    "img_path": "images/5cbde1a34cbe0c4e9ab4194e56d54d938d05f96574a43baaa70b42e172e9f1bd.jpg",
    "table_caption": [
        "(a) Evaluation on UCF-101 $(64\\times 64)$"
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Params</td><td colspan=\"4\">c = 2, p = 14</td><td colspan=\"4\">c = 2, p = 28</td></tr><tr><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td></tr><tr><td>RaMViD [53]</td><td>235 M</td><td>0.758</td><td>17.55</td><td>0.085</td><td>166.5</td><td>0.691</td><td>16.51</td><td>0.109</td><td>238.7</td></tr><tr><td>LFDM [54]</td><td>108 M</td><td>0.770</td><td>17.45</td><td>0.084</td><td>167.6</td><td>0.730</td><td>16.68</td><td>0.106</td><td>276.8</td></tr><tr><td>VIDM [55]</td><td>194 M</td><td>0.763</td><td>16.97</td><td>0.080</td><td>131.7</td><td>0.728</td><td>16.20</td><td>0.096</td><td>194.6</td></tr><tr><td>MCVD-cp [51]</td><td>565 M</td><td>0.838</td><td>19.10</td><td>0.075</td><td>87.8</td><td>0.797</td><td>17.70</td><td>0.078</td><td>119.0</td></tr><tr><td>ExtDM-K4 [52]</td><td>121 M</td><td>0.845</td><td>20.04</td><td>0.053</td><td>81.6</td><td>0.814</td><td>18.74</td><td>0.069</td><td>102.8</td></tr><tr><td>FAR-B (Ours)</td><td>130 M</td><td>0.849</td><td>20.87</td><td>0.038</td><td>99.3</td><td>0.819</td><td>19.40</td><td>0.049</td><td>144.3</td></tr></table>",
    "bbox": [431, 78, 923, 166],
    "page_idx": 6
},
{
    "type": "table",
    "img_path": "images/aadf8e6ac2652d51fea66ba4d1d1a75302d3af93aa7c0c573f3d5e9aec149397.jpg",
    "table_caption": [
        "(b) Evaluation on BAIR (64×64)",
        "TABLE 5: Quantitative Comparison on Long-Context Video Prediction. We follow the evaluation setup of TECO [36], where $c$ denotes the number of context frames and $p$ denotes the number of predicted frames."
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Params</td><td colspan=\"3\">c = 144, p = 156</td><td>c = 36, p = 264</td></tr><tr><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td></tr><tr><td>FitVid [56]</td><td>165 M</td><td>0.356</td><td>12.0</td><td>0.491</td><td>176</td></tr><tr><td>CW-VAE [57]</td><td>111 M</td><td>0.372</td><td>12.6</td><td>0.465</td><td>125</td></tr><tr><td>Perceiver AR [37]</td><td>30 M</td><td>0.304</td><td>11.2</td><td>0.487</td><td>96</td></tr><tr><td>Latent FDM [38]</td><td>31 M</td><td>0.588</td><td>17.8</td><td>0.222</td><td>181</td></tr><tr><td>TECO [36]</td><td>169 M</td><td>0.703</td><td>21.9</td><td>0.157</td><td>48</td></tr><tr><td>FAR-B-Long (Ours)</td><td>150 M</td><td>0.687</td><td>22.3</td><td>0.104</td><td>64</td></tr></table>",
    "bbox": [73, 227, 501, 325],
    "page_idx": 6
},
{
    "type": "table",
    "img_path": "images/63a94babeb831c52884a2576fe7479931147e7868fe5371681a1a0d2e08dbad5.jpg",
    "table_caption": [
        "(a) Evaluation on DMLab (64×64)"
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Params</td><td colspan=\"3\">c = 144, p = 156</td><td>c = 36, p = 264</td></tr><tr><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td></tr><tr><td>FitVid [56]</td><td>176 M</td><td>0.343</td><td>13.0</td><td>0.519</td><td>956</td></tr><tr><td>CW-VAE [57]</td><td>140 M</td><td>0.338</td><td>13.4</td><td>0.441</td><td>397</td></tr><tr><td>Perceiver AR [37]</td><td>166 M</td><td>0.323</td><td>13.2</td><td>0.441</td><td>76</td></tr><tr><td>Latent FDM [38]</td><td>33 M</td><td>0.349</td><td>13.4</td><td>0.429</td><td>167</td></tr><tr><td>TECO [36]</td><td>274 M</td><td>0.381</td><td>15.4</td><td>0.340</td><td>116</td></tr><tr><td>FAR-M-Long (Ours)</td><td>280 M</td><td>0.448</td><td>16.9</td><td>0.251</td><td>39</td></tr></table>",
    "bbox": [501, 227, 923, 325],
    "page_idx": 6
},
{
    "type": "text",
    "text": "(b) Evaluation on Minecraft $(128\\times 128)$",
    "bbox": [578, 327, 839, 339],
    "page_idx": 6
},
{
    "type": "text",
    "text": "5.3 Qualitative Comparison",
    "text_level": 1,
    "bbox": [71, 373, 290, 388],
    "page_idx": 6
},
{
    "type": "text",
    "text": "We present a qualitative comparison of long-video prediction in Fig. 9. Compared to previous methods, FAR effectively utilizes the observed context and generates predictions that most closely resemble the ground truth, demonstrating its ability to leverage long-range context.",
    "bbox": [71, 392, 490, 465],
    "page_idx": 6
},
{
    "type": "text",
    "text": "5.4 Ablation Study",
    "text_level": 1,
    "bbox": [71, 484, 225, 500],
    "page_idx": 6
},
{
    "type": "text",
    "text": "How to decide the patchify kernel for remote context? The key to selecting an appropriate patchify kernel lies in whether the compressed latent representation can reliably retain information from past observations. Since patchifying is a spatial-to-channel transformation, we hypothesize that it should satisfy the condition $c \\times c \\times d \\leq D$, where $c$ is the patchify kernel size, $d$ is the latent dimension, and $D$ is the model's channel dimension. For example, in our experiment, the latent dimension is 32 and the model dimension is 768. Using a patchify kernel of size $4 \\times 4$, we get $4 \\times 4 \\times 32 = 512 < 768$, which suggests that nearly all input information can be preserved during patchification. As demonstrated in Table. 7, the [4, 4] patchify kernel significantly reduces training cost, enabling feasible training on long videos, without the accuracy loss incurred by the larger [8, 8] kernel.",
    "bbox": [71, 503, 490, 736],
    "page_idx": 6
},
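The capacity condition above is a one-line check. A worked instance with the paper's stated dimensions (d = 32, D = 768):

```python
# c * c * d <= D: a 4x4 kernel fits (4*4*32 = 512 <= 768),
# while an 8x8 kernel overflows (8*8*32 = 2048 > 768).
def kernel_preserves_info(c, d=32, D=768):
    return c * c * d <= D

print(kernel_preserves_info(4))  # True
print(kernel_preserves_info(8))  # False
```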
{
    "type": "text",
    "text": "How to decide the local context length? We gradually increase the local context length during training and observe that performance converges at a certain short-term context length (i.e., 8 frames in Fig. 10). Further increasing the local context length significantly raises the training cost without improving performance. This experiment verifies the presence of context redundancy in video autoregressive modeling. Therefore, we select the saturation point as the optimal short-term context length.",
    "bbox": [71, 737, 488, 868],
    "page_idx": 6
},
{
    "type": "text",
    "text": "Effect of Stochastic Clean Context. We have visualized the effectiveness of stochastic clean context in Fig. 4. Based on the quantitative evaluation of video prediction in Table. 6, FAR with stochastic clean context achieves significantly improved performance.",
    "bbox": [71, 869, 491, 941],
    "page_idx": 6
},
{
    "type": "table",
    "img_path": "images/1263bbdf0e778ed7e5255f93bf86c426bc0aad4712756a3bb2824c62e487b272.jpg",
    "table_caption": [
        "TABLE 6: Ablation Study of Stochastic Clean Context (SCC) on UCF-101. Stochastic clean context mitigates the training-inference discrepancy in observed context, leading to improved performance."
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"4\">c = 1, p = 15</td></tr><tr><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td></tr><tr><td>w/o. SCC</td><td>0.540</td><td>16.42</td><td>0.211</td><td>399</td></tr><tr><td>w/. SCC</td><td>0.596</td><td>18.46</td><td>0.187</td><td>347</td></tr></table>",
    "bbox": [504, 428, 923, 513],
    "page_idx": 6
},
{
    "type": "table",
    "img_path": "images/a330c7a04357283b0e03fc80f4a9caaaa7c97f8584233aa5c6709e056a0e68b1.jpg",
    "table_caption": [
        "TABLE 7: Ablation Study on the Patchify Kernel of Distant Context. Larger patchify kernels significantly reduce training cost."
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td>Patchify Kernel</td><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>FVD↓</td><td>Training Memory</td></tr><tr><td>[1,1]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>OOM</td></tr><tr><td>[2,2]</td><td>0.570</td><td>19.1</td><td>0.156</td><td>38</td><td>38.9 G</td></tr><tr><td>[4,4]</td><td>0.576</td><td>19.3</td><td>0.153</td><td>34</td><td>15.3 G</td></tr><tr><td>[8,8]</td><td>0.558</td><td>18.6</td><td>0.171</td><td>33</td><td>0.9 G</td></tr></table>",
    "bbox": [509, 578, 919, 669],
    "page_idx": 6
},
{
    "type": "text",
    "text": "Effect of the KV Cache on Inference Speedup. As shown in Fig. 11, the baseline FAR model, which samples without long short-term context or a KV cache, requires approximately 1341 seconds to generate a 256-frame video. When the KV cache is introduced, the inference time is significantly reduced to 171 seconds. Finally, by incorporating both long short-term context and the corresponding multi-level KV cache, the sampling time further decreases to approximately 104 seconds. These results demonstrate that KV caching, especially when combined with long short-term context modeling, significantly improves sampling efficiency for long video generation.",
    "bbox": [501, 693, 921, 869],
    "page_idx": 6
},
{
    "type": "text",
    "text": "6 CONCLUSION",
    "text_level": 1,
    "bbox": [504, 892, 645, 906],
    "page_idx": 6
},
{
    "type": "text",
    "text": "In this paper, we systematically investigate long-context video modeling using the proposed Frame Autoregressive",
    "bbox": [503, 912, 923, 943],
    "page_idx": 6
},
{
    "type": "page_number",
    "text": "7",
    "bbox": [911, 32, 919, 42],
    "page_idx": 6
},
{
    "type": "image",
    "img_path": "images/608bf03c67e98e01bf6570c1d62bd0957bface162eae7f3af7f5dd0be3922f24.jpg",
    "image_caption": [
        "Fig. 9: Qualitative Comparison of Long-Context Video Prediction on DMLab. FAR fully utilizes the long-range context (144 frames), resulting in more consistent prediction (156 frames) compared to previous methods."
    ],
    "image_footnote": [],
    "bbox": [76, 53, 923, 414],
    "page_idx": 7
},
{
    "type": "image",
    "img_path": "images/e2b24e4d722aa1f3c8295eee567928c1e3ec0e73ebf2255e7fe700c363144292.jpg",
    "image_caption": [
        "Fig. 10: Ablation Study of the Short-Term Context Window Size. Performance saturates as the window size increases."
    ],
    "image_footnote": [],
    "bbox": [76, 469, 486, 661],
    "page_idx": 7
},
{
    "type": "text",
    "text": "Model (FAR). First, we show that direct test-time extrapolation is insufficient for effective long-context video modeling, highlighting the necessity of efficient long-video training. We then identify context redundancy as a key bottleneck in video autoregression. To address this, we propose a long short-term context modeling strategy with asymmetric patchify kernels, a simple yet effective method to eliminate redundant context and significantly reduce the training cost for long videos. Extensive experiments validate the effectiveness of FAR in handling long-context video generation and highlight a promising direction for the evolution of next-generation video generative models: shifting the focus from short-term temporal consistency to long-term world modeling.",
    "bbox": [71, 720, 491, 925],
    "page_idx": 7
},
|
| 1428 |
+
{
"type": "text",
"text": "Limitations. The primary limitation lies in the lack of",
"bbox": [
73,
926,
491,
941
],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/9ca7bab91db62b0623ede84184b6aa76d213b09d64552ee23792e60d7e6de70c.jpg",
"image_caption": [
"Fig. 11: Ablation Study of the KV Cache. FAR-Long with proposed multi-level KV cache achieves the best speedup on long videos."
],
"image_footnote": [],
"bbox": [
506,
465,
921,
662
],
"page_idx": 7
},
{
"type": "text",
"text": "scaled-up experiments. Although FAR demonstrates great potential, we still lack large-scale training on text-to-video generation datasets. Additionally, restricted by the available datasets, we experiment with FAR only on videos of up to 300 frames (about 20 seconds), and thus do not fully investigate its ability on minute-level videos.",
"bbox": [
501,
744,
921,
833
],
"page_idx": 7
},
{
"type": "text",
"text": "Future Work. One future direction is to scale up FAR and benchmark it against video diffusion transformers on large-scale text-to-video generation tasks. Additionally, we plan to simulate a longer video dataset (on the minute level) to better evaluate the model's long-context capabilities. Finally, it would be interesting to explore whether FAR's long-context modeling can enable video-level in-context learning.",
"bbox": [
501,
839,
923,
944
],
"page_idx": 7
},
{
"type": "page_number",
"text": "8",
"bbox": [
911,
32,
921,
42
],
"page_idx": 7
},
{
"type": "text",
"text": "REFERENCES",
"text_level": 1,
"bbox": [
75,
51,
187,
66
],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[1] T. Brooks, B. Peebles, C. Holmes, W. DePue, Y. Guo, L. Jing, D. Schnurr, J. Taylor, T. Luhman, E. Luhman, C. Ng, R. Wang, and A. Ramesh, \"Video generation models as world simulators,\" 2024. [Online]. Available: https://openai.com/research/video-generation-models-as-world-simulators 1, 2",
"[2] A. Wang, B. Ai, B. Wen, C. Mao, C.-W. Xie, D. Chen, F. Yu, H. Zhao, J. Yang, J. Zeng et al., \"Wan: Open and advanced large-scale video generative models,\" arXiv preprint arXiv:2503.20314, 2025. 1",
"[3] N. Agarwal, A. Ali, M. Bala, Y. Balaji, E. Barker, T. Cai, P. Chattopadhyay, Y. Chen, Y. Cui, Y. Ding et al., \"Cosmos world foundation model platform for physical ai,\" arXiv preprint arXiv:2501.03575, 2025. 1",
"[4] Y. Lu, Y. Liang, L. Zhu, and Y. Yang, \"Freelong: Training-free long video generation with spectralblend temporal attention,\" arXiv preprint arXiv:2407.19918, 2024. 1, 2, 5",
"[5] M. Zhao, G. He, Y. Chen, H. Zhu, C. Li, and J. Zhu, \"Riflex: A free lunch for length extrapolation in video diffusion transformers,\" arXiv preprint arXiv:2502.15894, 2025. 1, 4, 5",
"[6] Y. Guo, C. Yang, Z. Yang, Z. Ma, Z. Lin, Z. Yang, D. Lin, and L. Jiang, \"Long context tuning for video generation,\" arXiv preprint arXiv:2503.10589, 2025. 1",
"[7] K. Dalal, D. Koceja, G. Hussein, J. Xu, Y. Zhao, Y. Song, S. Han, K. C. Cheung, J. Kautz, C. Guestrin et al., \"One-minute video generation with test-time training,\" arXiv preprint arXiv:2504.05298, 2025. 1",
"[8] B. Chen, D. Martí Monsó, Y. Du, M. Simchowitz, R. Tedrake, and V. Sitzmann, \"Diffusion forcing: Next-token prediction meets full-sequence diffusion,\" Advances in Neural Information Processing Systems, vol. 37, pp. 24081-24125, 2025. 1, 2, 4",
"[9] Y. Jin, Z. Sun, N. Li, K. Xu, H. Jiang, N. Zhuang, Q. Huang, Y. Song, Y. Mu, and Z. Lin, \"Pyramidal flow matching for efficient video generative modeling,\" arXiv preprint arXiv:2410.05954, 2024. 1, 2",
"[10] J. Xie, W. Mao, Z. Bai, D. J. Zhang, W. Wang, K. Q. Lin, Y. Gu, Z. Chen, Z. Yang, and M. Z. Shou, \"Show-o: One single transformer to unify multimodal understanding and generation,\" arXiv preprint arXiv:2408.12528, 2024. 1, 2",
"[11] Z. Yang, J. Teng, W. Zheng, M. Ding, S. Huang, J. Xu, Y. Yang, W. Hong, X. Zhang, G. Feng et al., \"Cogvideox: Text-to-video diffusion models with an expert transformer,\" arXiv preprint arXiv:2408.06072, 2024. 2",
"[12] W. Kong, Q. Tian, Z. Zhang, R. Min, Z. Dai, J. Zhou, J. Xiong, X. Li, B. Wu, J. Zhang et al., \"Hunyuanvideo: A systematic framework for large video generative models,\" arXiv preprint arXiv:2412.03603, 2024. 2",
"[13] Y. Guo, C. Yang, A. Rao, Z. Liang, Y. Wang, Y. Qiao, M. Agrawala, D. Lin, and B. Dai, \"Animatediff: Animate your personalized text-to-image diffusion models without specific tuning,\" arXiv preprint arXiv:2307.04725, 2023. 2",
"[14] J. Xing, M. Xia, Y. Zhang, H. Chen, W. Yu, H. Liu, G. Liu, X. Wang, Y. Shan, and T.-T. Wong, \"Dynamicrafter: Animating open-domain images with video diffusion priors,\" in European Conference on Computer Vision. Springer, 2024, pp. 399-417. 2",
"[15] F.-Y. Wang, W. Chen, G. Song, H.-J. Ye, Y. Liu, and H. Li, \"Gen-l-video: Multi-text to long video generation via temporal co-denoising,\" arXiv preprint arXiv:2305.18264, 2023. 2, 5",
"[16] L. Yu, J. Lezama, N. B. Gundavarapu, L. Versari, K. Sohn, D. Minnen, Y. Cheng, V. Birodkar, A. Gupta, X. Gu et al., \"Language model beats diffusion: Tokenizer is key to visual generation,\" arXiv preprint arXiv:2310.05737, 2023. 2, 6",
"[17] Y. Gu, X. Wang, Y. Ge, Y. Shan, and M. Z. Shou, \"Rethinking the objectives of vector-quantized tokenizers for image synthesis,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 7631-7640. 2",
"[18] D. Kondratyuk, L. Yu, X. Gu, J. Lezama, J. Huang, G. Schindler, R. Hornung, V. Birodkar, J. Yan, M.-C. Chiu et al., \"Videopoet: A large language model for zero-shot video generation,\" arXiv preprint arXiv:2312.14125, 2023. 2",
"[19] W. Hong, M. Ding, W. Zheng, X. Liu, and J. Tang, \"Cogvideo: Large-scale pretraining for text-to-video generation via transformers,\" arXiv preprint arXiv:2205.15868, 2022. 2, 6",
"[20] L. Fan, T. Li, S. Qin, Y. Li, C. Sun, M. Rubinstein, D. Sun, K. He, and Y. Tian, \"Fluid: Scaling autoregressive text-to-image generative models with continuous tokens,\" arXiv preprint arXiv:2410.13863, 2024. 2"
],
"bbox": [
76,
78,
491,
940
],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[21] T. Li, Y. Tian, H. Li, M. Deng, and K. He, \"Autoregressive image generation without vector quantization,\" Advances in Neural Information Processing Systems, vol. 37, pp. 56424-56445, 2025. 2",
"[22] C. Zhou, L. Yu, A. Babu, K. Tirumala, M. Yasunaga, L. Shamis, J. Kahn, X. Ma, L. Zettlemoyer, and O. Levy, \"Transfusion: Predict the next token and diffuse images with one multi-modal model,\" arXiv preprint arXiv:2408.11039, 2024. 2",
"[23] Y. Ma, X. Liu, X. Chen, W. Liu, C. Wu, Z. Wu, Z. Pan, Z. Xie, H. Zhang, L. Zhao et al., \"Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation,\" arXiv preprint arXiv:2411.07975, 2024. 2",
"[24] L. Barrault, P.-A. Duquenne, M. Elbayad, A. Kozhevnikov, B. Alastruey, P. Andrews, M. Coria, G. Couairon, M. R. Costa-jussa, D. Dale et al., \"Large concept models: Language modeling in a sentence representation space,\" arXiv e-prints, pp. arXiv-2412, 2024. 2",
"[25] T. Wu, Z. Fan, X. Liu, H.-T. Zheng, Y. Gong, J. Jiao, J. Li, J. Guo, N. Duan, W. Chen et al., \"Ar-diffusion: Auto-regressive diffusion model for text generation,\" Advances in Neural Information Processing Systems, vol. 36, pp. 39957-39974, 2023. 2",
"[26] J. Hu, S. Hu, Y. Song, Y. Huang, M. Wang, H. Zhou, Z. Liu, W.-Y. Ma, and M. Sun, \"Acdit: Interpolating autoregressive conditional modeling and diffusion transformer,\" arXiv preprint arXiv:2412.07720, 2024. 2, 4, 6",
"[27] D. Zhou, Q. Sun, Y. Peng, K. Yan, R. Dong, D. Wang, Z. Ge, N. Duan, X. Zhang, L. M. Ni et al., \"Taming teacher forcing for masked autoregressive video generation,\" arXiv preprint arXiv:2501.12389, 2025. 2, 4, 6",
"[28] O. Press, N. A. Smith, and M. Lewis, \"Train short, test long: Attention with linear biases enables input length extrapolation,\" arXiv preprint arXiv:2108.12409, 2021. 2",
"[29] B. Peng, J. Quesnelle, H. Fan, and E. Shippole, \"Yarn: Efficient context window extension of large language models,\" arXiv preprint arXiv:2309.00071, 2023. 2, 5",
"[30] bloc97, \"NTK-Aware Scaled RoPE allows LLaMA models to have extended (8k+) context size without any finetuning and minimal perplexity degradation,\" 2023. [Online]. Available: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ 2, 5",
"[31] S. Chen, S. Wong, L. Chen, and Y. Tian, \"Extending context window of large language models via positional interpolation,\" arXiv preprint arXiv:2306.15595, 2023. 2, 5",
"[32] Y. Chen, S. Qian, H. Tang, X. Lai, Z. Liu, S. Han, and J. Jia, \"Longlora: Efficient fine-tuning of long-context large language models,\" arXiv preprint arXiv:2309.12307, 2023. 2, 5",
"[33] D. Valevski, Y. Leviathan, M. Arar, and S. Fruchter, \"Diffusion models are real-time game engines,\" arXiv preprint arXiv:2408.14837, 2024. 2",
"[34] J. Bruce, M. D. Dennis, A. Edwards, J. Parker-Holder, Y. Shi, E. Hughes, M. Lai, A. Mavalankar, R. Steigerwald, C. Apps et al., \"Genie: Generative interactive environments,\" in Forty-first International Conference on Machine Learning, 2024. 2",
"[35] J. Parker-Holder, P. Ball, J. Bruce, V. Dasagi, K. Holsheimer, C. Kaplanis, A. Moufarek, G. Scully, J. Shar, J. Shi, S. Spencer, J. Yung, M. Dennis, S. Kenjeyev, S. Long, V. Mnih, H. Chan, M. Gazeau, B. Li, F. Pardo, L. Wang, L. Zhang, F. Besse, T. Harley, A. Mitenkova, J. Wang, J. Clune, D. Hassabis, R. Hadsell, A. Bolton, S. Singh, and T. Rocktäschel, \"Genie 2: A large-scale foundation world model,\" 2024. [Online]. Available: https://deepmind.google/discover/blog/genie-2-a-large-scale-foundation-world-model/ 2",
"[36] W. Yan, D. Hafner, S. James, and P. Abbeel, \"Temporally consistent transformers for video generation,\" in International Conference on Machine Learning. PMLR, 2023, pp. 39062-39098. 2, 6, 7, 11",
"[37] C. Hawthorne, A. Jaegle, C. Cangea, S. Borgeaud, C. Nash, M. Malinowski, S. Dieleman, O. Vinyals, M. Botvinick, I. Simon et al., \"General-purpose, long-context autoregressive modeling with perceiver ar,\" in International Conference on Machine Learning. PMLR, 2022, pp. 8535-8558. 2, 7",
"[38] W. Harvey, S. Naderiparizi, V. Masrani, C. Weilbach, and F. Wood, \"Flexible diffusion modeling of long videos,\" Advances in Neural Information Processing Systems, vol. 35, pp. 27953-27965, 2022. 2, 7",
"[39] W. Peebles and S. Xie, \"Scalable diffusion models with transformers,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 4195-4205. 2, 4, 6"
],
"bbox": [
506,
55,
921,
941
],
"page_idx": 8
},
{
"type": "page_number",
"text": "9",
"bbox": [
911,
32,
921,
42
],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[40] N. Ma, M. Goldstein, M. S. Albergo, N. M. Boffi, E. Vanden-Eijnden, and S. Xie, \"Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers,\" in European Conference on Computer Vision. Springer, 2024, pp. 23-40. 2, 4",
"[41] X. Liu, C. Gong, and Q. Liu, \"Flow straight and fast: Learning to generate and transfer data with rectified flow,\" arXiv preprint arXiv:2209.03003, 2022. 3",
"[42] Y. Lipman, R. T. Chen, H. Ben-Hamu, M. Nickel, and M. Le, \"Flow matching for generative modeling,\" arXiv preprint arXiv:2210.02747, 2022. 3",
"[43] M. S. Albergo and E. Vanden-Eijnden, \"Building normalizing flows with stochastic interpolants,\" arXiv preprint arXiv:2209.15571, 2022. 3",
"[44] X. Ma, Y. Wang, G. Jia, X. Chen, Z. Liu, Y.-F. Li, C. Chen, and Y. Qiao, \"Latte: Latent diffusion transformer for video generation,\" arXiv preprint arXiv:2401.03048, 2024. 4, 6, 11",
"[45] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel et al., \"Scaling rectified flow transformers for high-resolution image synthesis,\" in Forty-first International Conference on Machine Learning, 2024. 4, 5",
"[46] J. Su, M. Ahmed, Y. Lu, S. Pan, W. Bo, and Y. Liu, \"Roformer: Enhanced transformer with rotary position embedding,\" Neurocomputing, vol. 568, p. 127063, 2024. 5",
"[47] J. Chen, H. Cai, J. Chen, E. Xie, S. Yang, H. Tang, M. Li, Y. Lu, and S. Han, \"Deep compression autoencoder for efficient high-resolution diffusion models,\" arXiv preprint arXiv:2410.10733, 2024. 6, 11",
"[48] S. Ge, T. Hayes, H. Yang, X. Yin, G. Pang, D. Jacobs, J.-B. Huang, and D. Parikh, \"Long video generation with time-agnostic vqgan and time-sensitive transformer,\" in European Conference on Computer Vision. Springer, 2022, pp. 102-118. 6",
"[49] Y. He, T. Yang, Y. Zhang, Y. Shan, and Q. Chen, \"Latent video diffusion models for high-fidelity long video generation,\" arXiv preprint arXiv:2211.13221, 2022. 6",
"[50] J. Wang, Y. Jiang, Z. Yuan, B. Peng, Z. Wu, and Y.-G. Jiang, \"Omnitokenizer: A joint image-video tokenizer for visual generation,\" arXiv preprint arXiv:2406.09399, 2024. 6",
"[51] V. Voleti, A. Jolicoeur-Martineau, and C. Pal, \"Mcvd: Masked conditional video diffusion for prediction, generation, and interpolation,\" Advances in Neural Information Processing Systems, vol. 35, pp. 23371-23385, 2022. 6, 7",
"[52] Z. Zhang, J. Hu, W. Cheng, D. Paudel, and J. Yang, \"Extdm: Distribution extrapolation diffusion model for video prediction,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 19310-19320. 6, 7, 11",
"[53] T. Hoppe, A. Mehrjou, S. Bauer, D. Nielsen, and A. Dittadi, \"Diffusion models for video prediction and infilling,\" arXiv preprint arXiv:2206.07696, 2022. 7",
"[54] H. Ni, C. Shi, K. Li, S. X. Huang, and M. R. Min, \"Conditional image-to-video generation with latent flow diffusion models,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 18444-18455. 7",
"[55] K. Mei and V. Patel, \"Vidm: Video implicit diffusion models,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 8, 2023, pp. 9117-9125. 7",
"[56] M. Babaeizadeh, M. T. Saffar, S. Nair, S. Levine, C. Finn, and D. Erhan, \"Fitvid: Overfitting in pixel-level video prediction,\" arXiv preprint arXiv:2106.13195, 2021. 7",
"[57] V. Saxena, J. Ba, and D. Hafner, \"Clockwork variational autoencoders,\" Advances in Neural Information Processing Systems, vol. 34, pp. 29246-29257, 2021. 7",
"[58] K. Soomro, A. R. Zamir, and M. Shah, \"Ucf101: A dataset of 101 human actions classes from videos in the wild,\" arXiv preprint arXiv:1212.0402, 2012. 6",
"[59] T. Unterthiner, S. Van Steenkiste, K. Kurach, R. Marinier, M. Michalski, and S. Gelly, \"Fvd: A new metric for video generation,\" 2019. 6",
"[60] F. Ebert, C. Finn, A. X. Lee, and S. Levine, \"Self-supervised visual planning with temporal skip connections,\" CoRL, vol. 12, no. 16, p. 23, 2017. 6"
],
"bbox": [
76,
54,
491,
929
],
"page_idx": 9
},
{
"type": "text",
"text": "7 APPENDIX",
"text_level": 1,
"bbox": [
506,
51,
622,
66
],
"page_idx": 9
},
{
"type": "text",
"text": "7.1 Experimental Settings",
"text_level": 1,
"bbox": [
506,
71,
710,
85
],
"page_idx": 9
},
{
"type": "text",
"text": "As shown in Table 8, we list the detailed training and evaluation configurations of FAR. For the ablation studies in this paper, we halve the training iterations while keeping all other settings the same.",
"bbox": [
504,
90,
921,
148
],
"page_idx": 9
},
{
"type": "text",
"text": "7.2 Qualitative Comparison",
"text_level": 1,
"bbox": [
506,
167,
720,
183
],
"page_idx": 9
},
{
"type": "text",
"text": "We provide additional visualizations of long-video prediction results on DMLab and Minecraft in Fig. 12 and Fig. 13. The results show that FAR better exploits the provided context and produces more consistent later predictions than previous works.",
"bbox": [
504,
186,
921,
260
],
"page_idx": 9
},
{
"type": "page_number",
"text": "10",
"bbox": [
906,
32,
921,
42
],
"page_idx": 9
},
{
"type": "table",
"img_path": "images/7a9dcfa185f9a9fbd4f8b684d50baca2a8765ee62b0b371f9b60741826491697.jpg",
"table_caption": [
"TABLE 8: Experimental Configurations of FAR. We follow the evaluation settings from Latte [44], MCVD [52], and TECO [36]."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Hyperparameters</td><td colspan=\"2\">Short-Video Generation</td><td colspan=\"2\">Short-Video Prediction</td><td colspan=\"2\">Long-Video Prediction</td></tr><tr><td>Cond. UCF-101</td><td>Uncond. UCF-101</td><td>BAIR</td><td>UCF-101</td><td>Minecraft</td><td>DMLab</td></tr><tr><td colspan=\"7\">Dataset Configuration</td></tr><tr><td>Resolution</td><td>256/128</td><td>256/128</td><td>64</td><td>64</td><td>128</td><td>64</td></tr><tr><td>Total Training Samples</td><td>13,320</td><td>13,320</td><td>43,264</td><td>9,624</td><td>194,051</td><td>39,375</td></tr><tr><td colspan=\"7\">Training Configuration</td></tr><tr><td>Training Cost (H100 Days)</td><td>12.7</td><td>12.7</td><td>2.6</td><td>3.6</td><td>18.2</td><td>17.5</td></tr><tr><td>Batch Size</td><td>32</td><td>32</td><td>32</td><td>32</td><td>32</td><td>32</td></tr><tr><td>Latent Size</td><td>8×8 (DC-AE [47])</td><td>8×8 (DC-AE [47])</td><td>8×8 (DC-AE [47])</td><td>8×8 (DC-AE [47])</td><td>8×8 (DC-AE [47])</td><td>8×8 (DC-AE [47])</td></tr><tr><td>Training Sequence Length</td><td>16</td><td>16</td><td>32</td><td>16</td><td>300</td><td>300</td></tr><tr><td>LR</td><td>1×10<sup>-4</sup></td><td>1×10<sup>-4</sup></td><td>1×10<sup>-4</sup></td><td>1×10<sup>-4</sup></td><td>1×10<sup>-4</sup></td><td>1×10<sup>-4</sup></td></tr><tr><td>LR Schedule</td><td>constant</td><td>constant</td><td>constant</td><td>constant</td><td>constant</td><td>constant</td></tr><tr><td>Warmup Steps</td><td>-</td><td>-</td><td>-</td><td>-</td><td>10K</td><td>10K</td></tr><tr><td>Total Training Steps</td><td>400K</td><td>400K</td><td>200K</td><td>200K</td><td>1M</td><td>1M</td></tr><tr><td>Stochastic Clean Context</td><td>0.1</td><td>0.1</td><td>0.1</td><td>0.1</td><td>0.1</td><td>0.1</td></tr><tr><td>Short-Term Context Window</td><td>16</td><td>16</td><td>32</td><td>16</td><td>16</td><td>16</td></tr><tr><td>Patchify Kernel for Distant Context</td><td>-</td><td>-</td><td>-</td><td>-</td><td>[4, 4]</td><td>[4, 4]</td></tr></table>",
"bbox": [
76,
125,
923,
335
],
"page_idx": 10
},
{
"type": "table",
"img_path": "images/6d4a7a5b619f84abbe727a3ed06989fd7e4cb7e67cb2c59203762f0281c38712.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td colspan=\"7\">Evaluation Configuration</td></tr><tr><td>Samples</td><td>4×2048</td><td>4×2048</td><td>100×256</td><td>100×256</td><td>4×256</td><td>4×256</td></tr><tr><td>Guidance Scale</td><td>2.0</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>1.5</td></tr><tr><td>Reference Work</td><td>Latte [44]</td><td>Latte [44]</td><td>MCVD [52]</td><td>MCVD [52]</td><td>TECO [36]</td><td>TECO [36]</td></tr></table>",
"bbox": [
78,
335,
921,
392
],
"page_idx": 10
},
{
"type": "image",
"img_path": "images/d369111b810ef7f5f05b2934760e3c72c6aa53d3bec7ec3fe33e06ba77c7bc58.jpg",
"image_caption": [
"Fig. 12: Qualitative Comparison of Long-Context Video Prediction on DMLab. FAR fully utilizes the long-range context (144 frames), resulting in more consistent prediction (156 frames) compared to previous methods."
],
"image_footnote": [],
"bbox": [
76,
500,
906,
858
],
"page_idx": 10
},
{
"type": "page_number",
"text": "11",
"bbox": [
906,
32,
919,
42
],
"page_idx": 10
},
{
"type": "image",
"img_path": "images/717dc2d3ea15effc0c478e41741da6baf6380d6c84b17a74041785f64ac32d87.jpg",
"image_caption": [
"Fig. 13: Qualitative Comparison of Long-Context Video Prediction on Minecraft. FAR fully utilizes the long-range context (144 frames), resulting in more consistent prediction (156 frames) compared to previous methods."
],
"image_footnote": [],
"bbox": [
81,
297,
921,
661
],
"page_idx": 11
},
{
"type": "page_number",
"text": "12",
"bbox": [
906,
32,
921,
42
],
"page_idx": 11
}
]
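
For readers who want to consume a `*_content_list.json` file like the one above programmatically, here is a minimal sketch that reconstructs a rough per-page Markdown rendering from the block types visible in this diff (`text`, `image`, `table`, `list`, `page_number`). It is an illustration, not part of the batch tooling: the file path is a hypothetical placeholder, and only fields that actually appear in the entries above (`type`, `text`, `text_level`, `bbox`, `page_idx`, `img_path`, `image_caption`, `table_caption`, `table_body`, `list_items`) are assumed.

```python
import json
from collections import defaultdict

# Hypothetical path; substitute any content_list file from this batch.
PATH = "data/2025/2503_19xxx/2503.19075/example_content_list.json"

def render_block(block: dict) -> str:
    """Render one content block as a rough Markdown line."""
    kind = block.get("type")
    if kind == "text":
        text = block.get("text", "")
        level = block.get("text_level")
        # text_level marks headings; plain paragraphs have no level.
        return ("#" * level + " " + text) if level else text
    if kind == "image":
        caption = " ".join(block.get("image_caption", []))
        return f"![{caption}]({block.get('img_path', '')})"
    if kind == "table":
        caption = " ".join(block.get("table_caption", []))
        # table_body is already an HTML <table> string.
        return (caption + "\n" if caption else "") + block.get("table_body", "")
    if kind == "list":
        return "\n".join(block.get("list_items", []))
    if kind == "page_number":
        return ""  # layout artifact; skip when reconstructing text
    return ""

def group_by_page(blocks):
    """Group blocks by page_idx, preserving file order within each page."""
    by_page = defaultdict(list)
    for block in blocks:
        by_page[block.get("page_idx", -1)].append(block)
    return dict(sorted(by_page.items()))

if __name__ == "__main__":
    with open(PATH, encoding="utf-8") as f:
        blocks = json.load(f)  # the file is a flat JSON array of blocks
    for page_idx, page_blocks in group_by_page(blocks).items():
        print(f"--- page {page_idx} ---")
        for block in page_blocks:
            rendered = render_block(block)
            if rendered:
                print(rendered)
```

Ordering within a page follows the array order, which matches the extractor's reading order; the `bbox` values appear to be `[x0, y0, x1, y1]` page coordinates and could support finer layout-aware sorting if needed.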