SlowGuess committed on
Commit 7a20029 · verified · 1 Parent(s): 9c65bed

Add Batch 99d7ff8a-0e5e-417f-a29a-38b6f9a940d0

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. .gitattributes +64 -0
  2. 2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_content_list.json +0 -0
  3. 2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_model.json +0 -0
  4. 2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_origin.pdf +3 -0
  5. 2401.02xxx/2401.02418/full.md +453 -0
  6. 2401.02xxx/2401.02418/images.zip +3 -0
  7. 2401.02xxx/2401.02418/layout.json +0 -0
  8. 2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_content_list.json +1342 -0
  9. 2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_model.json +0 -0
  10. 2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_origin.pdf +3 -0
  11. 2401.02xxx/2401.02500/full.md +347 -0
  12. 2401.02xxx/2401.02500/images.zip +3 -0
  13. 2401.02xxx/2401.02500/layout.json +0 -0
  14. 2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_content_list.json +0 -0
  15. 2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_model.json +0 -0
  16. 2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_origin.pdf +3 -0
  17. 2401.02xxx/2401.02524/full.md +0 -0
  18. 2401.02xxx/2401.02524/images.zip +3 -0
  19. 2401.02xxx/2401.02524/layout.json +0 -0
  20. 2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_content_list.json +1406 -0
  21. 2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_model.json +1894 -0
  22. 2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_origin.pdf +3 -0
  23. 2401.02xxx/2401.02582/full.md +274 -0
  24. 2401.02xxx/2401.02582/images.zip +3 -0
  25. 2401.02xxx/2401.02582/layout.json +0 -0
  26. 2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_content_list.json +0 -0
  27. 2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_model.json +0 -0
  28. 2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_origin.pdf +3 -0
  29. 2401.02xxx/2401.02634/full.md +518 -0
  30. 2401.02xxx/2401.02634/images.zip +3 -0
  31. 2401.02xxx/2401.02634/layout.json +0 -0
  32. 2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_content_list.json +0 -0
  33. 2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_model.json +0 -0
  34. 2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_origin.pdf +3 -0
  35. 2401.02xxx/2401.02644/full.md +569 -0
  36. 2401.02xxx/2401.02644/images.zip +3 -0
  37. 2401.02xxx/2401.02644/layout.json +0 -0
  38. 2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_content_list.json +0 -0
  39. 2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_model.json +0 -0
  40. 2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_origin.pdf +3 -0
  41. 2401.02xxx/2401.02669/full.md +539 -0
  42. 2401.02xxx/2401.02669/images.zip +3 -0
  43. 2401.02xxx/2401.02669/layout.json +0 -0
  44. 2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_content_list.json +1692 -0
  45. 2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_model.json +2110 -0
  46. 2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_origin.pdf +3 -0
  47. 2401.02xxx/2401.02677/full.md +278 -0
  48. 2401.02xxx/2401.02677/images.zip +3 -0
  49. 2401.02xxx/2401.02677/layout.json +0 -0
  50. 2401.02xxx/2401.02695/0b4460a7-9d5c-4b40-9964-41bc630a0668_content_list.json +0 -0
.gitattributes CHANGED
@@ -10774,3 +10774,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10774
  2401.06xxx/2401.06800/d5c87e05-dbed-4f33-ae02-a4e5ad7e786f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10775
  2401.08xxx/2401.08671/2e5c320c-031d-4931-b5b1-a628a4a2fbc6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10776
  2402.00xxx/2402.00037/2f104688-c113-4bb9-8c9b-d751ad18cf0e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10777
+ 2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10778
+ 2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10779
+ 2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10780
+ 2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10781
+ 2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_origin.pdf filter=lfs diff=lfs merge=lfs -text
10782
+ 2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_origin.pdf filter=lfs diff=lfs merge=lfs -text
10783
+ 2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10784
+ 2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10785
+ 2401.02xxx/2401.02695/0b4460a7-9d5c-4b40-9964-41bc630a0668_origin.pdf filter=lfs diff=lfs merge=lfs -text
10786
+ 2401.02xxx/2401.02702/477b4a20-ab56-4a69-b11b-e928c3963d2b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10787
+ 2401.02xxx/2401.02772/b06e6987-7fab-496a-91a6-f80d8a92d042_origin.pdf filter=lfs diff=lfs merge=lfs -text
10788
+ 2401.02xxx/2401.02777/ad990f52-01ee-4b30-9113-d76a1a9d3503_origin.pdf filter=lfs diff=lfs merge=lfs -text
10789
+ 2401.02xxx/2401.02789/39bde678-756f-48e9-8501-18c8dad7ce75_origin.pdf filter=lfs diff=lfs merge=lfs -text
10790
+ 2401.02xxx/2401.02810/24e4fb24-1e09-40f8-9df2-b6c891ed57b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10791
+ 2401.02xxx/2401.02843/ad3260ba-b84e-4853-a53c-f698a206f7c8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10792
+ 2401.02xxx/2401.02844/5ec12ab4-6e25-463c-9810-eae9f5b5144c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10793
+ 2401.02xxx/2401.02861/7632717b-6b8b-4dba-ad53-f561555eb5db_origin.pdf filter=lfs diff=lfs merge=lfs -text
10794
+ 2401.02xxx/2401.02906/3c5f721b-5b54-441e-9fc6-600622d99192_origin.pdf filter=lfs diff=lfs merge=lfs -text
10795
+ 2401.02xxx/2401.02913/d62fd1e8-4ff5-46da-9129-5fe340877392_origin.pdf filter=lfs diff=lfs merge=lfs -text
10796
+ 2401.02xxx/2401.02954/50677117-a1bd-4d06-9926-9ae377e477dd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10797
+ 2401.02xxx/2401.02955/b30a3aea-bd80-4bd0-811a-f5ce9564729f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10798
+ 2401.02xxx/2401.02957/56e85510-e77f-4cda-95f5-e2c81b1399f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10799
+ 2401.03xxx/2401.03003/a969dc0e-9981-4530-9fb4-5886ba6ee5b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10800
+ 2401.03xxx/2401.03048/c9e470ea-106d-4348-9ba2-bc811355d324_origin.pdf filter=lfs diff=lfs merge=lfs -text
10801
+ 2401.03xxx/2401.03065/167ff25b-9bba-49ea-a88c-ec4321443933_origin.pdf filter=lfs diff=lfs merge=lfs -text
10802
+ 2401.03xxx/2401.03078/16167558-2e89-4fea-9215-f5f063e8d416_origin.pdf filter=lfs diff=lfs merge=lfs -text
10803
+ 2401.03xxx/2401.03082/c64909c5-fc3c-472a-96ef-b898523701ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
10804
+ 2401.03xxx/2401.03142/d190c210-d1db-483f-ac8b-05e3ef25b237_origin.pdf filter=lfs diff=lfs merge=lfs -text
10805
+ 2401.03xxx/2401.03160/914daf7a-6836-466c-b2d9-725e79f23c8a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10806
+ 2401.03xxx/2401.03179/4ff9c099-4b1c-4d00-8960-84c761c703be_origin.pdf filter=lfs diff=lfs merge=lfs -text
10807
+ 2401.03xxx/2401.03201/1472d40a-5c27-4380-bd45-b9c5267c259a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10808
+ 2401.03xxx/2401.03205/9673c892-96ff-4d7a-9f1e-34270d10522f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10809
+ 2401.03xxx/2401.03217/2174aa2e-690c-44ce-b170-b118cb84c4cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10810
+ 2401.03xxx/2401.03230/bd610ae5-1eaf-4381-91ad-b11343c9317c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10811
+ 2401.03xxx/2401.03286/38a095c2-b9de-4ad5-bf2d-69a43bd6ae08_origin.pdf filter=lfs diff=lfs merge=lfs -text
10812
+ 2401.03xxx/2401.03315/ed62fede-8a39-47c0-8b6d-e9278cadfd04_origin.pdf filter=lfs diff=lfs merge=lfs -text
10813
+ 2401.03xxx/2401.03374/61b752ef-5922-406f-81ff-bbd00841453d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10814
+ 2401.03xxx/2401.03379/6e23ea50-6594-4e48-ab94-9656d58848ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
10815
+ 2401.03xxx/2401.03395/cebcd8a2-f5e9-4bee-99c1-99cee35b5823_origin.pdf filter=lfs diff=lfs merge=lfs -text
10816
+ 2401.03xxx/2401.03407/e9452424-9ebb-4bc3-9a14-f214e780b38b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10817
+ 2401.03xxx/2401.03408/a27ec618-c493-4d97-992a-c57c47132ff9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10818
+ 2401.03xxx/2401.03428/558f4238-f348-4587-9afe-70156c728683_origin.pdf filter=lfs diff=lfs merge=lfs -text
10819
+ 2401.03xxx/2401.03429/1c0582f3-46f3-403a-90c8-b2e75ca70892_origin.pdf filter=lfs diff=lfs merge=lfs -text
10820
+ 2401.03xxx/2401.03462/94edaf15-180c-4260-b6e8-6aeaa06326ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
10821
+ 2401.03xxx/2401.03482/4f082384-c7c1-46ed-b326-5cd5987e4223_origin.pdf filter=lfs diff=lfs merge=lfs -text
10822
+ 2401.03xxx/2401.03495/1d7d887f-4123-4274-bcc6-59b9449fd225_origin.pdf filter=lfs diff=lfs merge=lfs -text
10823
+ 2401.03xxx/2401.03497/02f88790-f151-47c4-b2fa-af3d1a463bc5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10824
+ 2401.03xxx/2401.03530/237eb6e3-3fa4-476a-889d-43b216d2402e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10825
+ 2401.03xxx/2401.03552/ae1a3eb5-c3fd-4786-bab3-9a12fba05e4e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10826
+ 2401.03xxx/2401.03568/2cf32d6a-ef86-4fac-9f53-1fc9ad91466e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10827
+ 2401.03xxx/2401.03582/4b15f127-e90d-4ab0-98e6-f906ef65a3a3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10828
+ 2401.03xxx/2401.03601/2fc0fef2-dec8-44a0-8a79-a1f080d1de7b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10829
+ 2401.03xxx/2401.03641/c57c0d05-4cad-4f2c-9ee5-1bb1286fd3c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10830
+ 2401.03xxx/2401.03676/beb4610d-2b56-49d8-9baf-2d915fba8efd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10831
+ 2401.03xxx/2401.03703/88b8a83a-f2b6-42d5-9f97-7b6001d34260_origin.pdf filter=lfs diff=lfs merge=lfs -text
10832
+ 2401.03xxx/2401.03717/af5e158b-febe-4f9c-a915-aa34c6a233e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10833
+ 2401.03xxx/2401.03726/4f36e4ec-6267-43ed-9495-7ed38b12ea64_origin.pdf filter=lfs diff=lfs merge=lfs -text
10834
+ 2401.03xxx/2401.03729/17421137-057c-48b9-b4eb-c2a737b3f651_origin.pdf filter=lfs diff=lfs merge=lfs -text
10835
+ 2401.04xxx/2401.04136/1447012c-0c13-4f37-bf93-97987e0b439d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10836
+ 2401.06xxx/2401.06782/4a1cad1f-282f-4565-8e4e-d759d9912349_origin.pdf filter=lfs diff=lfs merge=lfs -text
10837
+ 2401.06xxx/2401.06785/13ea74c3-bddf-4f95-81c8-ae240d94304a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10838
+ 2401.08xxx/2401.08667/1fbd6031-9048-4614-ae29-2eba52cd4bb5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10839
+ 2401.15xxx/2401.15081/85d8f556-1346-4260-ad67-7b29e5dafd96_origin.pdf filter=lfs diff=lfs merge=lfs -text
10840
+ 2402.00xxx/2402.00024/628c1feb-9ec2-466c-9ea8-51879890441d_origin.pdf filter=lfs diff=lfs merge=lfs -text
2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02418/680421ce-2946-4afa-a8d6-d72e840bf7f8_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:453917759568411eb7f98798e840ebd9f73be3238774cd92fdfb0342cae3e52a
3
+ size 3202397
2401.02xxx/2401.02418/full.md ADDED
@@ -0,0 +1,453 @@
1
+ # Learning to Prompt with Text Only Supervision for Vision-Language Models
2
+
3
+ Muhammad Uzair Khattak<sup>1</sup> Muhammad Ferjad Naeem<sup>2</sup> Muzammal Naseer<sup>1</sup> Luc Van Gool<sup>2</sup> Federico Tombari<sup>3,4</sup>
4
+
5
+ $^{1}$ Mohamed bin Zayed University of AI $^{2}$ ETH Zurich $^{3}$ TU Munich $^{4}$ Google
6
+
7
+ # Abstract
8
+
9
+ Foundational vision-language models such as CLIP are becoming a new paradigm in vision due to their excellent generalization abilities. However, adapting these models for downstream tasks while maintaining their generalization remains a challenge. In the literature, one branch of methods adapts CLIP by learning prompts using visual information. While effective, most of these works require labeled data, which is not practical, and they often struggle to generalize to new datasets due to over-fitting on the source data. An alternative approach resorts to training-free methods that generate class descriptions from large language models (LLMs) and perform prompt ensembling. However, these methods often generate class-specific prompts that cannot be transferred to other classes, and they incur higher costs by generating LLM descriptions for each class separately. In this work, we propose to combine the strengths of both streams of methods by learning prompts using only text data derived from LLMs. As supervised training of prompts is not trivial in the absence of images, we develop a training approach that allows prompts to extract rich contextual knowledge from LLM data. Moreover, with LLM contextual data mapped within the learned prompts, our method enables zero-shot transfer of prompts to new classes and datasets, potentially cutting the LLM prompt engineering cost. To the best of our knowledge, this is the first work that learns generalized prompts using text-only data. We perform extensive evaluations on 4 benchmarks where our method improves over prior ensembling works while being competitive with those utilizing labeled images. Our code and pre-trained models are available at https://github.com/muzairkhattak/ProText.
10
+
11
+ # 1. Introduction
12
+
13
+ The Vision field is experiencing a new paradigm in its model-building approach with the emergence of foundational models [18, 23, 37, 47], which are large DNNs pretrained on web-scale data. Among these, Vision-Language models (VLMs) such as CLIP [37] stand out as the latest
14
+
15
+ <table><tr><td></td><td>Method</td><td>Do not require images</td><td>Transfer to unseen datasets</td></tr><tr><td rowspan="4">Prompt learning methods</td><td>CoOp [50]</td><td>×</td><td>✓</td></tr><tr><td>CoCoOp [49]</td><td>×</td><td>✓</td></tr><tr><td>MaPLe [20]</td><td>×</td><td>✓</td></tr><tr><td>PromptSRC [21]</td><td>×</td><td>✓</td></tr><tr><td rowspan="3">Prompt ensembling methods (LLM)</td><td>DCLIP [29]</td><td>✓</td><td>×</td></tr><tr><td>WaffleCLIP-Concept [39]</td><td>✓</td><td>×</td></tr><tr><td>CuPL [36]</td><td>✓</td><td>×</td></tr><tr><td></td><td>ProText (Ours)</td><td>✓</td><td>✓</td></tr></table>
16
+
17
+ Table 1. Existing methods improve CLIP's generalization by learning prompts with image supervision or using non-transferable prompt ensembling with LLM knowledge. In contrast, our approach, ProText, effectively learns prompts with text-only supervision which are transferable to new datasets and classes.
18
+
19
+ highlights which leverage contrastive pre-training on massive image-text pairs from the internet. During pre-training, CLIP learns to align image-text samples in a shared feature space. This allows CLIP to encode open-vocabulary concepts and generalize well to zero-shot recognition tasks.
20
+
21
+ CLIP consists of two encoders to encode image and text inputs respectively. At inference, a hand-crafted prompt such as 'a photo of a CLS' is used as the text input. Text features of the classes are compared with the visual feature, and the class with the highest similarity is assigned as the predicted label. Improving the quality of text templates, such as adding attributes [1] or class-specific details [19, 36], has been shown to improve CLIP performance. However, designing high-quality prompts that best describe the test image remains a key challenge, as the image content is not known in advance.
22
+
23
+ In the literature, numerous techniques have been proposed to adapt CLIP for downstream recognition tasks. One branch of methods [6, 17, 27, 41, 49, 50] treats text prompts as learnable vectors and optimizes them using task-specific objectives such as cross-entropy. As prompts are learned in the embedding space, this allows them to be used with classes and datasets beyond those on which they were trained. While effective over the baseline CLIP, most of these methods require annotated image labels to optimize the prompts, which is often impractical, especially in real-world scenarios such as medical imaging, remote sensing, security, surveillance, etc.
24
+
25
+ ![](images/353fb85488dae31556978cf2884998783eedeeccb509804809c93804a8355d4f.jpg)
26
+ Figure 1. Without using any images for supervision, ProText with text-only training improves over CLIP, CuPL, and prior 16-shot image-supervised methods in the challenging cross-dataset transfer setting. Prompt-ensembling-based CuPL performs the same as CLIP because it cannot transfer class-specific LLM templates to cross-datasets.
27
+
28
+ Moreover, these methods tend to overfit on few-shot source samples and struggle to retain CLIP's generalization, especially in cross-dataset settings.
29
+
30
+ Alternatively, several methods [29, 36] have adopted the training-free approach of prompt ensembling by leveraging the capabilities of Large Language Models (LLMs). Instead of using hand-crafted templates, these methods mine dataset or class specific descriptors and captions from LLMs to enrich text features. These enriched features aim to better represent content that could possibly occur in test images, leading to improvements over baseline CLIP. Although these methods do not require image information, the knowledge acquired from LLMs is mostly specific to each class and not directly transferable across unseen classes and datasets since no optimization is performed. Additionally, generating LLM descriptions for each concept separately incurs additional LLM serving and prompt engineering costs.
31
+
32
+ In this work, we present a new paradigm to improve CLIP's generalization. Our motivation comes from combining the strengths of prompt learning and prompt ensembling approaches while effectively addressing their limitations. To this end, we introduce ProText: Prompt Learning with Text-Only Supervision. In contrast to previous methods, our approach instead proposes to learn prompts using text only data obtained from LLMs. As supervised training of prompts is not trivial due to image-free setting, we develop a novel training framework that allows prompts to learn and extract rich contextual knowledge from LLM data. Moreover, as LLM contextual knowledge is mapped within the learned prompts, it enables zero-shot transfer of prompts to new classes and datasets, potentially leading to a substantial reduction in LLM serving and prompt engineering cost.
33
+
34
+ As shown in Tab. 1, our approach differs from prior methods as it does not require image samples to learn prompts; in addition, the adapted CLIP transfers well to unseen classes and datasets, thereby addressing a key limitation of LLM-based prompt ensembling techniques. We demonstrate the effectiveness of ProText by performing extensive evaluations on 4 benchmarks.
35
+
36
+ On the challenging cross-dataset transfer setting, ProText, without using any visual information, achieves an average gain of $+2.08\%$ over CLIP while surpassing the performance of the previous best image-supervised prompt learning method MaPLe [20] by $+0.93\%$ (Fig. 1). Further, ProText with text-only supervision performs competitively against prior methods in the domain generalization, base-to-novel class, and text-only supervised settings. Our main contributions are summarized as follows:
37
+
38
+ - We present a new approach for prompt learning in CLIP using text-only supervision. Our method harmonically combines the strengths of prompt learning and prompt ensembling methods to improve CLIP's generalization.
39
+ - To optimize prompts with text-only data, we develop a training approach that allows prompts to learn a mapping by extracting rich contextual information from LLM data.
40
+ - As LLM contextual knowledge is mapped within the learned prompts, this enables prompts to be directly used with new classes and datasets potentially cutting the additional LLM serving and prompt engineering cost.
41
+ - We validate the effectiveness of our method through extensive experiments across four benchmarks. Our ProText approach improves the generalization of CLIP across various settings and fares competitively against approaches that explicitly use labeled image samples for training.
42
+
43
+ # 2. Related Work
44
+
45
+ Foundational Vision-Language models (VLMs). VLMs [18, 33, 37, 46-48] leverage joint image-text pretraining using internet-scale data in a self-supervised fashion. Representative VLMs like CLIP [37] and ALIGN [18] have utilized around 400M and 1B image-text pairs during their pre-training. Using the contrastive learning objective, VLMs learn rich multi-modal features by attracting together the features of paired images and texts while repelling un-paired image-text features in a joint feature space. The resulting model learns open-vocabulary concepts interpretable through natural language suitable for various downstream discriminative vision tasks such as open-vocabulary image classification [6, 20, 27, 31, 32, 50], detection [3, 10, 26, 30, 51], and segmentation [13, 24, 25]. Although promising, adapting VLMs effectively while maintaining their original generalization remains a crucial challenge. In this work, we propose a novel method to adapt CLIP with prompt learning through text modality supervision to improve its performance on vision modality tasks.
46
+
47
+ Prompt Learning for VLMs. Prompt Learning [6, 9, 27, 40, 41, 49, 50] has emerged as an effective fine-tuning strategy to adapt large-scale models. This approach adds a small number of learnable embeddings along with model inputs which are optimized during training while the rest of the model is kept frozen. As the pre-trained model is unchanged during prompt learning, it has become particularly effective for VLMs such as CLIP, where maintaining the model's
48
+
49
+ original generalizability is crucial. CoOp [50] is the pioneering prompt learning method for CLIP which learns text prompt embeddings to fine-tune CLIP. CoCoOp [49] improves CoOp's generalization by conditioning text prompts on visual features. MaPLe [20] proposes a multi-modal prompting framework to adapt both vision and language branches of CLIP. UPL [17] adopts an unsupervised prompt learning approach to finetune CLIP. PromptSRC [21] improves prompt learning from a regularization perspective by making use of additional loss functions during training. While these methods improve baseline CLIP performance, most of them require image samples with labels, which is less practical, and generating pseudo-labels is often less effective. In contrast, we present a novel prompt learning approach that improves CLIP generalization without relying on any visual samples during training.
50
+
51
+ Training-Free Text Prompt Enhancement. With the emergence of LLMs such as GPT-3 [5], several approaches [29, 36, 39] have demonstrated their potential for improving the zero-shot generalization of CLIP. Instead of using handcrafted templates for generating class features, these methods leverage LLMs to generate high-level concepts, class descriptions, and/or attributes which are used in one form or another to produce enriched text features. DCLIP [29] generates fine-grained per-class language descriptors and ensembles their similarities with the image to produce classification scores. WaffleCLIP [39] matches DCLIP's performance with random descriptors and shows further gains with dataset-specific concepts generated via LLMs. CuPL [36] queries LLMs to generate class-specific prompt descriptions for text prompt ensembling. Although effective, most of these approaches generate class-specific text data from LLMs which is not directly transferable to unseen classes and new datasets since no training is performed. On the other hand, we aim to leverage the same LLM data via a novel text-only prompt learning technique which seamlessly allows the transfer of learned prompts toward unseen classes and new datasets.
52
+
53
+ # 3. Method
54
+
55
+ Given the language interpretable nature of foundational VLMs such as CLIP [37], they are naturally suited for zero-shot recognition tasks. However, to achieve full potential of CLIP's generalization for downstream tasks, adaptation still appears to be necessary. Numerous approaches have since been proposed to adapt general knowledge of CLIP for user-specific downstream tasks. One line of methods adopts prompt learning [20, 27, 49, 50] to re-purpose CLIP features for downstream data. While effective, most of them require image samples with labels to learn the prompts, which is a hard requirement to meet. Another line of methods adopts training-free prompt ensembling techniques [29, 36, 39] with the help of LLMs. Although ensembling-based approaches do not require image information, the majority of
56
+
57
+ these works generate class-specific LLM prompts that are not directly transferable to new classes and datasets.
58
+
59
+ To this end, we present a new paradigm for learning generalized transferable prompts for VLMs using text-only supervision. Our proposed adaptation framework, ProText: Prompt Learning with Text only supervision aims to address the challenges of existing approaches by learning transferable prompts without relying on images. Fig. 2 shows our ProText framework. First, we curate text-only LLM template data using class names of a given dataset and a LLM such as GPT-3 [5]. As a text-supervised approach, ProText only requires CLIP text encoders during training. Specifically, we employ one frozen encoder with learnable prompts and a second frozen encoder without learnable prompts. Learnable prompts with class-name templates are input to the prompted text encoder to obtain the class-name template feature, and a frozen text encoder generates LLM template feature from its description obtained from LLM data. Next, we employ a contextual mapping training objective which maps class-name template feature to the LLM template feature. Contextual mapping allows the prompts to learn a mapping function that embeds rich contextual knowledge from LLM data within the prompt vectors. As prompts are learned in the embedding space, they are directly compatible with new classes and datasets. At inference, the learned prompts are shipped with CLIP model for standard zero-shot CLIP inference for visual recognition.
60
+
61
+ Below we explain our proposed approach in detail. We first revisit CLIP and previous methods including Prompt Learning and Prompt Ensembling via LLMs in Sec. 3.1 and then we present our ProText approach in Sec. 3.2.
62
+
63
+ # 3.1. Preliminaries
64
+
65
+ Contrastive Language-Image Pre-training (CLIP). CLIP consists of an image encoder $f$ and a text encoder $g$, which map image and text inputs into visual and textual features, respectively. We denote the CLIP parameters as $\theta_{\mathrm{CLIP}} = \{\theta_f, \theta_g\}$, where $\theta_f$ and $\theta_g$ refer to the image and text encoder parameters, respectively. The input image $X$ is divided into $M$ patches, which are linearly projected to produce patch tokens, and a learnable class token CLS is prepended, resulting in the final sequence $\tilde{X} = \{\mathrm{CLS}, e_1, e_2, \dots, e_M\}$. The image encoder $f$ encodes the input patches via multiple transformer blocks to produce a latent visual feature representation $\tilde{f} = f(\tilde{X}, \theta_f)$, where $\tilde{f} \in \mathbb{R}^d$. Next, the corresponding class label $y$ is embedded in a text template, such as 'a photo of a [CLASS]', which can be formulated as $\tilde{Y} = \{\mathrm{SOS}, t_1, t_2, \dots, t_L, c_k, \mathrm{EOS}\}$. Here $\{t_l|_{l=1}^L\}$ and $c_k$ are the word embeddings corresponding to the text template and the label $y$, respectively, while SOS and EOS are the learnable start and end token embeddings. The text encoder $g$ encodes $\tilde{Y}$ via multiple transformer blocks to produce the latent text feature $\tilde{g} = g(\tilde{Y}, \theta_g)$, where $\tilde{g} \in \mathbb{R}^d$.
66
+
67
+ ![](images/8c4b797776646437ed49d229748e75f2300bb94991003b382ec3b70e54c9cbaa.jpg)
68
+ Figure 2. Overview of ProText framework. (Left) First, diverse captions are generated for training classes using LLM like GPT-3. During training, CLIP text encoders generate prompted class-name feature $(\tilde{g}_p)$ from class-name templates with learnable prompts and frozen LLM template feature $(\tilde{g})$ from LLM generated templates. Next, we employ contextual mapping loss to guide learnable prompts to learn a mapping from the prompted class-name feature to the LLM template feature containing more information about the class. This allows the learned prompts to exploit internal knowledge of text encoder complemented by LLM descriptions. (Right) At inference, learned prompts are used with class-name templates, and the standard zero-shot CLIP inference protocol is followed. Moreover, rich contextual information from LLM descriptions mapped within the learned prompts enables its transferability to new classes and datasets.
69
+
70
+ ![](images/8aa63a96b4dfc68a722a921e08f67407b230a95a8024094710d52506653e18e6.jpg)
71
+
72
+ For zero-shot inference, text features of the text template embedded with class labels $\{1,2,\dots,C\}$ are matched with the image feature $\tilde{f}$ as $\frac{\exp(\operatorname{sim}(\tilde{g}, \tilde{f})\,\tau)}{\sum_{i=1}^{C}\exp(\operatorname{sim}(\tilde{g}_i, \tilde{f})\,\tau)}$, where $\operatorname{sim}()$ denotes the cosine similarity and $\tau$ is the temperature.
73
+
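As an illustration of this zero-shot protocol, the following sketch uses the publicly released OpenAI `clip` package with a ViT-B/16 backbone; the class names and image path are placeholders, not values from the paper:

```python
import torch
import clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/16", device=device)

class_names = ["golden retriever", "tabby cat", "sports car"]          # placeholder classes
texts = clip.tokenize([f"a photo of a {c}" for c in class_names]).to(device)
image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)  # placeholder image

with torch.no_grad():
    img_feat = model.encode_image(image)    # visual feature f~
    txt_feat = model.encode_text(texts)     # class text features g~_1 ... g~_C
    img_feat = img_feat / img_feat.norm(dim=-1, keepdim=True)
    txt_feat = txt_feat / txt_feat.norm(dim=-1, keepdim=True)
    # temperature-scaled cosine similarities, softmaxed over the classes
    probs = (model.logit_scale.exp() * img_feat @ txt_feat.t()).softmax(dim=-1)

print(dict(zip(class_names, probs[0].tolist())))
```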
74
+ Prompt Learning with CLIP. Being a parameter efficient tuning method, prompt learning has emerged as a popular technique to adapt vision-language models like CLIP. Since most of the model is kept frozen during adaptation, prompt learning aims to reduce overfitting. Learnable prompts are appended either at the image side [2], text encoder side [49, 50], or both sides. In this work, we learn hierarchical prompts at the text encoder named Deep Language Prompting (DLP) [20] formulated as follows.
75
+
76
+ $T$ learnable language prompts $\pmb{P}_{t} = \{\pmb{p}_{t}^{1},\pmb{p}_{t}^{2},\dots ,\pmb{p}_{t}^{T}\}$ are appended with text input tokens, resulting in $\tilde{\pmb{Y}}_p = \{\mathrm{SOS},\pmb {P}_t,\pmb {t}_1,\pmb {t}_2,\dots ,\pmb {t}_L,\pmb {c}_k,\mathrm{EOS}\}$ . The text encoder processes $\tilde{\mathbf{Y}}_p$ and prompted text feature is obtained as $\tilde{g}_p = g(\tilde{\mathbf{Y}}_p,\theta_g)$ . We use deep prompting which learns hierarchical prompts at subsequent transformer blocks of text encoder. Visual feature $\tilde{f}$ is obtained without utilizing learnable prompts. To adapt CLIP on image classification task on dataset $\mathcal{D}$ , prompts $\pmb{P}_{t}$ are optimized in a supervised fashion using labeled image samples with cross-entropy loss, $\mathcal{L}_{\mathrm{CE}}$ .
77
+
78
+ $$
79
+ \mathcal{L}_{\mathrm{CE}} = \arg\min_{\boldsymbol{P}_{t}} \; \mathbb{E}_{(\boldsymbol{X}, y)\sim\mathcal{D}} \; \mathcal{L}\big(\operatorname{sim}(\tilde{\boldsymbol{f}}, \tilde{\boldsymbol{g}}_{p}), y\big). \tag{1}
80
+ $$
81
+
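A minimal, self-contained sketch of this supervised prompt-learning objective is given below. It is not the authors' implementation: `text_encoder` is a stand-in for CLIP's frozen text encoder, and the token embeddings and image features are random placeholders; only the prompt vectors $\pmb{P}_t$ receive gradients.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

d_embed, d_feat, T, L, C = 512, 512, 4, 8, 10   # dims, prompt length, token length, classes

# Stand-in for CLIP's frozen text encoder (an assumption, not the real model).
text_encoder = nn.Linear((T + L) * d_embed, d_feat)
for p in text_encoder.parameters():
    p.requires_grad_(False)

class_token_embeds = torch.randn(C, L, d_embed)           # embedded 'a photo of a <class_k>'
prompts = nn.Parameter(0.02 * torch.randn(T, d_embed))    # learnable prompt vectors P_t
optimizer = torch.optim.AdamW([prompts], lr=2e-3)

def prompted_text_features():
    # Prepend the shared learnable prompts to every class-name token sequence.
    seq = torch.cat([prompts.unsqueeze(0).expand(C, -1, -1), class_token_embeds], dim=1)
    return F.normalize(text_encoder(seq.flatten(1)), dim=-1)

# One optimization step of Eq. (1) on a placeholder batch of labeled image features.
image_feats = F.normalize(torch.randn(32, d_feat), dim=-1)
labels = torch.randint(0, C, (32,))
logits = 100.0 * image_feats @ prompted_text_features().t()   # scaled cosine similarities
loss = F.cross_entropy(logits, labels)
optimizer.zero_grad(); loss.backward(); optimizer.step()
```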
82
+ Prompt Ensembling with LLM descriptions. Several methods have recently proposed to adapt CLIP via training-free prompt ensembling techniques. The majority of these approaches leverage the capabilities of LLMs to mine rich descriptions, attributes, or high-level concepts of class names. The corresponding text features are either averaged
83
+
84
+ [36], or the similarity score of each attribute with the image is calculated to obtain classification scores [29, 39].
85
+
86
+ In this work, we focus our comparison on the strong ensembling baseline CuPL [36]. Specifically, a Large Language Model $\mathcal{F}$ such as GPT-3 [5] is used to generate class-specific descriptions for class labels $\{1,2,\dots,C\}$ using queries such as 'How does a CLASS look like'. Text features of the descriptions of the same class are averaged together, which serves as the ensembled text feature. Finally, zero-shot inference is performed with these ensembled text features.
87
+
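For reference, a small sketch of this CuPL-style ensembling with the OpenAI `clip` package is shown below; the per-class descriptions are illustrative placeholders standing in for actual GPT-3 outputs:

```python
import torch
import clip

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-B/16", device=device)

# Placeholder LLM-generated descriptions per class (in CuPL these come from GPT-3 queries).
llm_descriptions = {
    "goldfish": ["A goldfish is a small orange fish with shiny scales.",
                 "A photo of a goldfish swimming in a glass bowl."],
    "tiger":    ["A tiger is a large cat with orange fur and black stripes.",
                 "A photo of a tiger walking through tall grass."],
}

ensembled = []
with torch.no_grad():
    for cls, descriptions in llm_descriptions.items():
        feats = model.encode_text(clip.tokenize(descriptions).to(device)).float()
        feats = feats / feats.norm(dim=-1, keepdim=True)
        mean = feats.mean(dim=0)            # ensemble: average the description features per class
        ensembled.append(mean / mean.norm())
text_classifier = torch.stack(ensembled)    # one ensembled text feature per class

# `text_classifier` is then matched against image features exactly as in zero-shot CLIP.
```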
88
+ # 3.2. Prompt Learning with Text-Only Supervision
89
+
90
+ While image-supervised prompt learning and LLM-based prompt ensembling methods have proven effective in adapting CLIP, they face notable challenges as outlined below.
91
+
92
+ Visual data dependency. Existing prompt learning methods require visual samples with labels to optimize prompts using Eq. 1. However, collecting samples and labels is difficult in critical scenarios like medical images, remote sensing, and surveillance. Pseudo-labels alleviate label dependency but they are often less effective. Furthermore, these methods tend to overfit CLIP to source data distributions and compromise generalization across cross-datasets. For instance, CoOp utilizing labeled source samples reduces average CLIP performance by $1.27\%$ on 10 cross-datasets.
93
+
94
+ LLM Prompts transferability limitation. LLM-based prompt ensembling approaches like CuPL [36] generate class-specific LLM descriptions that cannot be directly transferred to unseen classes and datasets. While open-source LLMs exhibit lower performance, proprietary ones such as GPT-3 are required for generating data for new classes and datasets leading to additional serving costs.
95
+
96
+ Our work aims to address the aforementioned limitations within a unified framework. Below we detail our strategy for curating text-to-text data via LLMs for training, followed by our text-only prompt learning framework.
97
+
98
+ # 3.2.1 Text-Only LLM data for Prompt Learning
99
+
100
+ As discussed in Sec. 3.1, optimizing prompts for downstream datasets typically requires image-label pairs. Since we explicitly aim to bypass this requirement, we first leverage LLMs to curate text data for prompt learning, which consists of text inputs and text outputs. Given a set of classes $\{c_i\}_{i=1}^C$, we prepare text inputs $\{L_{\mathrm{inputs}}^i\}_{i=1}^C$ by wrapping each class name in a standard hand-written text template,
101
+
102
+ $$
103
+ L_{\text{inputs}}^{i} = \text{'a photo of a } c_{i}\text{'}.
104
+ $$
105
+
106
+ Next, we prepare text outputs corresponding to the $L_{\mathrm{inputs}}$ . Specifically, we query GPT-3 model to generate detailed descriptions for each class name $c_i$ . Similar to CuPL [36], we prompt GPT-3 with different queries $Q$ conditioned on class names such as 'How does a $c_i$ look like?' and 'How can you identify a $c_i$ ?' to obtain text outputs,
107
+
108
+ $$
109
+ L_{\text{outputs}}^{i} = \mathcal{F}(Q \mid c_{i}).
110
+ $$
111
+
112
+ Similar to [36], we generate $M$ text outputs per query $Q$ and use $N$ different queries, resulting in $M \times N$ text outputs per class category. We associate all $L_{\mathrm{outputs}}$ with the corresponding single $L_{\mathrm{inputs}}$ for each class $c_i$ . As LLMs are pre-trained on internet-scale text corpora, they possess the capability of generating very diverse and high-quality descriptions and captions for different class categories which results in high-quality text outputs. Finally we combine $L_{\mathrm{inputs}}$ and $L_{\mathrm{outputs}}$ to create LLM based text-to-text data for text only prompt learning, $\mathcal{D}_{\mathrm{PROMPT}} = \{L_{\mathrm{inputs}}^i, L_{\mathrm{outputs}}^i\}_{i=1}^{M \times N \times C}$ . We refer the readers to supplementary for additional details on the choice of LLM prompts and examples of $\mathcal{D}_{\mathrm{PROMPT}}$ .
113
+
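A sketch of how such a text-to-text dataset could be assembled is shown below. The `query_llm` helper is hypothetical (a stand-in for a GPT-3 API call), and the query strings follow the two examples mentioned above:

```python
# Hypothetical helper: replace with an actual LLM API call (e.g., to GPT-3).
def query_llm(prompt: str, n_completions: int) -> list[str]:
    raise NotImplementedError("plug in a real LLM client here")

def build_prompt_dataset(class_names, queries, m_per_query):
    """Builds D_PROMPT as (L_inputs, L_outputs) pairs: M x N outputs per class."""
    pairs = []
    for c in class_names:
        l_input = f"a photo of a {c}"                                 # L_inputs^i
        for q in queries:
            for desc in query_llm(q.format(cls=c), m_per_query):     # L_outputs^i
                pairs.append((l_input, desc))
    return pairs

queries = ["How does a {cls} look like?", "How can you identify a {cls}?"]
# dataset = build_prompt_dataset(["goldfish", "tiger"], queries, m_per_query=5)
```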
114
+ # 3.2.2 Contextual mapping with Prompt Learning
115
+
116
+ To leverage the LLM text-to-text data $\mathcal{D}_{\text{PROMPT}}$ for learning generalized transferable prompts, we propose a contextual mapping strategy that learns a mapping function from standard class-name templates such as 'a photo of a $c_i$' to the text feature generated from an LLM description, which contains more information about the class $c_i$. In other words, contextual mapping allows learnable prompts to map $L_{\text{inputs}}$ to $L_{\text{outputs}}$ in the text feature space of CLIP. The mapping function is realized in the form of learnable prompt vectors, which we found to be more effective in our ablations than other techniques such as adapters via linear projection and MLP.
117
+
118
+ For the $i$-th training sample from $\mathcal{D}_{\mathrm{PROMPT}}$, consisting of a text-to-text pair $\{L_{\mathrm{inputs}}, L_{\mathrm{outputs}}\}_i$, we obtain the prompted class-name feature $\tilde{\pmb{g}}_p$ for $L_{\mathrm{inputs}}^i$ using learnable prompts, and the frozen LLM feature $\tilde{\pmb{g}}$ for $L_{\mathrm{outputs}}^i$ without the prompt vectors, within the pre-trained latent space of CLIP. We then impose a contextual mapping constraint between the $\tilde{\pmb{g}}_p$ and $\tilde{\pmb{g}}$ text features as follows,
119
+
120
+ $$
121
+ \mathcal{L}_{\text{mapping}} = \frac{1}{d}\sum_{i=1}^{d}\left\|\tilde{\boldsymbol{g}}_{p} - \tilde{\boldsymbol{g}}\right\|_{2}^{2}. \tag{2}
122
+ $$
123
+
124
+ As shown above, we utilize an MSE loss objective to enforce contextual mapping from $L_{\text{inputs}}^i$ to $L_{\text{outputs}}^i$. We study other choices of consistency objectives in our ablations (Sec. 4.7).
+
+ Motivation for $\mathcal{L}_{\text{mapping}}$. The contextual mapping objective allows learnable prompts to exploit the internal knowledge of CLIP's text encoder to generate rich contextual features aligned with the LLM descriptions ($L_{\text{outputs}}^i$) for a given class. This strategy effectively learns prompts without using any visual information, and when trained on all training classes together, it enables prompts to capture versatile and generalized context from the LLM descriptions. These context-aware prompts become adaptable for use with any dataset and effectively enable the transferability of class-specific LLM descriptions to unseen classes and datasets. Consequently, this substantially reduces the per-dataset overhead associated with LLM serving and prompt engineering.
125
+
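A minimal sketch of one training step with this contextual mapping objective is given below, assuming two callables: `prompted_text_features` (the text encoder with the learnable prompts) and `frozen_text_features` (the same encoder without prompts). Both are stand-ins rather than the actual ProText code:

```python
import torch
import torch.nn.functional as F

def protext_step(prompted_text_features, frozen_text_features, optimizer, batch):
    """One optimization step of Eq. (2) on a batch of (L_inputs, L_outputs) text pairs."""
    l_inputs, l_outputs = batch
    g_p = prompted_text_features(l_inputs)     # prompted class-name features (grads reach prompts)
    with torch.no_grad():
        g = frozen_text_features(l_outputs)    # frozen features of the LLM descriptions
    loss = F.mse_loss(g_p, g)                  # L_mapping: mean squared error over feature dims
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```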
126
+ Inference. Once the text prompt vectors are optimized through our ProText framework in the text domain, they are ready to be shipped with CLIP for downstream visual-domain inference using the standard zero-shot CLIP inference setup. As shown in Fig. 2 (right), the learned prompts $P_{t}$ are fused with each given class name to produce prompted text features $\{\tilde{g}_p\}_{i=1}^C$. Finally, zero-shot inference is performed with the prompted text features and the input image feature $\tilde{f}$ to produce classification scores on test images.
127
+
128
+ # 4. Experiments
129
+
130
+ # 4.1. Evaluation settings
131
+
132
+ We perform evaluations in 4 benchmark settings. Prompt ensembling methods and ProText utilize text-only LLM data for adapting CLIP while image-supervised prompt learning methods use image-label pairs for training.
133
+
134
+ Base-to-Novel Generalization. This setting evaluates the generalization of methods within a dataset. Following previous methods [49, 50], we split each dataset into base and novel classes. Models are trained on base classes and evaluated on the test set of base and novel classes respectively.
135
+
136
+ Cross-dataset transfer. This setting evaluates the generalization ability of models trained on ImageNet-1k [8] source dataset by directly transferring it on cross-datasets.
137
+
138
+ Domain Generalization. We evaluate the robustness of different methods on out-of-distribution datasets.
139
+
140
+ <table><tr><td>Method</td><td>ImageNet Acc.</td></tr><tr><td>1: CLIP (ICML&#x27;21)</td><td>66.72</td></tr><tr><td>2: CLIP-Attribute</td><td>67.60</td></tr><tr><td>3: CLIP-80</td><td>68.32</td></tr><tr><td>4: DCLIP (ICLR&#x27;23)</td><td>68.03</td></tr><tr><td>5: Waffle CLIP (ICCV&#x27;23)</td><td>68.34</td></tr><tr><td>6: CuPL (ICCV&#x27;23)</td><td>69.62</td></tr><tr><td>7: ProText-Attribute</td><td>68.05</td></tr><tr><td>8: ProText-80</td><td>68.48</td></tr><tr><td>9: ProText-CuPL</td><td>70.22</td></tr></table>
141
+
142
+ Table 2. With the same amount of text data, learning contextual prompts with text-only supervision improves CLIP performance in comparison to the prompt ensembling techniques.
143
+
144
+ <table><tr><td rowspan="2">Dataset</td><td colspan="3">CLIP [37]</td><td colspan="3">CuPL [50]</td><td colspan="3">ProText (Ours)</td></tr><tr><td>Base</td><td>Novel</td><td>HM</td><td>Base</td><td>Novel</td><td>HM</td><td>Base</td><td>Novel</td><td>HM</td></tr><tr><td>ImageNet</td><td>72.43</td><td>68.14</td><td>70.22</td><td>74.30</td><td>68.14</td><td>71.09</td><td>75.00</td><td>71.38</td><td>73.14</td></tr><tr><td>Caltech101</td><td>96.84</td><td>94.00</td><td>95.40</td><td>97.22</td><td>94.00</td><td>95.58</td><td>98.06</td><td>95.63</td><td>96.83</td></tr><tr><td>OxfordPets</td><td>91.17</td><td>97.26</td><td>94.12</td><td>94.42</td><td>97.26</td><td>95.82</td><td>94.95</td><td>98.00</td><td>96.45</td></tr><tr><td>StanfordCars</td><td>63.37</td><td>74.89</td><td>68.65</td><td>63.54</td><td>74.89</td><td>68.75</td><td>64.54</td><td>76.08</td><td>69.84</td></tr><tr><td>Flowers102</td><td>72.08</td><td>77.80</td><td>74.83</td><td>74.36</td><td>77.80</td><td>76.04</td><td>74.36</td><td>78.44</td><td>76.35</td></tr><tr><td>Food101</td><td>90.10</td><td>91.22</td><td>90.66</td><td>89.93</td><td>91.22</td><td>90.57</td><td>90.20</td><td>91.98</td><td>91.08</td></tr><tr><td>Aircraft</td><td>27.19</td><td>36.29</td><td>31.09</td><td>30.61</td><td>36.29</td><td>33.21</td><td>30.91</td><td>34.13</td><td>32.44</td></tr><tr><td>SUN397</td><td>69.36</td><td>75.35</td><td>72.23</td><td>76.02</td><td>75.35</td><td>75.68</td><td>76.14</td><td>79.14</td><td>77.61</td></tr><tr><td>DTD</td><td>53.24</td><td>59.90</td><td>56.37</td><td>62.85</td><td>59.90</td><td>61.34</td><td>63.08</td><td>61.59</td><td>62.33</td></tr><tr><td>EuroSAT</td><td>56.48</td><td>64.05</td><td>60.03</td><td>59.64</td><td>64.05</td><td>61.77</td><td>59.71</td><td>80.97</td><td>68.73</td></tr><tr><td>UCF101</td><td>70.53</td><td>77.50</td><td>73.85</td><td>75.28</td><td>77.50</td><td>76.37</td><td>75.54</td><td>79.50</td><td>77.47</td></tr><tr><td>Average</td><td>69.34</td><td>74.22</td><td>71.70</td><td>72.56</td><td>74.22</td><td>73.38</td><td>72.95</td><td>76.98</td><td>74.91</td></tr></table>
145
+
146
+ Table 3. Base-to-novel setting. ProText enables the transferability of learned prompts to new classes and improves over CuPL [36].
147
+
148
+ We train models on the ImageNet-1k source dataset and evaluate their performance on four ImageNet variants with domain shifts.
149
+
150
+ Supervised setting. We provide performance comparison of ProText with CuPL[36] with text-only data per dataset.
151
+
152
+ Datasets. For the aforementioned benchmarks, we use same datasets as followed by previous works [20, 21, 49, 50]. For cross-dataset transfer, domain generalization, and base-to-novel generalization settings, we use 11 image datasets that cover multiple recognition tasks. These include ImageNet [8] and Caltech101 [11] which contains generic objects; OxfordPets [35], StanfordCars [22], Flowers102 [34], Food101 [4], and FGVCAircraft [28] for fine-grained classification, SUN397 [45] for scene recognition, UCF101 [42] for action recognition, DTD [7] for texture classification, and EuroSAT [14] for satellite images categorization. For domain generalization setting, we train models on ImageNet [8] as a source dataset and use ImageNet-A [16], ImageNet-R [15], ImageNet-Sketch [44] and ImageNetV2 [38] for out of distribution dataset evaluation.
153
+
154
+ Implementation details. We use a publicly available pre-trained ViT-B/16 CLIP model from OpenAI [37]. We train ProText with Deep Language Prompting in the first 9 transformer blocks of the CLIP text encoder. For the cross-dataset transfer and domain generalization settings, we train ProText using $T = 4$ and $T = 16$ language prompts for 10 and 200 epochs, respectively. Similar to [44], ProText and zero-shot CLIP use additional concepts with their prompts where available, such as 'a photo of a CLS, a type of flower' for OxfordFlowers [34].
155
+
156
+ For the base-to-novel and supervised text-only settings, ProText uses the optimal prompt length and epoch configuration for each dataset, obtained through a hyper-parameter search on the validation split of each dataset. To generate text-only data, we utilize the GPT-3 DaVinci-002 model [5] and generate class-specific descriptions using the LLM prompts provided by CuPL [36]. We use the publicly available CuPL data and generate descriptions for datasets not provided by CuPL. The AdamW optimizer is used with 5 warm-up epochs for training. We use a single 16-GB V100 GPU to train our models. Refer to the supplementary material for additional implementation details.
157
+
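The optimizer setup described above could look roughly like the following sketch; the learning rate and weight decay are placeholders, not values reported in the paper:

```python
import torch

prompts = torch.nn.Parameter(torch.zeros(4, 512))      # e.g., T = 4 learnable prompt vectors
optimizer = torch.optim.AdamW([prompts], lr=2e-3, weight_decay=1e-2)

warmup_epochs = 5
def lr_lambda(epoch):
    # Linear warm-up for the first 5 epochs, constant learning rate afterwards.
    return (epoch + 1) / warmup_epochs if epoch < warmup_epochs else 1.0

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
```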
158
+ # 4.2. Effectiveness of Text-Only Supervision
159
+
160
+ We first present an ablation to motivate our approach of learning prompts with text-only supervision. We train ProText with 3 types of text data and evaluate performance on ImageNet-1k [8]. ProText-Attribute uses 46 templates from [1], which correspond to common image attributes such as rotation, blurriness, etc. ProText-80 is trained on the standard 80 templates provided by CLIP [37], and ProText-CuPL is trained on the class-specific LLM data employed by our main baseline CuPL [36] for its ensembling approach.
161
+
162
+ In Tab. 2, we compare ProText with CLIP and recent LLM-based ensembling methods. Prompt ensembling with attribute templates and 80 templates improves over the CLIP single-template result. Among the LLM-based ensembling methods, CuPL provides the highest performance of $69.62\%$. In contrast, ProText uses a learning-based approach and shows competitive performance against prompt ensembling methods using the same text data. ProText-Attribute provides a gain of $0.45\%$ over CLIP-Attribute while roughly matching CLIP-80. When equipped with the CuPL LLM text data, ProText surpasses CuPL by $0.60\%$, leading to the highest performance among all methods. These results motivate our approach: instead of prompt ensembling, one can achieve competitive results by utilizing the same available text data to learn prompts. Next, we demonstrate the generalization of ProText such that the learned prompts transfer well across new classes and datasets.
163
+
164
+ # 4.3. Base to novel class generalization
165
+
166
+ We now present results in base-to-novel class generalization setting where training data for only base classes are available and the model is evaluated on both base and novel classes. For CuPL [36], we use base-class LLM templates for base classes and zero-shot CLIP results for its novel classes. For ProText, we use base-class LLM templates for training and transfer the learned prompts for novel classes.
167
+
168
+ Results are shown in Tab. 3. CuPL outperforms zero-shot CLIP on base classes while maintaining its performance on novel classes as LLM prompts for new classes are not available. ProText shows consistent improvements over CuPL on base classes for 11 datasets. Furthermore,
169
+
170
+ <table><tr><td rowspan="2"></td><td>Source</td><td colspan="11">Target</td></tr><tr><td>ImageNet</td><td>Caltech101</td><td>OxfordPets</td><td>StanfordCars</td><td>Flowers102</td><td>Food101</td><td>Aircraft</td><td>SUN397</td><td>DTD</td><td>EuroSAT</td><td>UCF101</td><td>Average</td></tr><tr><td colspan="13">Methods utilizing labeled visual samples</td></tr><tr><td>CoOp</td><td>71.51</td><td>93.70</td><td>89.14</td><td>64.51</td><td>68.71</td><td>85.30</td><td>18.47</td><td>64.15</td><td>41.92</td><td>46.39</td><td>66.55</td><td>63.88</td></tr><tr><td>Co-CoOp</td><td>71.02</td><td>94.43</td><td>90.14</td><td>65.32</td><td>71.88</td><td>86.06</td><td>22.94</td><td>67.36</td><td>45.73</td><td>45.37</td><td>68.21</td><td>65.74</td></tr><tr><td>MaPLe</td><td>70.72</td><td>93.53</td><td>90.49</td><td>65.57</td><td>72.23</td><td>86.20</td><td>24.74</td><td>67.01</td><td>46.49</td><td>48.06</td><td>68.69</td><td>66.30</td></tr><tr><td>PromptSRC</td><td>71.27</td><td>93.60</td><td>90.25</td><td>65.70</td><td>70.25</td><td>86.15</td><td>23.90</td><td>67.10</td><td>46.87</td><td>45.50</td><td>68.75</td><td>65.81</td></tr><tr><td colspan="13">Zero-shot &amp; Prompt ensembling methods</td></tr><tr><td>CLIP</td><td>66.72</td><td>92.98</td><td>89.13</td><td>65.29</td><td>71.30</td><td>86.11</td><td>24.90</td><td>62.59</td><td>44.56</td><td>47.84</td><td>66.83</td><td>65.15</td></tr><tr><td>CuPL</td><td>69.62</td><td>92.98</td><td>89.13</td><td>65.29</td><td>71.30</td><td>86.11</td><td>24.90</td><td>62.59</td><td>44.56</td><td>47.84</td><td>66.83</td><td>65.15</td></tr><tr><td colspan="13">Prompt learning with text-only supervision</td></tr><tr><td>ProText (Ours)</td><td>69.80</td><td>94.81</td><td>91.01</td><td>66.00</td><td>72.35</td><td>86.66</td><td>24.72</td><td>67.34</td><td>47.93</td><td>51.86</td><td>69.60</td><td>67.23</td></tr></table>
171
+
172
+ Table 4. Cross-dataset transfer setting. CuPL and CLIP perform the same on cross-datasets, as CuPL's source data cannot transfer to them. Image-supervised models are trained on 16-shot ImageNet samples. ProText employs the same ImageNet data as CuPL for prompt learning.
173
+
174
+ <table><tr><td rowspan="2"></td><td>Source</td><td colspan="5">Target</td></tr><tr><td>ImageNet</td><td>-V2</td><td>-S</td><td>-A</td><td>-R</td><td>Avg.</td></tr><tr><td colspan="7">Methods utilizing labeled visual samples</td></tr><tr><td>CoOp</td><td>71.51</td><td>64.20</td><td>47.99</td><td>49.71</td><td>75.21</td><td>59.28</td></tr><tr><td>CoCoOp</td><td>71.02</td><td>64.07</td><td>48.75</td><td>50.63</td><td>76.18</td><td>59.91</td></tr><tr><td>MaPLe</td><td>70.72</td><td>64.07</td><td>49.15</td><td>50.90</td><td>76.98</td><td>60.27</td></tr><tr><td colspan="7">Zero-shot &amp; Prompt ensembling methods</td></tr><tr><td>CLIP</td><td>66.72</td><td>60.83</td><td>46.15</td><td>47.77</td><td>73.96</td><td>57.18</td></tr><tr><td>CuPL</td><td>69.62</td><td>63.27</td><td>49.02</td><td>50.72</td><td>77.05</td><td>60.01</td></tr><tr><td colspan="7">Prompt learning with text-only supervision</td></tr><tr><td>ProText (Ours)</td><td>70.22</td><td>63.54</td><td>49.45</td><td>51.47</td><td>77.35</td><td>60.45</td></tr></table>
175
+
176
+ Table 5. Domain generalization. Prompt learning methods are trained on ImageNet and evaluated on datasets with domain shifts.
177
+
178
+ <table><tr><td>Dataset</td><td>CLIP</td><td>CuPL</td><td>ProText</td><td>Δ</td></tr><tr><td>ImageNet</td><td>66.72</td><td>69.60</td><td>70.22</td><td>+0.62</td></tr><tr><td>Caltech101</td><td>92.98</td><td>94.32</td><td>95.29</td><td>+0.97</td></tr><tr><td>DTD</td><td>44.56</td><td>53.96</td><td>54.02</td><td>+0.06</td></tr><tr><td>EuroSAT</td><td>47.84</td><td>60.27</td><td>58.53</td><td>-1.74</td></tr><tr><td>StanfordCars</td><td>65.29</td><td>65.95</td><td>66.77</td><td>+0.82</td></tr><tr><td>Flowers102</td><td>71.30</td><td>73.85</td><td>74.42</td><td>+0.57</td></tr><tr><td>Aircraft</td><td>24.90</td><td>27.66</td><td>29.01</td><td>+1.35</td></tr><tr><td>SUN397</td><td>62.59</td><td>69.00</td><td>69.76</td><td>+0.76</td></tr><tr><td>OxfordPets</td><td>89.13</td><td>91.11</td><td>92.72</td><td>+1.61</td></tr><tr><td>UCF101</td><td>66.83</td><td>70.63</td><td>71.45</td><td>+0.82</td></tr><tr><td>Food101</td><td>86.11</td><td>86.11</td><td>86.68</td><td>+0.57</td></tr><tr><td>Average</td><td>65.15</td><td>69.31</td><td>69.90</td><td>+0.59</td></tr></table>
179
+
180
+ Table 6. ProText results with text supervision on each dataset. We compare ProText with CLIP and CuPL. Gains of ProText over CuPL are shown in blue.
181
+
182
+ with the same LLM base-class data as CuPL, ProText effectively transfers the learned prompts to novel classes and improves CLIP and CuPL novel-class performance by $2.76\%$ averaged across 11 datasets. This shows the ability of ProText prompts to benefit unseen-class performance, potentially reducing the LLM prompt serving cost by half.
183
+
184
+ # 4.4. Cross-dataset transfer
185
+
186
+ In the cross-dataset transfer setting, we compare ProText with CLIP [37], CuPL [36], and image-supervised prompt learning methods. Since CuPL's class-specific ImageNet LLM prompts cannot transfer to other datasets, we assign CLIP results to CuPL on cross-datasets.
187
+
188
+ Image-supervised methods [20, 21, 49, 50] are trained with 16-shot ImageNet data.
189
+
190
+ We show our main comparison results in Tab. 4. CuPL improves ImageNet performance of CLIP by ensembling ImageNet LLM prompts, while its cross-dataset results remain the same as CLIP. In contrast, ProText effectively addresses the transferability challenges of CuPL using generalized prompts trained with the same ImageNet LLM data. Since ProText allows generalization to unseen datasets, these learned prompts can directly be used with CLIP for cross-datasets leading to absolute average gains of $+2.1\%$ against CLIP and CuPL. With ProText, one can notably reduce proprietary LLM serving and prompt engineering costs as prompts learned on one dataset are effectively transferable to other datasets. We next compare ProText with strong 16-shot image-supervised methods. Without using any visual samples, ProText demonstrates effective generalization on cross-datasets and consistently surpasses previous state-of-the-art MaPLe on 9/10 datasets leading to the highest average accuracy of $67.23\%$ . This highlights that text-only methods like ProText can lead to better generalization of CLIP as compared to image-supervised methods which tend to overfit on the source sample distributions.
191
+
192
+ # 4.5. Domain generalization experiments
193
+
194
+ We present the results for the domain generalization task in Tab. 5. As the domain-shift variants of ImageNet share class names with ImageNet, CuPL employs prompt ensembling for each dataset and provides an average gain of $+2.84\%$ over CLIP. In contrast, ProText with learned prompts shows an additional gain of $+0.44\%$ over CuPL averaged over 4 datasets. Moreover, ProText fares competitively with image-supervised methods, showing consistent improvements over CoOp, CoCoOp, and MaPLe. These results suggest that text-only supervision methods like ProText can serve as an effective alternative to improve the robustness of VLMs when no visual information is available for training.
195
+
196
+ ![](images/e955c800641d55cab5d7cffebbff896090a7bdd719cb12dc529d341e81be36b4.jpg)
197
+ Figure 3. Ablation: Prompt length (left) and prompt depth (right).
198
+
199
+ ![](images/6521cb288386538a869820ceb1ad1e166f4d405a3b5db5b36c2ecc63182e98ce.jpg)
200
+
201
+ <table><tr><td>Method</td><td>ImageNet Top1.</td></tr><tr><td>1: ProText-contrastive loss</td><td>68.12</td></tr><tr><td>2: ProText- L1 loss</td><td>69.96</td></tr><tr><td>3: ProText-MSE loss</td><td>70.22</td></tr></table>
202
+
203
+ Table 7. Ablation of choice of loss for contextual mapping. MSE loss provides highest results.
204
+
205
+ <table><tr><td>Method</td><td>ImageNet Top1</td></tr><tr><td>1: ProText-80 templates</td><td>68.48</td></tr><tr><td>2: ProText-Alpaca</td><td>67.10</td></tr><tr><td>3: ProText-GPT-3</td><td>70.22</td></tr></table>
206
+
207
+ Table 8. Effect on performance of different text data for training. GPT-3 text data shows the highest results.
208
+
209
+ <table><tr><td>Method</td><td>ImageNet Top1.</td></tr><tr><td>1: Linear Adaptor</td><td>69.36</td></tr><tr><td>2: MLP Adaptor</td><td>69.24</td></tr><tr><td>3: Prompt Learning</td><td>70.22</td></tr></table>
210
+
211
+ Table 9. Ablation on the choice of mapping network. Prompt Learning shows optimal performance.
212
+
213
+ <table><tr><td rowspan="2">Method</td><td colspan="4">Correct class confidence (%) ↑</td><td colspan="4">Incorrect class confidence (%) ↓</td></tr><tr><td>DTD</td><td>SUN</td><td>Caltech</td><td>UCF</td><td>DTD</td><td>SUN</td><td>Caltech</td><td>UCF</td></tr><tr><td>CLIP</td><td>30.5</td><td>49.3</td><td>84.5</td><td>56.4</td><td>1.51</td><td>0.13</td><td>0.16</td><td>0.44</td></tr><tr><td>ProText</td><td>33.1</td><td>54.2</td><td>89.1</td><td>59.5</td><td>1.45</td><td>0.12</td><td>0.11</td><td>0.40</td></tr></table>
214
+
215
+ Table 10. Confidence score analysis: ProText trained on ImageNet improves its logit confidence for correct classes in unseen datasets.
216
+
217
+ # 4.6. Supervised text-only training
218
+
219
+ In this setting, we compare ProText with CuPL for each dataset trained on LLM template data, and the results are shown in Tab. 6. While utilizing the same LLM data, ProText achieves consistent improvements over CuPL on 10/11 datasets with an average gain of $+0.59\%$. This reflects the generalization of the ProText approach across diverse image datasets, where it better utilizes the LLM data within the learned prompts. We also compare ProText with image-supervised methods and observe that ProText fares competitively with approaches utilizing up to 2-shot samples for training. This shows ProText as a potential alternative to image-supervised methods in extremely low-data regimes. Refer to the supplementary material for additional results.
220
+
221
+ # 4.7. Ablative analysis
222
+
223
+ On understanding ProText prompts. In Table 10, we present average confidence scores obtained from the logits of ProText trained on ImageNet-1k text data when applied to
224
+
225
+ ![](images/58ddce2688abcad06e66604e9a0ca5427a2584bfcfd110acd6d3a6d3fb8a7e2a.jpg)
226
+ Figure 4. (Left) Effect of LLM data size on performance. (Right) Ablation on ensembling LLM descriptions for training ProText.
227
+
228
+ cross-datasets. Compared to CLIP, ProText exhibits increased confidence scores for correct classes across various datasets, while marginally decreasing confidence scores for incorrect classes. This suggests that the prompts learned on ImageNet-1k provide complementary and transferable contextual cues, leading to improved results. We conjecture that ProText prompts potentially improve the classification of test samples situated near the decision boundary due to higher confidence for correct classes. Refer to the supplementary section for qualitative and additional analysis.
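+
+ As a concrete reading of this analysis, the sketch below computes such confidence statistics from logits. This is a minimal illustration rather than the paper's code, and it assumes a particular metric definition: correct-class confidence as the mean softmax probability of the ground-truth class, and incorrect-class confidence as the mean probability mass assigned per wrong class.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def confidence_stats(logits: torch.Tensor, labels: torch.Tensor):
+     # logits: (N, C) class scores, labels: (N,) ground-truth class indices
+     probs = F.softmax(logits, dim=-1)
+     correct = probs[torch.arange(labels.numel()), labels]   # probability of the true class
+     incorrect = (1.0 - correct) / (probs.shape[1] - 1)      # average probability per wrong class
+     return 100 * correct.mean().item(), 100 * incorrect.mean().item()
+
+ # toy usage with random logits for 8 samples over 47 DTD classes
+ print(confidence_stats(torch.randn(8, 47), torch.randint(0, 47, (8,))))
+ ```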
229
+
230
+ Loss metric in contextual mapping. We ablate the choice of loss used for the contextual mapping module in Tab. 7. Distance-based losses improve over the contrastive loss. We conjecture that the contrastive loss treats samples with the same class label in the same batch as negatives, leading to noisy training.
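+
+ For illustration, below is a minimal sketch of the distance-based mapping objectives ablated here. It is not the released implementation; the tensors are stand-ins, with `f_template` playing the role of the class-name template feature produced with the learnable prompts and `f_llm` the frozen feature of a matching LLM description.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ # stand-in features: a batch of 32 (class-template, LLM-description) pairs, dim 512
+ f_template = torch.randn(32, 512, requires_grad=True)  # would come from the prompted text encoder
+ f_llm = torch.randn(32, 512)                            # would come from the frozen text encoder
+
+ loss_mse = F.mse_loss(f_template, f_llm)   # row 3 in Tab. 7 (best)
+ loss_l1 = F.l1_loss(f_template, f_llm)     # row 2 in Tab. 7
+ loss_mse.backward()  # in ProText, gradients would update only the learnable prompt vectors
+ ```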
231
+
232
+ Choice of LLM for generating text data. ProText by default uses the GPT-3 [5] LLM to obtain text templates for training. Here we ablate the open-source Alpaca [43] model as an alternative choice. As shown in Tab. 8, ProText with Alpaca templates performs worse than ProText-80 templates and ProText-GPT-3. We observed that Alpaca templates are often noisy, while GPT-3 descriptions contain richer class details, which results in better performance.
233
+
234
+ Prompt learning versus adapter. While ProText employs prompt learning to learn the contextual mapping from LLM templates, here we ablate adapters in Tab. 9. Similar to [12], we attach an adapter at the output of the CLIP text encoder. Adapters perform worse than prompting. We conjecture that the adapter completely transforms the text features and loses CLIP's generalization. In contrast, prompt learning appends learnable vectors to the CLIP text input without significant replacement and learns an effective mapping function.
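+
+ To make the two designs concrete, here is a rough sketch with simplified shapes (our illustration, not the actual architectures), contrasting an adapter applied on top of the frozen text feature with learnable prompt vectors concatenated to the token embeddings:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ dim = 512
+ # (a) Adapter: a small network rewrites the final text feature, so the
+ #     representation can drift away from CLIP's original embedding space.
+ adapter = nn.Sequential(nn.Linear(dim, dim // 4), nn.ReLU(), nn.Linear(dim // 4, dim))
+ text_feature = torch.randn(1, dim)        # frozen CLIP text feature
+ adapted_feature = adapter(text_feature)
+
+ # (b) Prompt learning: learnable context vectors are prepended to the
+ #     (frozen) token embeddings, leaving the original tokens untouched.
+ n_ctx, n_tokens = 16, 61
+ ctx = nn.Parameter(0.02 * torch.randn(n_ctx, dim))
+ token_embeddings = torch.randn(1, n_tokens, dim)
+ prompted_tokens = torch.cat([ctx.unsqueeze(0), token_embeddings], dim=1)  # (1, 77, dim)
+ ```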
235
+
236
+ Training data size for text supervision. To assess the effect of LLM template data size on ProText, we ablate the number of descriptions per class in Fig. 4 (left). Increasing the number of descriptions for each class consistently improves the results. This suggests that ProText performance could be boosted further as the quality and size of the text data increase.
237
+
238
+ Ensembling in ProText training. ProText uses multiple descriptions per class and enforces a mapping of the class-name template feature to the feature of each LLM description for that class. We conduct an alternative experiment by ensembling
239
+
240
+ a single feature from the multiple LLM descriptions per class and enforcing the mapping onto this ensembled LLM feature. As shown in Fig. 4 (right), ProText-ensemble performs worse than ProText with individual samples. We conjecture that learning on each description allows the model to utilize the additional context present in each description, while ensembling can potentially mask out less frequent details available in the text.
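+
+ The difference between the two variants can be sketched as follows; this is an illustrative snippet with dummy features, not the actual training code:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ f_template = torch.randn(512, requires_grad=True)  # prompted class-name template feature
+ f_descriptions = torch.randn(5, 512)                # features of 5 LLM descriptions of the class
+
+ # Default ProText: one mapping target per description, so every detail supervises the prompts.
+ loss_per_description = F.mse_loss(f_template.expand(5, -1), f_descriptions)
+
+ # Ablation (Fig. 4, right): map onto a single ensembled feature, which averages details away.
+ loss_ensembled = F.mse_loss(f_template, f_descriptions.mean(dim=0))
+ ```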
241
+
242
+ Prompt length and prompt depth. Fig. 3 (left) shows the effect of prompt length when training ProText; setting the prompt length to 16 leads to optimal performance. Fig. 3 (right) shows the effect of prompt depth on the final performance, where a prompt depth of 9 gives the best results.
243
+
244
+ # 5. Conclusion
245
+
246
+ Prompt learning and LLM-based ensembling are effective techniques to improve CLIP's generalization. However, prompt learning often requires labeled images, which is less practical, while LLM-based ensembling methods are dominantly class-specific and not directly transferable to new classes. To address these challenges, we propose a new direction to adapt CLIP by learning generalized prompts with text-only supervision, without relying on visual data. We introduce a training strategy in which prompts learn a mapping function that embeds rich contextual knowledge from LLM text data within the prompts. The context learned by these prompts transfers well to unseen classes and datasets, potentially reducing the LLM prompt engineering and serving cost. We perform extensive evaluations on four benchmarks, where our text-only approach performs favorably against previous methods, including those utilizing labeled images.
247
+
248
+ Acknowledgements: We would like to thank Hanan Ghani and Jameel Hassan for their help in downloading datasets. We also thank Muhammad Jehanzeb Mirza for providing Alpaca LLM prompt data for ablation experiments.
249
+
250
+ # References
251
+
252
+ [1] Bang An, Sicheng Zhu, Michael-Andrei Panaitescu-Liess, Chaithanya Kumar Mummadi, and Furong Huang. More context, less distraction: Improving zero-shot inference of clip by inferring and describing spurious features. In Workshop on Efficient Systems for Foundation Models@ ICML2023, 2023. 1, 6
253
+ [2] Hyojin Bahng, Ali Jahanian, Swami Sankaranarayanan, and Phillip Isola. Visual prompting: Modifying pixel space to adapt pre-trained models. arXiv preprint arXiv:2203.17274, 2022. 4
254
+ [3] Hanoona Bangalath, Muhammad Maaz, Muhammad Uzair Khattak, Salman H Khan, and Fahad Shahbaz Khan. Bridging the gap between object and image-level representations for open-vocabulary detection. NeurIPS, 35:33781-33794, 2022. 2
255
+ [4] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In ECCV, pages 446-461. Springer, 2014. 6
258
+ [5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. NeurIPS, 33:1877-1901, 2020. 3, 4, 6, 8
259
+ [6] Guangyi Chen, Weiran Yao, Xiangchen Song, Xinyue Li, Yongming Rao, and Kun Zhang. Plot: Prompt learning with optimal transport for vision-language models. In ICLR, 2022. 1, 2
260
+ [7] Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In CVPR, pages 3606-3613, 2014. 6
261
+ [8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248-255. IEEE, 2009. 5, 6
262
+ [9] Mohammad Mahdi Derakhshani, Enrique Sanchez, Adrian Bulat, Victor G Turrisi da Costa, Cees GM Snoek, Georgios Tzimiropoulos, and Brais Martinez. Bayesian prompt learning for image-language model generalization. In CVPR, pages 15237-15246, 2023. 2
263
+ [10] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, pages 14084-14093, 2022. 2
264
+ [11] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In CVPR Workshop, pages 178–178. IEEE, 2004. 6
265
+ [12] Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. Clip-adapter: Better vision-language models with feature adapters. IJCV, pages 1–15, 2023. 8
266
+ [13] Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Scaling open-vocabulary image segmentation with image-level labels. In ECCV, pages 540-557. Springer, 2022. 2
267
+ [14] Patrick Helber, Benjamin Bischke, Andreas Dengel, and Damian Borth. Eurosat: A novel dataset and deep learning benchmark for land use and land cover classification. J-STARS, 12(7):2217-2226, 2019. 6
268
+ [15] Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, et al. The many faces of robustness: A critical analysis of out-of-distribution generalization. In ICCV, pages 8340-8349, 2021. 6
269
+ [16] Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Song. Natural adversarial examples. In CVPR, pages 15262-15271, 2021. 6
270
+ [17] Tony Huang, Jack Chu, and Fangyun Wei. Unsupervised prompt learning for vision-language models. arXiv preprint arXiv:2204.03649, 2022. 1, 3
271
+ [18] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICLR, pages 4904-4916. PMLR, 2021. 1, 2
272
+
273
+ [19] Woojeong Jin, Yu Cheng, Yelong Shen, Weizhu Chen, and Xiang Ren. A good prompt is worth millions of parameters? low-resource prompt-based learning for vision-language models. arXiv preprint arXiv:2110.08484, 2021. 1
274
+ [20] Muhammad Uzair Khattak, Hanoona Rasheed, Muhammad Maaz, Salman Khan, and Fahad Shahbaz Khan. Maple: Multi-modal prompt learning. In CVPR, pages 19113-19122, 2023. 1, 2, 3, 4, 6, 7
275
+ [21] Muhammad Uzair Khattak, Syed Talal Wasim, Muzammal Naseer, Salman Khan, Ming-Hsuan Yang, and Fahad Shahbaz Khan. Self-regulating prompts: Foundational model adaptation without forgetting. In ICCV, pages 15190-15200, 2023. 1, 3, 6, 7
276
+ [22] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, pages 554–561, 2013. 6
277
+ [23] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692, 2023. 1
278
+ [24] Boyi Li, Kilian Q. Weinberger, Serge Belongie, Vladlen Koltun, and René Ranftl. Language-driven semantic segmentation, 2022. 2
279
+ [25] Feng Liang, Bichen Wu, Xiaoliang Dai, Kunpeng Li, Yinan Zhao, Hang Zhang, Peizhao Zhang, Peter Vajda, and Diana Marculescu. Open-vocabulary semantic segmentation with mask-adapted clip. In CVPR, pages 7061-7070, 2023. 2
280
+ [26] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 2
281
+ [27] Yuning Lu, Jianzhuang Liu, Yonggang Zhang, Yajing Liu, and Xinmei Tian. Prompt distribution learning. In CVPR, pages 5206-5215, 2022. 1, 2, 3
282
+ [28] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013. 6
283
+ [29] Sachit Menon and Carl Vondrick. Visual classification via description from large language models. In ICLR, 2023. 1, 2, 3, 4
284
+ [30] Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, et al. Simple open-vocabulary object detection. In ECCV, pages 728-755. Springer, 2022. 2
285
+ [31] Muhammad Ferjad Naeem, Yongqin Xian, Luc V Gool, and Federico Tombari. I2dformer: Learning image to document attention for zero-shot image classification. NeurIPS, 2022. 2
286
+ [32] Muhammad Ferjad Naeem, Muhammad Gul Zain Ali Khan, Yongqin Xian, Muhammad Zeshan Afzal, Didier Stricker, Luc Van Gool, and Federico Tombari. I2mvformer: Large language model generated multi-view document supervision for zero-shot image classification. In CVPR, 2023. 2
287
+
288
+ [33] Muhammad Ferjad Naeem, Yongqin Xian, Xiaohua Zhai, Lukas Hoyer, Luc Van Gool, and Federico Tombari. Silc: Improving vision language pretraining with self-distillation. arXiv preprint arXiv:2310.13355, 2023. 2
289
+ [34] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In ICVGIP, pages 722-729. IEEE, 2008. 6, 2
290
+ [35] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In CVPR, pages 3498-3505. IEEE, 2012. 6
291
+ [36] Sarah Pratt, Ian Covert, Rosanne Liu, and Ali Farhadi. What does a platypus look like? generating customized prompts for zero-shot image classification. In ICCV, pages 15691-15701, 2023. 1, 2, 3, 4, 5, 6, 7
292
+ [37] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 1, 2, 3, 6, 7, 4
293
+ [38] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do imagenet classifiers generalize to imagenet? In International Conference on Machine Learning, pages 5389-5400. PMLR, 2019. 6
294
+ [39] Karsten Roth, Jae Myung Kim, A Koepke, Oriol Vinyals, Cordelia Schmid, and Zeynep Akata. Waffling around for performance: Visual classification with random words and broad concepts. 2023. 1, 3, 4, 2
295
+ [40] Jameel Hassan Abdul Samadh, Hanan Gani, Noor Hazim Hussein, Muhammad Uzair Khattak, Muzammal Naseer, Fahad Khan, and Salman Khan. Align your prompts: Test-time prompting with distribution alignment for zero-shot generalization. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 2
296
+ [41] Manli Shu, Weili Nie, De-An Huang, Zhiding Yu, Tom Goldstein, Anima Anandkumar, and Chaowei Xiao. Test-time prompt tuning for zero-shot generalization in vision-language models. NeurIPS, 35:14274-14289, 2022. 1, 2
297
+ [42] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 6
298
+ [43] Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. Stanford alpaca: An instruction-following llama model, 2023. 8
299
+ [44] Haohan Wang, Songwei Ge, Zachary Lipton, and Eric P Xing. Learning robust global representations by penalizing local predictive power. In NeurIPS, 2019. 6
300
+ [45] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In CVPR, pages 3485-3492. IEEE, 2010. 6
301
+ [46] Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. Filip: Fine-grained interactive language-image pre-training. In ICLR, 2021. 2
302
+ [47] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022. 1
305
+ [48] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 2
306
+ [49] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In CVPR, pages 16816-16825, 2022. 1, 2, 3, 4, 5, 6, 7
307
+ [50] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 130(9):2337-2348, 2022. 1, 2, 3, 4, 5, 6, 7
308
+ [51] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In ECCV, pages 350-368. Springer, 2022. 2
309
+
310
+ # Learning to Prompt with Text Only Supervision for Vision-Language Models Supplementary Material
311
+
312
+ The following sections provide supplementary material for our main paper. This includes additional analysis and comparison experiments, implementation details, and specifics of our text-to-text data used for training. The contents are organized as follows:
313
+
314
+ - Additional analysis and comparison experiments (Sec. A)
315
+ - Additional Implementation details (Sec. B)
316
+ - Details on Text-Only Data (Sec. C)
317
+
318
+ # A. Additional Experiments
319
+
320
+ # A.1. Additional Analysis.
321
+
322
+ Here we provide additional analysis experiments for our ProText technique.
323
+
324
+ Qualitative Analysis. In order to understand the transferability of ProText prompts across new datasets, we visualize attention maps in Fig. 5. Specifically, we employ ProText prompts learned on the ImageNet-1k text-only dataset and transfer them to cross-datasets. We observe that ProText tends to focus on relevant image features while reducing its attention towards spurious features, as shown in the Oxford Pets and Caltech-101 images. In the case of the texture image from DTD, ProText shows more global attention on the texture portion of the image, which is crucial for recognizing the correct texture due to the fine-grained nature of texture classes. This suggests that ProText can learn complementary contextual features, which steer CLIP towards better transferability to new datasets without relying on visual samples.
325
+
326
+ ![](images/90b4c4aa3c809b11b64ba4f3a75c73394c9edb79c73ee21877f43ef8bdcd1738.jpg)
327
+ Figure 5. Attention map visualizations for CLIP and ProText for cross-datasets. ProText is trained on ImageNet-1k text-only data.
328
+
329
+ <table><tr><td>Layer #</td><td>CTX 1</td><td>CTX 2</td><td>CTX 3</td><td>CTX 4</td></tr><tr><td>1</td><td>a</td><td>a</td><td>for</td><td>onto</td></tr><tr><td>2</td><td>bi</td><td>paper</td><td>erup</td><td>believes</td></tr><tr><td>3</td><td>ilwx</td><td>ered</td><td>emon</td><td>enclosure</td></tr><tr><td>4</td><td>devoted</td><td>fly</td><td>ced</td><td>hair</td></tr><tr><td>5</td><td>sin</td><td>tous</td><td>cona</td><td>emor</td></tr><tr><td>6</td><td>foto</td><td>unwanted</td><td>swagg</td><td>curfew</td></tr><tr><td>7</td><td>banan</td><td>lift</td><td>knob</td><td>maz</td></tr><tr><td>8</td><td>slow</td><td>commuter</td><td>helene</td><td>nuff</td></tr><tr><td>9</td><td>chevron</td><td>rear</td><td>crete</td><td>opi</td></tr></table>
330
+
331
+ Table 11. Illustration of nearest words in CLIP word vocabulary against ProText prompts in different transformer layers. ProText prompts are trained on ImageNet-1k LLM prompt data.
332
+
333
+ Towards interpreting ProText prompts. Our main experiments in Sec. 4.7 demonstrated that ProText trained on the ImageNet-1k text dataset performs favorably across cross-datasets. Here we are interested in studying how the ProText prompt vectors are interpreted in natural language. Specifically, we searched for words in the CLIP vocabulary that are closest to the learned prompts using Euclidean distance in the embedding space. The results in Table 11 show the nearest (valid) word for ProText prompts across different transformer layers. Note that these words may not concretely correspond to the learned prompts, as we could only select the nearest ones. We observe that the represented words are diverse, containing connecting words that are common in web captions such as "a," "for," and "onto". Additionally, since CLIP uses a BPE representation for tokenization, several subwords appear among the nearest words, such as "sin," "ced," and "banan." These subwords can collectively contribute to strong context priors, such as deriving "banana" from "banan," "Mercedes" from "ced," and "casino" from "sin," which may be potentially relevant for downstream datasets like SUN397 and Stanford Cars. At the same time, some words do not appear to contribute much to context enhancement, such as "ilwx" and "curfew". In summary, similar to the findings in [50], the learned vectors may encompass word representations not explicitly present in the existing vocabulary.
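+
+ The nearest-word search itself is straightforward; a minimal sketch is shown below, using a random stand-in for the CLIP token-embedding table. Decoding the returned indices with the CLIP BPE tokenizer would yield (sub)words like those reported in Table 11.
+
+ ```python
+ import torch
+
+ vocab_size, dim = 49408, 512                     # CLIP BPE vocabulary size and embedding dim
+ token_embedding = torch.randn(vocab_size, dim)   # stand-in for CLIP's token-embedding table
+ ctx_vectors = torch.randn(4, dim)                # learned prompt vectors of one layer
+
+ dists = torch.cdist(ctx_vectors, token_embedding)  # Euclidean distance to every vocabulary entry
+ nearest_ids = dists.argmin(dim=-1)                  # closest (sub)word index per prompt vector
+ print(nearest_ids.tolist())  # decode with the BPE tokenizer to obtain words such as "a", "banan", ...
+ ```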
334
+
335
+ # A.2. Additional comparisons with WaffleCLIP.
336
+
337
+ We present additional comparisons between ProText and the WaffleCLIP [39] approach. WaffleCLIP employs prompt ensembling by introducing random descriptors and characters alongside class names. Specifically, we perform a comparison with the WaffleCLIP-Concept variant, which incorporates high-level dataset concepts in its text prompts, such as 'a photo of a flower: a CLS' for OxfordFlowers [34]. Further details on the WaffleCLIP framework and its variants can be found in [39].
340
+
341
+ Cross-dataset transfer. For the cross-dataset transfer setting, all methods utilize only the ImageNet source-dataset LLM prompt information. The results are shown in Tab. 12. CuPL shows the same performance as CLIP on cross-datasets, as class-specific descriptions for new datasets are not available in this setting. Overall, WaffleCLIP uses random descriptors, which leads to improvements over CLIP and CuPL. In contrast, ProText with text-only training on ImageNet-1k LLM templates shows consistent improvements over WaffleCLIP, surpassing it on 9/10 cross-datasets and leading to an average accuracy of $67.23\%$ in the challenging cross-dataset transfer setting.
342
+
343
+ Text-only supervised setting. We additionally compare WaffleCLIP in the text-only supervised setting. As shown in Tab. 13, WaffleCLIP improves over CLIP but lags behind CuPL, as it relies only on high-level dataset concepts and random descriptors. CuPL uses class-specific LLM descriptions for prompt ensembling and shows improved results. In contrast to these approaches, ProText adopts a learning-based approach using text data and shows the highest performance, surpassing both WaffleCLIP and CuPL on 10/11 datasets. This suggests that text-only prompt learning can serve as a better alternative to training-free prompt ensembling methods.
344
+
345
+ # A.3. Comparison with image-supervised methods.
346
+
347
+ We show additional comparisons of ProText with image-supervised methods in terms of generalization performance. In the base-to-novel class generalization setting, we include prompt learning methods utilizing 16-shot image data, where we mainly focus on novel-class performance for comparison. In the text-only supervised setting, we compare ProText with few-shot image-supervised methods including CLIP Linear Probe, CoOp, and CoCoOp, which are trained with up to 2-shot data.
348
+
349
+ Unseen class generalization. All methods are trained on seen classes of each dataset and we specifically analyze their performance on unseen classes to study generalization. Results are shown in Tab. 14. Image-supervised prompt learning methods utilize 16-shot base-class labeled data and demonstrate improved accuracy for novel classes. For example, the previous state-of-the-art method, PromptSRC, achieves a substantial accuracy of $70.73\%$ on ImageNet for novel classes. In comparison, ProText, leveraging text-only data, shows an improvement of $+0.65\%$ against PromptSRC for novel classes on ImageNet. In summary, ProText consistently outperforms PromptSRC on 9 out of 11 datasets for novel classes, leading to the highest novel class accuracy of $76.98\%$ averaged over 11 datasets.
350
+
351
+ Supervised setting. In Tab. 15, we compare ProText with few-shot image-supervised methods including CLIP Linear Probe, CoOp, and CoCoOp. ProText shows improved average performance over the 1- and 2-shot Linear Probe. Similarly, without using any images for training, ProText outperforms CoOp and CoCoOp trained with 1 and 2 shots on most datasets. This suggests that text-only training can be considered an effective alternative to image-supervised methods under extreme low-data regimes.
354
+
355
+ # A.4. Additional ablation studies.
356
+
357
+ We present additional ablation experiments conducted on ProText as outlined below.
358
+
359
+ Combining prompt ensembling and prompt learning. In our ProText approach, the learnable prompts used at inference are trained on text data. Here, we explore an alternative experiment by averaging the text features obtained with ProText-learned prompts and the text features of LLM templates obtained via prompt ensembling. Specifically, we average the LLM prompt features (e.g., CuPL features) and the ProText features for the same classes to study whether prompt learning and prompt ensembling could be complementary. The results are shown in Table 16. Combining ProText and CuPL features leads to a marginal improvement compared to ProText alone. We conjecture that since ProText uses the same LLM template data to learn prompts, the LLM template features and ProText features might not be strongly complementary.
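+
+ A minimal sketch of this combination is given below, using dummy features (our illustration, not the evaluation code): the zero-shot classifier weights from ProText prompts and from CuPL ensembling are L2-normalized, averaged, and then used for classification as usual.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ num_classes, dim = 1000, 512
+ w_protext = F.normalize(torch.randn(num_classes, dim), dim=-1)  # class features from learned prompts
+ w_cupl = F.normalize(torch.randn(num_classes, dim), dim=-1)     # ensembled CuPL template features
+
+ w_combined = F.normalize(w_protext + w_cupl, dim=-1)            # simple average, renormalized
+
+ image_feature = F.normalize(torch.randn(1, dim), dim=-1)
+ logits = 100.0 * image_feature @ w_combined.t()                 # standard CLIP-style scoring
+ ```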
360
+
361
+ # B. Additional Implementation details
362
+
363
+ Training details. For training ProText, we use the publicly available CLIP ViT-B/16 model from OpenAI [37]. Language prompts for each training run are initialized with 'a photo of a' for the first layer and randomly initialized for the remaining transformer layers of the text encoder of CLIP. All models are trained using the AdamW optimizer on a single 16-GB V100 GPU. For the cross-dataset benchmark, we train ProText with $T = 4$ language prompts for 10 epochs, and for the domain generalization benchmark with $T = 16$ language prompts for 200 epochs. The warm-up epochs are set to 5 during training.
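+
+ As a rough sketch of this setup (not the released code; the initialization scale and per-layer bookkeeping are our assumptions), the learnable deep prompts and optimizer could be instantiated as:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ T, dim, prompted_layers = 4, 512, 9   # T prompt vectors per prompted text-encoder layer
+ deep_prompts = nn.ParameterList(
+     [nn.Parameter(0.02 * torch.randn(T, dim)) for _ in range(prompted_layers)]
+ )
+ # in practice, the first layer would instead be initialized from the embedding of "a photo of a"
+ optimizer = torch.optim.AdamW(deep_prompts.parameters(), lr=0.03)  # lr as reported for most datasets
+ ```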
364
+
365
+ As text data from LLMs varies in quality and size across datasets, we have observed that training ProText on each dataset requires custom training configurations to achieve the best performance. Therefore, ProText employs optimal prompt length and epoch configuration for each dataset. The optimal training configurations are obtained through the validation splits of each dataset.
366
+
367
+ Base-to-novel generalization setting. In Tab. 17, we show the hyperparameters used for training models in the base-to-novel generalization setting. We use a learning rate of 0.03 for all datasets except UCF101, Food101, and OxfordFlowers, where a learning rate of 0.0025 is used.
368
+
369
+ <table><tr><td rowspan="2"></td><td>Source</td><td colspan="10">Target</td></tr><tr><td>ImageNet</td><td>Caltech101</td><td>OxfordPets</td><td>StanfordCars</td><td>Flowers102</td><td>Food101</td><td>Aircraft</td><td>SUN397</td><td>DTD</td><td>EuroSAT</td><td>UCF101</td></tr><tr><td colspan="12">Zero-shot &amp; Prompt ensembling methods</td></tr><tr><td>CLIP</td><td>66.72</td><td>92.98</td><td>89.13</td><td>65.29</td><td>71.30</td><td>86.11</td><td>24.90</td><td>62.59</td><td>44.56</td><td>47.84</td><td>66.83</td></tr><tr><td>CuPL</td><td>69.62</td><td>92.98</td><td>89.13</td><td>65.29</td><td>71.30</td><td>86.11</td><td>24.90</td><td>62.59</td><td>44.56</td><td>47.84</td><td>66.83</td></tr><tr><td>WaffleCLIP-Concept</td><td>68.34</td><td>94.01</td><td>89.57</td><td>63.42</td><td>72.00</td><td>86.84</td><td>24.49</td><td>66.17</td><td>45.15</td><td>47.74</td><td>67.96</td></tr><tr><td colspan="12">Prompt learning with text-only supervision</td></tr><tr><td>ProText (Ours)</td><td>69.80</td><td>94.81</td><td>91.01</td><td>66.00</td><td>72.35</td><td>86.66</td><td>24.72</td><td>67.34</td><td>47.93</td><td>51.86</td><td>69.60</td></tr></table>
370
+
371
+ Table 12. Cross-dataset transfer setting. Results comparison of ProText with CLIP, CuPL, and Waffle-CLIP. ProText overall shows consistent improvements over LLM-based prompt ensembling methods.
372
+
373
+ <table><tr><td>Dataset</td><td>CLIP</td><td>CuPL</td><td>WaffleCLIP-C</td><td>ProText</td><td>Δ</td></tr><tr><td>ImageNet</td><td>66.72</td><td>69.60</td><td>68.34</td><td>70.22</td><td>+0.62</td></tr><tr><td>Caltech101</td><td>92.98</td><td>94.32</td><td>94.01</td><td>95.29</td><td>+0.97</td></tr><tr><td>DTD</td><td>44.56</td><td>53.96</td><td>45.15</td><td>54.04</td><td>+0.06</td></tr><tr><td>EuroSAT</td><td>47.84</td><td>60.27</td><td>47.74</td><td>58.53</td><td>-1.74</td></tr><tr><td>StanfordCars</td><td>65.29</td><td>65.95</td><td>63.42</td><td>66.77</td><td>+0.82</td></tr><tr><td>Flowers102</td><td>71.30</td><td>73.85</td><td>72.00</td><td>74.42</td><td>+0.57</td></tr><tr><td>Aircraft</td><td>24.90</td><td>27.66</td><td>24.49</td><td>29.01</td><td>+1.35</td></tr><tr><td>SUN397</td><td>62.59</td><td>69.00</td><td>66.17</td><td>69.76</td><td>+0.76</td></tr><tr><td>OxfordPets</td><td>89.13</td><td>91.11</td><td>89.57</td><td>92.72</td><td>+1.61</td></tr><tr><td>UCF101</td><td>66.83</td><td>70.63</td><td>67.96</td><td>71.45</td><td>+0.82</td></tr><tr><td>Food101</td><td>86.11</td><td>86.11</td><td>86.84</td><td>86.68</td><td>+0.57</td></tr><tr><td>Average</td><td>65.15</td><td>69.31</td><td>65.97</td><td>69.90</td><td>+0.59</td></tr></table>
374
+
375
+ Table 13. ProText results with text supervision on each dataset. We compare ProText with CLIP, CuPL, and WaffleCLIP-Concept. Gains of ProText over CuPL are shown in blue.
376
+
377
+ <table><tr><td>Dataset</td><td>CuPL [36]</td><td>ProText Ours</td><td>CoOp [50]</td><td>CoCoOp [49]</td><td>MaPLe [20]</td><td>PromptSRC [21]</td><td>Δ</td></tr><tr><td>ImageNet</td><td>68.14</td><td>71.38</td><td>67.88</td><td>70.43</td><td>70.54</td><td>70.73</td><td>+3.2</td></tr><tr><td>Caltech101</td><td>94.00</td><td>95.63</td><td>89.81</td><td>93.81</td><td>94.36</td><td>94.03</td><td>+1.6</td></tr><tr><td>DTD</td><td>59.90</td><td>61.59</td><td>41.18</td><td>56.00</td><td>59.18</td><td>62.97</td><td>+1.7</td></tr><tr><td>EuroSAT</td><td>64.05</td><td>80.97</td><td>54.74</td><td>60.04</td><td>73.23</td><td>73.90</td><td>+17</td></tr><tr><td>StanfordCars</td><td>74.89</td><td>76.08</td><td>60.40</td><td>73.59</td><td>74.00</td><td>74.97</td><td>+1.2</td></tr><tr><td>Flowers102</td><td>77.80</td><td>78.44</td><td>59.67</td><td>71.75</td><td>72.46</td><td>76.50</td><td>+0.6</td></tr><tr><td>Aircraft</td><td>36.29</td><td>34.13</td><td>22.30</td><td>23.71</td><td>35.61</td><td>37.87</td><td>-2.2</td></tr><tr><td>SUN397</td><td>75.35</td><td>79.14</td><td>65.89</td><td>76.86</td><td>78.70</td><td>78.47</td><td>+3.8</td></tr><tr><td>OxfordPets</td><td>97.26</td><td>98.00</td><td>95.29</td><td>97.69</td><td>97.76</td><td>97.30</td><td>+0.7</td></tr><tr><td>UCF101</td><td>77.50</td><td>79.50</td><td>56.05</td><td>73.45</td><td>78.66</td><td>78.80</td><td>+2.0</td></tr><tr><td>Food101</td><td>91.22</td><td>91.98</td><td>82.26</td><td>91.29</td><td>92.05</td><td>91.53</td><td>+0.8</td></tr><tr><td>Average</td><td>74.22</td><td>76.98</td><td>63.22</td><td>71.69</td><td>75.14</td><td>76.10</td><td>+2.8</td></tr></table>
378
+
379
+ Table 14. Novel-class generalization comparison. We compare ProText with prompt ensembling and image-supervised methods on unseen class performance in base-to-novel class generalization setting. Gains of ProText over CuPL are shown in blue.
380
+
381
+ Text-only supervised setting. For our comparison with CuPL [36] in Table 15, ProText models are trained using the same LLM text data as utilized by CuPL. Hyperparameter values are shown in Table 18. All models are trained using a learning rate of 0.03, except for UCF101, EuroSAT, and Oxford-Flowers, where a learning rate of 0.0025 is used.
384
+
385
+ # C. Details on Text-Only Data
386
+
387
+ As discussed in Sec. 3.2.1, our ProText approach relies on text-only data ($\mathcal{D}_{\text{PROMPT}}$) curated from Language Models (LLMs) for training its language prompts. Here, we provide additional details on the curation of text-only data. Specifically, we first provide information on the text queries used as input to LLMs for generating prompts, followed by qualitative examples of $\mathcal{D}_{\text{PROMPT}}$.
388
+
389
+ # C.1. Queries to LLMs to curate Text-Only Data
390
+
391
+ Following [36], we obtain class descriptions from LLMs by providing various queries as inputs. Specifically, we utilize the queries termed Full prompts by CuPL [36]. For instance, to generate class descriptions for the ImageNet-1k classes, we prompt GPT-3 with the following 5 queries:
392
+
393
+ - 'Describe what a(n) CLS looks like.'
394
+ - 'How can you identify a(n) CLS?'
395
+ - 'What does a(n) CLS look like?'
396
+ - 'Describe an image from the internet of a(n) CLS.'
397
+ - 'A caption of an image of a(n) CLS.'
398
+
399
+ Here, CLS denotes the class names present in the dataset. After generating LLM class descriptions, we associate all descriptions of the same class with its class-name template given as 'A photo of a CLS'. This results in our text-only training data $\mathcal{D}_{\text{PROMPT}}$ with text-to-text mapping pairs used to train ProText. Refer to [36] for LLM queries of other datasets used to generate class-specific descriptions. For standardized comparisons, we use publicly available CuPL data and generate descriptions for datasets not provided by CuPL.
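+
+ Putting this together, a minimal sketch of assembling $\mathcal{D}_{\text{PROMPT}}$ might look like the following; the `query_llm` helper is hypothetical, standing in for the GPT-3 calls that return class descriptions:
+
+ ```python
+ def query_llm(query: str) -> list[str]:
+     # hypothetical stand-in for a GPT-3 call that returns several completions
+     return [f"(LLM answer to: {query})"]
+
+ queries = [
+     "Describe what a(n) {} looks like.",
+     "How can you identify a(n) {}?",
+     "What does a(n) {} look like?",
+     "Describe an image from the internet of a(n) {}.",
+     "A caption of an image of a(n) {}.",
+ ]
+ class_names = ["tench", "bath towel", "sandal"]
+
+ # D_PROMPT: every LLM description of a class is paired with that class-name template.
+ d_prompt = [
+     (f"A photo of a {cls}", description)
+     for cls in class_names
+     for q in queries
+     for description in query_llm(q.format(cls))
+ ]
+ ```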
400
+
401
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">CLIP</td><td rowspan="2">CuPL</td><td rowspan="2">ProText</td><td colspan="2">Linear Probe</td><td colspan="2">CoOp</td><td colspan="2">CoCoOp</td></tr><tr><td>K=1</td><td>K=2</td><td>K=1</td><td>K=2</td><td>K=1</td><td>K=2</td></tr><tr><td>ImageNet</td><td>66.70</td><td>69.62</td><td>70.22</td><td>32.13</td><td>44.88</td><td>66.33</td><td>67.07</td><td>69.43</td><td>69.78</td></tr><tr><td>Caltech101</td><td>92.98</td><td>94.32</td><td>95.29</td><td>79.88</td><td>89.01</td><td>92.60</td><td>93.07</td><td>93.83</td><td>94.82</td></tr><tr><td>DTD</td><td>44.56</td><td>53.96</td><td>54.04</td><td>34.59</td><td>40.76</td><td>50.23</td><td>53.60</td><td>48.54</td><td>52.17</td></tr><tr><td>EuroSAT</td><td>47.84</td><td>60.27</td><td>58.53</td><td>49.23</td><td>61.98</td><td>54.93</td><td>65.17</td><td>55.33</td><td>46.74</td></tr><tr><td>StanfordCars</td><td>65.29</td><td>65.95</td><td>66.77</td><td>35.66</td><td>50.28</td><td>67.43</td><td>70.50</td><td>67.22</td><td>68.37</td></tr><tr><td>Flowers102</td><td>71.30</td><td>73.85</td><td>74.42</td><td>69.74</td><td>85.07</td><td>77.53</td><td>87.33</td><td>72.08</td><td>75.79</td></tr><tr><td>Aircraft</td><td>24.90</td><td>27.66</td><td>29.01</td><td>19.61</td><td>26.41</td><td>21.37</td><td>26.20</td><td>12.68</td><td>15.06</td></tr><tr><td>SUN397</td><td>62.59</td><td>69.00</td><td>69.76</td><td>41.58</td><td>53.70</td><td>66.77</td><td>66.53</td><td>68.33</td><td>69.03</td></tr><tr><td>OxfordPets</td><td>89.13</td><td>91.11</td><td>92.72</td><td>44.06</td><td>58.37</td><td>90.37</td><td>89.80</td><td>91.27</td><td>92.64</td></tr><tr><td>UCF101</td><td>66.83</td><td>70.63</td><td>71.45</td><td>53.66</td><td>65.78</td><td>71.23</td><td>73.43</td><td>70.30</td><td>73.51</td></tr><tr><td>Food101</td><td>86.11</td><td>86.11</td><td>86.68</td><td>43.96</td><td>61.51</td><td>84.33</td><td>84.40</td><td>85.65</td><td>86.22</td></tr><tr><td>Average</td><td>65.15</td><td>69.31</td><td>69.90</td><td>45.83</td><td>57.98</td><td>67.56</td><td>70.65</td><td>66.79</td><td>67.65</td></tr></table>
402
+
403
+ Table 15. ProText results with text supervision on each dataset. We compare ProText with CLIP [37], CuPL [36], and the image-supervised Linear Probe [37], CoOp [50], and CoCoOp [49] methods.
404
+
405
+ <table><tr><td>Method</td><td>ImageNet Top1.</td></tr><tr><td>1: CuPL</td><td>69.62</td></tr><tr><td>2: ProText</td><td>70.22</td></tr><tr><td>3: Ensembling: ProText + CuPL</td><td>70.28</td></tr></table>
+
+ Table 16. Ablation on combining CuPL and ProText text features.
409
+
410
+ <table><tr><td>Hparameter</td><td>ImageNet</td><td>Caltech101</td><td>OxfordPets</td><td>StanfordCars</td><td>Flowers102</td><td>Food101</td><td>Aircraft</td><td>SUN397</td><td>DTD</td><td>EuroSAT</td><td>UCF101</td></tr><tr><td>Epochs</td><td>30</td><td>30</td><td>50</td><td>30</td><td>150</td><td>50</td><td>200</td><td>30</td><td>200</td><td>30</td><td>20</td></tr><tr><td># Prompts (T)</td><td>4</td><td>8</td><td>4</td><td>8</td><td>4</td><td>8</td><td>4</td><td>8</td><td>4</td><td>16</td><td>16</td></tr></table>
411
+
412
+ Table 17. Hyper-parameter settings used for the base-to-novel generalization setting. The optimal configuration is set using the validation splits of each dataset.
413
+
414
+ <table><tr><td>Hparameter</td><td>ImageNet</td><td>Caltech101</td><td>OxfordPets</td><td>StanfordCars</td><td>Flowers102</td><td>Food101</td><td>Aircraft</td><td>SUN397</td><td>DTD</td><td>EuroSAT</td><td>UCF101</td></tr><tr><td>Epochs</td><td>200</td><td>30</td><td>50</td><td>20</td><td>300</td><td>30</td><td>200</td><td>200</td><td>200</td><td>300</td><td>100</td></tr><tr><td># Prompts (T)</td><td>16</td><td>16</td><td>4</td><td>8</td><td>4</td><td>16</td><td>4</td><td>16</td><td>16</td><td>4</td><td>8</td></tr></table>
415
+
416
+ Table 18. Hyper-parameters used for text-only supervised setting.
417
+
418
+ # C.2. Qualitative examples
419
+
420
+ As LLMs are pre-trained on internet-scale text corpora, they are capable of generating diverse, high-quality descriptions and captions for different class categories. Below we show some examples of $\mathcal{D}_{\mathrm{PROMPT}}$ text-to-text pairs for the ImageNet-1k dataset.
421
+
422
+ # Class: Tench
423
+
424
+ Class-name template: 'A photo of a Tench'
425
+
426
+ Associated LLM descriptions:
427
+
428
+ - 'A tench is a freshwater fish with a dark green back and light-colored sides.'
429
+ - 'A tench looks like a freshwater fish with a dark olive-green back, fading to yellowish-brown on the sides.'
430
+ - 'Tench are a freshwater fish that can grow up to 70cm long! They have olive-brown skin with dark spots, and their meat is white and firm.'
431
+ - 'This image shows a large, dark green tench swimming in a pond.'
432
+
433
+ # Class: bath towel
434
+
435
+ Class-name template: 'A photo of a bath towel'
436
+
437
+ Associated LLM descriptions:
438
+
439
+ - 'A bath towel typically has a loops on one side and a smooth surface on the other.'
440
+ - 'A bath towel is a rectangular piece of fabric, usually Cotton, that is used to dry oneself after a bath or shower.'
441
+ - 'The image is of a white bath towel with a blue and green stripes.'
442
+ - 'A fluffy white bath towel draped over a towel rack.'
443
+
444
+ # Class: sandal
445
+
446
+ Class-name template: 'A photo of a sandal'
447
+
448
+ Associated LLM descriptions:
449
+
450
+ - 'A sandal is a shoe typically made of leather or synthetic material that has an open toe and a strap or straps that go around the foot or up the ankle.'
451
+ - 'A sandal is usually a flat shoe with a strap that goes around the foot or ankle.'
452
+ - 'This sandal is from the ancient Egyptian city of Thebes.'
453
+ - 'When you are looking to identify a sandal, the first place to start is by looking at the features of the shoe.'
2401.02xxx/2401.02418/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e307658280ac029f6a768980c8623285c228cd83795510b908c55d24511a792
3
+ size 880760
2401.02xxx/2401.02418/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_content_list.json ADDED
@@ -0,0 +1,1342 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "On the Prospects of Incorporating Large Language Models (LLMs) in Automated Planning and Scheduling (APS)",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 88,
8
+ 119,
9
+ 908,
10
+ 162
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Vishal Pallagani<sup>1</sup>, Kaushik Roy<sup>1</sup>, Bharath Muppasani<sup>1</sup>, Francesco Fabiano<sup>2</sup>, Andrea Loreggia<sup>3</sup>, Keerthiram Murugesan<sup>4</sup>, Biplav Srivastava<sup>1</sup>, Francesca Rossi<sup>4</sup>, Lior Horesh<sup>4</sup>, Amit Sheth<sup>1</sup>",
17
+ "bbox": [
18
+ 101,
19
+ 169,
20
+ 898,
21
+ 205
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "<sup>1</sup>University of South Carolina",
28
+ "bbox": [
29
+ 398,
30
+ 208,
31
+ 596,
32
+ 220
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "$^{2}$ New Mexico State University",
39
+ "bbox": [
40
+ 398,
41
+ 222,
42
+ 598,
43
+ 236
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "<sup>3</sup>University of Brescia",
50
+ "bbox": [
51
+ 424,
52
+ 236,
53
+ 571,
54
+ 250
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "4IBM Research",
61
+ "bbox": [
62
+ 447,
63
+ 250,
64
+ 549,
65
+ 262
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Abstract",
72
+ "text_level": 1,
73
+ "bbox": [
74
+ 250,
75
+ 273,
76
+ 313,
77
+ 286
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Automated Planning and Scheduling is among the growing areas in Artificial Intelligence (AI) where mention of LLMs has gained popularity. Based on a comprehensive review of 126 papers, this paper investigates eight categories based on the unique applications of LLMs in addressing various aspects of planning problems: language translation, plan generation, model construction, multi-agent planning, interactive planning, heuristics optimization, tool integration, and brain-inspired planning. For each category, we articulate the issues considered and existing gaps. A critical insight resulting from our review is that the true potential of LLMs unfolds when they are integrated with traditional symbolic planners, pointing towards a promising neuro-symbolic approach. This approach effectively combines the generative aspects of LLMs with the precision of classical planning methods. By synthesizing insights from existing literature, we underline the potential of this integration to address complex planning challenges. Our goal is to encourage the ICAPS community to recognize the complementary strengths of LLMs and symbolic planners, advocating for a direction in automated planning that leverages these synergistic capabilities to develop more advanced and intelligent planning systems.",
84
+ "bbox": [
85
+ 99,
86
+ 295,
87
+ 460,
88
+ 574
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Introduction",
95
+ "text_level": 1,
96
+ "bbox": [
97
+ 225,
98
+ 592,
99
+ 336,
100
+ 606
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "As a sub-field of Artificial Intelligence (Russell and Norvig 2003), Automated Planning and Scheduling (Ghallab, Nau, and Traverso 2004) refers to developing algorithms and systems to generate plans or sequences of actions to achieve specific goals in a given environment or problem domain. APS is a valuable tool in domains where there is a need for intelligent decision-making, goal achievement, and efficient resource utilization. It enables the automation of complex tasks, making systems more capable and adaptable in dynamic environments. Over time, APS has evolved from the early development of robust theoretical foundations to practical applications in diverse sectors like manufacturing, space exploration, and personal scheduling. This evolution underscores the versatility and critical significance of APS.",
107
+ "bbox": [
108
+ 81,
109
+ 611,
110
+ 477,
111
+ 805
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "In parallel with advancements in APS, the development and proliferation of LLMs have marked a substantial leap in AI, particularly within computational linguistics. Evolving from early efforts in natural language processing (NLP), LLMs have undergone significant transformation. Initially focused on basic tasks like word prediction and syntax",
118
+ "bbox": [
119
+ 81,
120
+ 805,
121
+ 477,
122
+ 888
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "analysis, newer models are characterized by their ability to generate coherent, contextually relevant text and perform diverse, complex linguistic tasks. Trained on extensive text corpora, LLMs have mastered human-like language patterns. Their recent success in various NLP tasks has prompted efforts to apply these models in APS. There is a notable shift towards using language constructs to specify aspects of planning, such as preconditions, effects, and goals, rather than relying solely on traditional planning domain languages like PDDL.",
129
+ "bbox": [
130
+ 514,
131
+ 273,
132
+ 911,
133
+ 412
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "This paper presents an exhaustive literature review exploring the integration of LLMs in APS across eight categories: Language Translation, Plan Generation, Model Construction, Multi-agent Planning, Interactive Planning, Heuristics Optimization, Brain-Inspired Planning, and Tool Integration. Table 1 provides the description for the eight categories. Our comprehensive analysis of 126 papers not only categorizes LLMs' diverse contributions but also identifies significant gaps in each domain. Through our review, we put forward the following position:",
140
+ "bbox": [
141
+ 514,
142
+ 412,
143
+ 911,
144
+ 551
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Position Statement",
151
+ "text_level": 1,
152
+ "bbox": [
153
+ 535,
154
+ 566,
155
+ 661,
156
+ 579
157
+ ],
158
+ "page_idx": 0
159
+ },
160
+ {
161
+ "type": "text",
162
+ "text": "Integrating LLMs into APS marks a pivotal advancement, bridging the gap between the advanced reasoning of traditional APS and the nuanced language understanding of LLMs. Traditional APS systems excel in structured, logical planning but often lack flexibility and contextual adaptability, a gap readily filled by LLMs. Conversely, while LLMs offer unparalleled natural language processing and a vast knowledge base, they fail to generate precise, actionable plans where APS systems thrive. This integration surpasses the limitations of each standalone method, offering a dynamic and context-aware planning approach, while also scaling up the traditional use of data and past experiences in the planning process.",
163
+ "bbox": [
164
+ 532,
165
+ 590,
166
+ 895,
167
+ 785
168
+ ],
169
+ "page_idx": 0
170
+ },
171
+ {
172
+ "type": "text",
173
+ "text": "In the forthcoming sections, we delve into the background of LLMs and classical planning problem, accompanied by the identification of literature. This sets the stage for an in-depth exploration of the application of LLMs in APS, where we critically examine the strengths and limitations of LLMs. Our position on the emerging neuro-symbolic AI paradigm",
174
+ "bbox": [
175
+ 514,
176
+ 804,
177
+ 911,
178
+ 888
179
+ ],
180
+ "page_idx": 0
181
+ },
182
+ {
183
+ "type": "aside_text",
184
+ "text": "arXiv:2401.02500v2 [cs.AI] 20 Jan 2024",
185
+ "bbox": [
186
+ 22,
187
+ 276,
188
+ 57,
189
+ 707
190
+ ],
191
+ "page_idx": 0
192
+ },
193
+ {
194
+ "type": "table",
195
+ "img_path": "images/64de82f21bd0513c5e28236eb92b14103811de939112c98e43867203f5ec325f.jpg",
196
+ "table_caption": [],
197
+ "table_footnote": [],
198
+ "table_body": "<table><tr><td>Category</td><td>Description</td></tr><tr><td>Language Translation</td><td>Involves converting natural language into structured planning languages or formats like PDDL and vice-versa, enhancing the interface between human linguistic input and machine-understandable planning directives.</td></tr><tr><td>Plan Generation</td><td>Entails the creation of plans or strategies directly by LLMs, focusing on generating actionable sequences or decision-making processes.</td></tr><tr><td>Model Construction</td><td>Utilizes LLMs to construct or refine world and domain models essential for accurate and effective planning.</td></tr><tr><td>Multi-agent Planning</td><td>Focuses on scenarios involving multiple agents, where LLMs contribute to coordination and cooperative strategy development.</td></tr><tr><td>Interactive Planning</td><td>Centers on scenarios requiring iterative feedback or interactive planning with users, external verifiers, or environment, emphasizing the adaptability of LLMs to dynamic inputs.</td></tr><tr><td>Heuristics Optimization</td><td>Applies LLMs in optimizing planning processes through refining existing plans or providing heuristic assistance to symbolic planners.</td></tr><tr><td>Tool Integration</td><td>Encompasses studies where LLMs act as central orchestrators or coordinators in a tool ecosystem, interfacing with planners, theorem provers, and other systems.</td></tr><tr><td>Brain-Inspired Planning</td><td>Covers research focusing on LLM architectures inspired by neurological or cognitive processes, particularly to enhance planning capabilities.</td></tr></table>",
199
+ "bbox": [
200
+ 81,
201
+ 64,
202
+ 916,
203
+ 377
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "text",
209
+ "text": "Table 1: Comprehensive description of the eight categories utilizing LLMs in APS",
210
+ "bbox": [
211
+ 225,
212
+ 385,
213
+ 771,
214
+ 402
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "text",
220
+ "text": "is central to our discussion, highlighting its unique advantages over purely neural network-based (i.e., statistical AI) or symbolic AI approaches. Finally, we will discuss prospective developments, address potential challenges, and identify promising opportunities in the field.",
221
+ "bbox": [
222
+ 81,
223
+ 426,
224
+ 480,
225
+ 500
226
+ ],
227
+ "page_idx": 1
228
+ },
229
+ {
230
+ "type": "text",
231
+ "text": "Background",
232
+ "text_level": 1,
233
+ "bbox": [
234
+ 227,
235
+ 512,
236
+ 336,
237
+ 529
238
+ ],
239
+ "page_idx": 1
240
+ },
241
+ {
242
+ "type": "text",
243
+ "text": "Large Language Models",
244
+ "text_level": 1,
245
+ "bbox": [
246
+ 83,
247
+ 534,
248
+ 276,
249
+ 550
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "Large language models are neural network models with upwards of $\\sim 3$ billion parameters that are trained on extremely large corpora of natural language data (trillions of tokens/words). These models are proficient in interpreting, generating, and contextualizing human language, leading to applications ranging from text generation to language-driven reasoning tasks. The evolution of LLMs in NLP began with rule-based models, progressed through statistical models, and achieved a significant breakthrough with the introduction of neural network-based models. The shift to sequence-based neural networks, with Recurrent Neural Networks (RNNs) and Long Short-Term Memory (LSTM) networks, marked a notable advancement due to their capability to process information and context over long sequences. Shortcomings in RNNs and LSTMs due to vanishing gradients and, consequently, loss of very long sequence contexts lead to the transformer model, which introduced self-attention (SA) mechanisms. The SA mechanism enabled focus on different parts of a long input sequence in parallel, which enhanced understanding of contextual nuances in language patterns over extremely long sequences. The SA mechanism is also complemented with positional encodings in transformers to enable the model to maintain an awareness of word-token order, which is required to understand accurate",
256
+ "bbox": [
257
+ 81,
258
+ 556,
259
+ 480,
260
+ 891
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "text",
266
+ "text": "grammar and syntax. The self-attention mechanism, central to transformers, uses a query, key, and value system to contextualize dependencies in the input sequence. Informally, the SA concept is inspired by classical information retrieval systems where the query is the input sequence context, the key refers to a \"database\" contained within the parametric memory, and the value is the actual value present at that reference. The operation is mathematically expressed in Equation 1.",
267
+ "bbox": [
268
+ 514,
269
+ 426,
270
+ 913,
271
+ 551
272
+ ],
273
+ "page_idx": 1
274
+ },
275
+ {
276
+ "type": "equation",
277
+ "text": "\n$$\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d _ {k}}}\\right) V \\tag {1}\n$$\n",
278
+ "text_format": "latex",
279
+ "bbox": [
280
+ 566,
281
+ 561,
282
+ 913,
283
+ 595
284
+ ],
285
+ "page_idx": 1
286
+ },
287
+ {
288
+ "type": "text",
289
+ "text": "In this equation, $Q$ , $K$ , and $V$ denote the query, key, and value matrices. The scaling factor $\\sqrt{d_k}$ , where $d_k$ is the dimension of the keys, is employed to standardize the vectors to unit variance for ensuring stable softmax gradients during training. Since the introduction of LLMs with self-attention, there have been several architectural variants depending on the downstream tasks.",
290
+ "bbox": [
291
+ 514,
292
+ 599,
293
+ 911,
294
+ 695
295
+ ],
296
+ "page_idx": 1
297
+ },
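To make Equation 1 concrete, here is a minimal, single-head NumPy sketch of scaled dot-product attention; the toy sequence length and dimensions are illustrative assumptions, not values from the paper.

```python
# Minimal sketch of Equation 1: Attention(Q, K, V) = softmax(QK^T / sqrt(d_k)) V.
# Single head, no batching; toy shapes are assumptions for illustration only.
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)   # subtract max for numerical stability
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def attention(Q, K, V):
    d_k = K.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)           # query-key similarities, scaled by sqrt(d_k)
    weights = softmax(scores, axis=-1)        # each row is a distribution over positions
    return weights @ V                        # weighted sum of value vectors

rng = np.random.default_rng(0)
n, d_k, d_v = 4, 8, 8                         # toy sequence length and dimensions
Q, K = rng.normal(size=(n, d_k)), rng.normal(size=(n, d_k))
V = rng.normal(size=(n, d_v))
print(attention(Q, K, V).shape)               # -> (4, 8)
```

The division by sqrt(d_k) keeps the dot products from growing with the key dimension, which is the stability argument made in the surrounding text.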
298
+ {
299
+ "type": "text",
300
+ "text": "Causal Language Modeling (CLMs): CLMs, such as GPT-4, are designed for tasks where text generation is sequential and dependent on the preceding context. They predict each subsequent word based on the preceding words, modeling the probability of a word sequence in a forward direction. This process is mathematically formulated as shown in Equation 2.",
301
+ "bbox": [
302
+ 514,
303
+ 696,
304
+ 913,
305
+ 792
306
+ ],
307
+ "page_idx": 1
308
+ },
309
+ {
310
+ "type": "equation",
311
+ "text": "\n$$\nP (T) = \\prod_ {i = 1} ^ {n} P \\left(t _ {i} \\mid t _ {< i}\\right) \\tag {2}\n$$\n",
312
+ "text_format": "latex",
313
+ "bbox": [
314
+ 638,
315
+ 804,
316
+ 911,
317
+ 842
318
+ ],
319
+ "page_idx": 1
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "In this formulation, $P(t_{i}|t_{< i})$ represents the probability of the $i$ -th token given all preceding tokens, $t_{< i}$ . This characteristic makes CLMs particularly suitable for applications",
324
+ "bbox": [
325
+ 514,
326
+ 845,
327
+ 913,
328
+ 888
329
+ ],
330
+ "page_idx": 1
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "like content generation, where the flow and coherence of the text in the forward direction are crucial.",
335
+ "bbox": [
336
+ 81,
337
+ 68,
338
+ 478,
339
+ 95
340
+ ],
341
+ "page_idx": 2
342
+ },
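The factorization in Equation 2 can be evaluated directly from a causal LM's output logits. The sketch below is a hedged example that assumes the Hugging Face `transformers` library and the public gpt2 checkpoint are available; the scored sentence is an arbitrary illustration.

```python
# Scoring a sequence under Equation 2: log P(T) = sum_i log P(t_i | t_<i).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

ids = tokenizer("The robot picks up the red block.", return_tensors="pt").input_ids
with torch.no_grad():
    logits = model(ids).logits                        # (1, n, vocab_size)

# Logits at position i-1 give the distribution over the token at position i.
log_probs = torch.log_softmax(logits[:, :-1, :], dim=-1)
token_log_probs = log_probs.gather(2, ids[:, 1:].unsqueeze(-1)).squeeze(-1)
print("log P(T) =", token_log_probs.sum().item())     # sum of log P(t_i | t_<i)
```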
343
+ {
344
+ "type": "text",
345
+ "text": "Masked Language Modeling (MLMs): Unlike CLMs, MLMs like BERT are trained to understand the bidirectional context by predicting words randomly masked in a sentence. This approach allows the model to learn both forward and backward dependencies in language structure. The MLM prediction process can be represented as Equation 3.",
346
+ "bbox": [
347
+ 81,
348
+ 95,
349
+ 480,
350
+ 181
351
+ ],
352
+ "page_idx": 2
353
+ },
354
+ {
355
+ "type": "equation",
356
+ "text": "\n$$\nP \\left(T _ {\\text {m a s k e d}} \\mid T _ {\\text {c o n t e x t}}\\right) = \\prod_ {i \\in M} P \\left(t _ {i} \\mid T _ {\\text {c o n t e x t}}\\right) \\tag {3}\n$$\n",
357
+ "text_format": "latex",
358
+ "bbox": [
359
+ 150,
360
+ 191,
361
+ 478,
362
+ 223
363
+ ],
364
+ "page_idx": 2
365
+ },
366
+ {
367
+ "type": "text",
368
+ "text": "Here, $T_{\\mathrm{masked}}$ is the set of masked tokens in the sentence, $T_{\\mathrm{context}}$ represents the unmasked part of the sentence, and $M$ is the set of masked positions. MLMs have proven effective in NLP tasks such as sentiment analysis or question answering.",
369
+ "bbox": [
370
+ 81,
371
+ 228,
372
+ 478,
373
+ 297
374
+ ],
375
+ "page_idx": 2
376
+ },
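As a concrete illustration of Equation 3, the hedged sketch below uses the Hugging Face fill-mask pipeline with the public bert-base-uncased checkpoint (both are assumptions about available tooling, not part of the paper) to predict a masked token from bidirectional context.

```python
# Masked language modeling (Equation 3): predict a masked token from its context.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")
for candidate in fill_mask("The planner produced a [MASK] sequence of actions."):
    # each candidate's score approximates P(t_i | T_context) for one vocabulary item
    print(f"{candidate['token_str']:>12}  {candidate['score']:.3f}")
```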
377
+ {
378
+ "type": "text",
379
+ "text": "Sequence-to-Sequence (Seq2Seq) Modeling: Seq2Seq models, like T5, are designed to transform an input sequence into a related output sequence. They are often employed in tasks that require a mapping between different types of sequences, such as language translation or summarization. The Seq2Seq process is formulated as Equation 4.",
380
+ "bbox": [
381
+ 81,
382
+ 297,
383
+ 480,
384
+ 383
385
+ ],
386
+ "page_idx": 2
387
+ },
388
+ {
389
+ "type": "equation",
390
+ "text": "\n$$\nP \\left(T _ {\\text {o u t p u t}} \\mid T _ {\\text {i n p u t}}\\right) = \\prod_ {i = 1} ^ {m} P \\left(t _ {\\text {o u t p u t} _ {i}} \\mid T _ {\\text {i n p u t}}, t _ {\\text {o u t p u t} _ {< i}}\\right) \\tag {4}\n$$\n",
391
+ "text_format": "latex",
392
+ "bbox": [
393
+ 122,
394
+ 401,
395
+ 478,
396
+ 441
397
+ ],
398
+ "page_idx": 2
399
+ },
400
+ {
401
+ "type": "text",
402
+ "text": "In Equation 4, $T_{\\mathrm{input}}$ is the input sequence, $T_{\\mathrm{output}}$ is the output sequence, and $P(t_{\\mathrm{output}_i}|T_{\\mathrm{input}},t_{\\mathrm{output}_{< i}})$ calculates the probability of generating each token in the output sequence, considering both the input sequence and the preceding tokens in the output sequence.",
403
+ "bbox": [
404
+ 81,
405
+ 448,
406
+ 478,
407
+ 518
408
+ ],
409
+ "page_idx": 2
410
+ },
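A hedged sketch of Equation 4 in practice, assuming `transformers` and the public t5-small checkpoint: the encoder consumes the input sequence, and the decoder emits each output token conditioned on that input and on its own previously generated tokens.

```python
# Sequence-to-sequence generation (Equation 4) with a T5-style model.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# T5 frames tasks with a textual prefix; translation is one common example.
inputs = tokenizer("translate English to German: The plan is valid.", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)   # autoregressive decoding
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```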
411
+ {
412
+ "type": "text",
413
+ "text": "In addition to their architectural variants, the utility of LLMs is further enhanced by specific model utilization strategies, enabling their effective adaptation to various domains at scale. One key strategy is fine-tuning, which applies to pre-trained LLMs. Pre-trained LLMs are models already trained on large datasets to understand and generate language, acquiring a broad linguistic knowledge base. Fine-tuning involves further training pre-trained LLMs on a smaller, task-specific dataset, thereby adjusting the neural network weights for particular applications. This process is mathematically represented in Equation 5.",
414
+ "bbox": [
415
+ 81,
416
+ 518,
417
+ 480,
418
+ 672
419
+ ],
420
+ "page_idx": 2
421
+ },
422
+ {
423
+ "type": "equation",
424
+ "text": "\n$$\n\\theta_ {\\text {f i n e - t u n e d}} = \\theta_ {\\text {p r e - t r a i n e d}} - \\eta \\cdot \\nabla_ {\\theta} L (\\theta , D _ {\\text {t a s k}}) \\tag {5}\n$$\n",
425
+ "text_format": "latex",
426
+ "bbox": [
427
+ 143,
428
+ 686,
429
+ 478,
430
+ 704
431
+ ],
432
+ "page_idx": 2
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "Here, $\\theta_{\\mathrm{fine - tuned}}$ are the model parameters after fine-tuning, $\\theta_{\\mathrm{pre - trained}}$ are the parameters obtained from pre-training, $\\eta$ is the learning rate, and $\\nabla_{\\theta}L(\\theta ,D_{\\mathrm{task}})$ denotes the gradient of the loss function $L$ with respect to the parameters $\\theta$ on the task-specific dataset $D_{\\mathrm{task}}$ .",
437
+ "bbox": [
438
+ 81,
439
+ 707,
440
+ 480,
441
+ 779
442
+ ],
443
+ "page_idx": 2
444
+ },
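Equation 5 amounts to a gradient step on a task-specific dataset. The minimal PyTorch sketch below uses a tiny linear layer as a stand-in for the pre-trained parameters and synthetic data as D_task; both are illustrative assumptions, not the procedure of any specific LLM.

```python
# One fine-tuning update: theta <- theta - eta * grad_theta L(theta, D_task).
import torch

torch.manual_seed(0)
model = torch.nn.Linear(16, 2)                             # stand-in for pre-trained weights
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)   # eta, the learning rate
loss_fn = torch.nn.CrossEntropyLoss()                      # L

x_task = torch.randn(32, 16)                               # D_task inputs (synthetic)
y_task = torch.randint(0, 2, (32,))                        # D_task labels (synthetic)

optimizer.zero_grad()
loss = loss_fn(model(x_task), y_task)
loss.backward()                                            # grad_theta L(theta, D_task)
optimizer.step()                                           # apply the Equation 5 update
print("task loss after one step:", loss.item())
```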
445
+ {
446
+ "type": "equation",
447
+ "text": "\n$$\nP (T | C) = \\prod_ {i = 1} ^ {n} P \\left(t _ {i} \\mid t _ {< i}, C\\right) \\tag {6}\n$$\n",
448
+ "text_format": "latex",
449
+ "bbox": [
450
+ 186,
451
+ 789,
452
+ 478,
453
+ 828
454
+ ],
455
+ "page_idx": 2
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "Complementing the fine-tuning approach is in-context learning, an alternative strategy that is particularly characteristic of models like the GPT series. This method diverges from fine-tuning by enabling the model to adapt its",
460
+ "bbox": [
461
+ 81,
462
+ 832,
463
+ 480,
464
+ 888
465
+ ],
466
+ "page_idx": 2
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "responses based on immediate context or prompts without necessitating further training. The efficacy of in-context learning is a direct consequence of the comprehensive pretraining phase, where models are exposed to diverse textual datasets, thereby acquiring a nuanced understanding of language and context. Given a context $C$ , the model generates text $T$ that is contextually relevant, as shown in Equation 6. Here, $P(T|C)$ is the probability of generating text $T$ given the context $C$ , and $P(t_i|t_{<i}, C)$ is the probability of generating the $i$ -th token $t_i$ given the preceding tokens $t_{<i}$ and the context $C$ .",
471
+ "bbox": [
472
+ 514,
473
+ 69,
474
+ 913,
475
+ 220
476
+ ],
477
+ "page_idx": 2
478
+ },
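In contrast to the fine-tuning update in Equation 5, in-context learning leaves the weights untouched and only changes the context C in Equation 6. The hedged sketch below (assuming `transformers` and the public gpt2 checkpoint; the few-shot prompt is invented for illustration) conditions generation on such a context.

```python
# In-context learning (Equation 6): generation conditioned on a context C,
# with no parameter updates.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

context = (
    "Translate instructions into actions.\n"
    "Instruction: pick up the cup -> Action: pickup(cup)\n"
    "Instruction: open the door -> Action: open(door)\n"
    "Instruction: stack block A on block B -> Action:"
)
inputs = tokenizer(context, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=10,
                            pad_token_id=tokenizer.eos_token_id)
# Tokens generated after the prompt are drawn from P(t_i | t_<i, C).
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:]))
```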
479
+ {
480
+ "type": "image",
481
+ "img_path": "images/9c9354954b9e2f81a6619598516e2381ffb89e4bc821a098ab69f1eacdded832.jpg",
482
+ "image_caption": [
483
+ "Figure 1: Radar chart showcasing the relative performance of six language models (GPT-4, Claude-v1, GPT-3.5-turbo, Vicuna-13B, Alpaca-13B, LLama-13B) across key domains: Writing, Roleplay, Reasoning, Math, Coding, Extraction, STEM, and Humanities from Zheng et al. (2023a)."
484
+ ],
485
+ "image_footnote": [],
486
+ "bbox": [
487
+ 563,
488
+ 233,
489
+ 870,
490
+ 382
491
+ ],
492
+ "page_idx": 2
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "These diverse model types and training methodologies under the umbrella of LLMs showcase the flexibility and adaptability of language models in handling a wide range of complex tasks. Figure 1 illustrates the comparative capabilities of different LLMs across various competency domains, such as Writing (evaluating text generation quality), Role-play (assessing conversational interaction), Reasoning (logical problem-solving), Math (numerical problem-solving), Coding (programming language understanding and generation), Extraction (information retrieval from text), STEM (proficiency in scientific and technical contexts), and Humanities (engagement with arts, history, and social sciences content). Across these domains, GPT-4 exhibits the strongest performance in the benchmark dataset evaluated by Zheng et al. (2023a), indicative of its superior training and extensive knowledge base. Expanding LLMs into applications such as code generation signifies their adaptability and potential for cross-disciplinary innovation. However, fine-tuning and in-context learning methodologies also bring challenges, such as potential data overfitting and reliance on the quality of input context. LLMs' continuous development and refinement promise to open new frontiers in various domains, including automated planning and scheduling, by bridging AI with human-like language understanding.",
497
+ "bbox": [
498
+ 514,
499
+ 481,
500
+ 913,
501
+ 816
502
+ ],
503
+ "page_idx": 2
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "Automated Planning and Scheduling",
508
+ "text_level": 1,
509
+ "bbox": [
510
+ 516,
511
+ 827,
512
+ 803,
513
+ 843
514
+ ],
515
+ "page_idx": 2
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "APS is a branch of AI that focuses on the creation of strategies or action sequences, typically for execution by intelligent agents, autonomous robots, and unmanned ve-",
520
+ "bbox": [
521
+ 514,
522
+ 845,
523
+ 913,
524
+ 888
525
+ ],
526
+ "page_idx": 2
527
+ },
528
+ {
529
+ "type": "image",
530
+ "img_path": "images/cae332ce1610600c60aeba97dbf09be3e8de45cc4817a92350418208f7015ca3.jpg",
531
+ "image_caption": [
532
+ "Figure 2: Of the 126 papers surveyed in this study, 55 were accepted by peer-reviewed conferences. This chart illustrates the distribution of these papers across various conferences in the fields of LLMs and APS, highlighting the primary forums for scholarly contributions in these areas."
533
+ ],
534
+ "image_footnote": [],
535
+ "bbox": [
536
+ 171,
537
+ 64,
538
+ 831,
539
+ 262
540
+ ],
541
+ "page_idx": 3
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "hicles. A basic category in APS is a Classical Planning Problem (CPP) (Russell and Norvig 2003) which is a tuple $\\mathcal{M} = \\langle \\mathcal{D},\\mathcal{I},\\mathcal{G}\\rangle$ with domain $\\mathcal{D} = \\langle F,A\\rangle$ - where $F$ is a set of fluents that define a state $s\\subseteq F$ , and $A$ is a set of actions - and initial and goal states $\\mathcal{I},\\mathcal{G}\\subseteq F$ . Action $a\\in A$ is a tuple $(c_{a},pre(a),eff^{\\pm}(a))$ where $c_{a}$ is the cost, and $pre(a),eff^{\\pm}(a)\\subseteq F$ are the preconditions and add/delete effects, i.e., $\\delta_{\\mathcal{M}}(s,a)\\models \\bot s$ if $s\\not\\models pre(a)$ ; else $\\delta_{\\mathcal{M}}(s,a)\\models s\\cup \\mathrm{eff}^{+}(a)\\setminus \\mathrm{eff}^{-}(a)$ where $\\delta_{\\mathcal{M}}(\\cdot)$ is the transition function. The cumulative transition function is $\\delta_{\\mathcal{M}}(s,(a_1,a_2,\\ldots ,a_n)) = \\delta_{\\mathcal{M}}(\\delta_{\\mathcal{M}}(s,a_1),(a_2,\\ldots ,a_n))$ . A plan for a CPP is a sequence of actions $\\langle a_1,a_2,\\dots ,a_n\\rangle$ that transforms the initial state $\\mathcal{I}$ into the goal state $\\mathcal{G}$ using the transition function $\\delta_{\\mathcal{M}}$ . Traditionally, a CPP is encoded using a symbolic representation, where states, actions, and transitions are explicitly enumerated. This symbolic approach, often implemented using Planning Domain Definition Language or PDDL (McDermott et al. 1998), ensures precise and unambiguous descriptions of planning problems. This formalism allows for applying search algorithms and heuristic methods to find a sequence of actions that lead to the goal state, which is the essence of the plan.",
546
+ "bbox": [
547
+ 81,
548
+ 343,
549
+ 478,
550
+ 652
551
+ ],
552
+ "page_idx": 3
553
+ },
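The formalism above maps directly onto code. The minimal Python sketch below represents states as sets of fluents and implements the transition function delta(s, a) = s U eff+(a) \ eff-(a) when s satisfies pre(a); the blocks-world-style fluents and the single action are illustrative assumptions, not taken from any benchmark domain.

```python
# Classical planning primitives: fluent sets, actions, and the transition function.
from dataclasses import dataclass

@dataclass(frozen=True)
class Action:
    name: str
    pre: frozenset      # preconditions pre(a)
    add: frozenset      # add effects eff+(a)
    delete: frozenset   # delete effects eff-(a)
    cost: int = 1       # c_a

def transition(state: frozenset, action: Action):
    if not action.pre <= state:               # s does not satisfy pre(a): undefined
        return None
    return (state | action.add) - action.delete

def run_plan(state: frozenset, plan):
    """Cumulative transition: apply a_1, ..., a_n in order."""
    for a in plan:
        state = transition(state, a)
        if state is None:
            raise ValueError(f"precondition of {a.name} not satisfied")
    return state

pickup_a = Action("pickup(A)",
                  pre=frozenset({"ontable(A)", "clear(A)", "handempty"}),
                  add=frozenset({"holding(A)"}),
                  delete=frozenset({"ontable(A)", "clear(A)", "handempty"}))
init = frozenset({"ontable(A)", "clear(A)", "handempty"})
goal = frozenset({"holding(A)"})
print(goal <= run_plan(init, [pickup_a]))      # True: the one-step plan reaches the goal
```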
554
+ {
555
+ "type": "text",
556
+ "text": "The advent of LLMs has sparked a significant evolution in representation methods for CPPs, moving towards leveraging the expressive power of natural language (Valmeekam et al. 2023a) and the perceptual capabilities of vision (Asai 2018). These novel approaches, inherently more suited for LLM processing, use text and vision-based representations, allowing researchers to utilize the pre-existing knowledge within LLMs. This shift enables a more humanistic comprehension and reasoning about planning tasks, enhancing the flexibility and applicability of planning algorithms in complex, dynamic environments. LLMs, while distinct in being trained on vast datasets outside the traditional scope of planning, loosely connect to previous data-driven methodologies, such as case-based reasoning (Xu 1995) applied to planning and Hierarchical Task Network (HTN) (Georgievski and Aiello 2015) which make use of task knowledge. It is an open area how LLMs may be used syn",
557
+ "bbox": [
558
+ 81,
559
+ 652,
560
+ 480,
561
+ 891
562
+ ],
563
+ "page_idx": 3
564
+ },
565
+ {
566
+ "type": "text",
567
+ "text": "ergestically with prior methods.",
568
+ "bbox": [
569
+ 516,
570
+ 344,
571
+ 728,
572
+ 359
573
+ ],
574
+ "page_idx": 3
575
+ },
576
+ {
577
+ "type": "text",
578
+ "text": "LLMs in APS - Literature selection",
579
+ "text_level": 1,
580
+ "bbox": [
581
+ 516,
582
+ 369,
583
+ 795,
584
+ 386
585
+ ],
586
+ "page_idx": 3
587
+ },
588
+ {
589
+ "type": "text",
590
+ "text": "A comprehensive survey of existing literature was conducted to explore the application of LLMs for automated planning. This endeavor led to identifying 126 pertinent research papers showcasing various methodologies, applications, and theoretical insights into utilizing LLMs within this domain.",
591
+ "bbox": [
592
+ 514,
593
+ 388,
594
+ 911,
595
+ 472
596
+ ],
597
+ "page_idx": 3
598
+ },
599
+ {
600
+ "type": "text",
601
+ "text": "The selection of these papers was guided by stringent criteria, focusing primarily on their relevance to the core theme of LLMs in automated planning. The search, conducted across multiple academic databases and journals, was steered by keywords such as \"Large Language Models\", \"Automated Planning\", \"LLMs in Planning\", and \"LLMs + Robotics\". Figure 2 presents the distribution of these selected papers across various peer-reviewed conferences, underlining the breadth and diversity of forums addressing the intersection of LLMs and APS. Even if a paper originated from a workshop within a conference, only the conference name is listed. Out of 126 papers, 71 are under review or available on arXiv. The inclusion criteria prioritized the relevance and contribution of papers to automated planning with LLMs over the publication date. Nonetheless, all surveyed papers emerged from either 2022 or 2023, a trend depicted in Figure 3, underscoring the recent surge in LLM research. A word cloud was generated to visually capture the prevalent research themes reflected in these papers' titles, illustrated in Figure 4. This cloud highlights the frequent use of terms such as \"Language Model\" and \"Planning\", which dominate the current discourse. In contrast, the emergence of \"Neuro-Symbolic\" reflects a nascent yet growing interest in integrating neural and symbolic approaches within the field. This systematic approach ensured a comprehensive inclusion of seminal works and recent advancements.",
602
+ "bbox": [
603
+ 514,
604
+ 473,
605
+ 913,
606
+ 832
607
+ ],
608
+ "page_idx": 3
609
+ },
610
+ {
611
+ "type": "text",
612
+ "text": "Upon the accumulation of these papers, a meticulous manual categorization was undertaken. The papers were divided into four piles, each containing approximately 30 papers. Each pile was manually categorized by one author,",
613
+ "bbox": [
614
+ 514,
615
+ 832,
616
+ 913,
617
+ 890
618
+ ],
619
+ "page_idx": 3
620
+ },
621
+ {
622
+ "type": "image",
623
+ "img_path": "images/073f64493115ced78d94b76f87c30654f0a47c4bfb31b9139f35eae7bdf742fc.jpg",
624
+ "image_caption": [
625
+ "Figure 3: Annual distribution of the 126 surveyed papers, indicating a significant increase in publications from 12 in 2022 to 114 in 2023, highlighting the rapid growth of LLM research within a single year."
626
+ ],
627
+ "image_footnote": [],
628
+ "bbox": [
629
+ 120,
630
+ 66,
631
+ 441,
632
+ 224
633
+ ],
634
+ "page_idx": 4
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "with the final categorization being reviewed by all authors. During this process, each paper could belong to multiple categories out of the eight established. The maximum number of categories assigned to a single paper was three, although the median was typically one category per paper. This process was pivotal in distilling the vast information into coherent, thematic groups. The categorization was conducted based on the specific application of LLMs in planning. This formed eight distinct categories, each representing a unique facet of LLM application in automated planning. These categories facilitate a structured analysis and highlight LLMs' diverse applications and theoretical underpinnings in this field.",
639
+ "bbox": [
640
+ 81,
641
+ 321,
642
+ 478,
643
+ 502
644
+ ],
645
+ "page_idx": 4
646
+ },
647
+ {
648
+ "type": "image",
649
+ "img_path": "images/c6bce785d81e2113a9455e41ee3cb5796b607308a1b1b790012676aca8c9f4c9.jpg",
650
+ "image_caption": [
651
+ "Figure 4: Word cloud of terms from the titles of papers surveyed in this study, displaying the prevalence of \"Language Model\" and \"Planning\" as central themes. The presence of \"Neuro-Symbolic\" indicates an emergent trend toward the fusion of neural and symbolic methodologies in the domain."
652
+ ],
653
+ "image_footnote": [],
654
+ "bbox": [
655
+ 89,
656
+ 518,
657
+ 475,
658
+ 670
659
+ ],
660
+ "page_idx": 4
661
+ },
662
+ {
663
+ "type": "text",
664
+ "text": "LLMs in APS - Literature Discussion",
665
+ "text_level": 1,
666
+ "bbox": [
667
+ 120,
668
+ 784,
669
+ 441,
670
+ 800
671
+ ],
672
+ "page_idx": 4
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "This section dwelves into the diverse applications of LLMs in planning tasks. We have identified eight distinct categories based on the utility and application of LLMs in planning, which are concisely summarized in Table 1. Figure 5 provides a detailed taxonomy, illustrating the categorization of the identified research papers.",
677
+ "bbox": [
678
+ 81,
679
+ 804,
680
+ 480,
681
+ 891
682
+ ],
683
+ "page_idx": 4
684
+ },
685
+ {
686
+ "type": "text",
687
+ "text": "Language Translation",
688
+ "text_level": 1,
689
+ "bbox": [
690
+ 516,
691
+ 68,
692
+ 689,
693
+ 83
694
+ ],
695
+ "page_idx": 4
696
+ },
697
+ {
698
+ "type": "text",
699
+ "text": "Language translation in the context of LLMs and planning involves transforming natural language instructions into structured planning languages (Wong et al. 2023; Kelly et al. 2023; Yang 2023; Pan et al. 2023; Xie et al. 2023; Yang, Ishay, and Lee 2023; Lin et al. 2023c; Sakib and Sun 2023; Yang et al. 2023b; Parakh et al. 2023; Yang et al. 2023a; Dai et al. 2023; Ding et al. 2023b; Zelikman et al. 2023; Xu et al. 2023b; Chen et al. 2023a; You et al. 2023) such as PDDL, and vice versa, utilizing in-context learning techniques (Guan et al. 2023). This capability effectively bridges the gap between human linguistic expression and machine-understandable formats, enhancing intuitive and efficient planning processes. The LLM+P framework (Liu et al. 2023) exemplifies this by converting natural language descriptions of planning problems into PDDL using GPT-4, leveraging classical planners for solution finding, and then translating these solutions back into natural language, with a specific focus on robot planning scenarios. Additionally, Graph2NL (Chalvatzaki et al. 2023) generates natural language text from scene graphs for long-horizon robot reasoning tasks, while (Shirai et al. 2023) introduces a vision-to-language interpreter for robot task planning. Further, (Brohan et al. 2023) examines the grounding of LLM-generated natural language utterances in actionable robot tasks, and (Yang, Gaglione, and Topcu 2022) utilizes LLMs for creating finite-state automatons for sequential decision-making problems. Despite these advancements, a critical research gap emerges in the autonomous translation capabilities of LLMs, particularly in converting natural language to PDDL without external expert intervention.",
700
+ "bbox": [
701
+ 514,
702
+ 87,
703
+ 913,
704
+ 503
705
+ ],
706
+ "page_idx": 4
707
+ },
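To illustrate the translation pattern described above (natural language to PDDL, symbolic planning, then back to natural language), here is a hedged structural sketch. `query_llm` and `run_classical_planner` are hypothetical placeholders introduced only for illustration; they are not APIs from Liu et al. (2023) or any cited system, and only the overall flow follows the text.

```python
# LLM-as-translator pipeline: NL task -> PDDL problem -> classical planner -> NL plan.
def query_llm(prompt: str) -> str:
    """Hypothetical placeholder for a call to a large language model."""
    raise NotImplementedError

def run_classical_planner(domain_pddl: str, problem_pddl: str) -> str:
    """Hypothetical placeholder for invoking a symbolic planner on PDDL files."""
    raise NotImplementedError

def translate_and_plan(nl_task: str, domain_pddl: str, example_problem: str) -> str:
    # 1. In-context translation of the task description into a PDDL problem.
    problem_pddl = query_llm(
        "Translate the task into a PDDL problem, following the example.\n"
        f"Example problem:\n{example_problem}\n\nTask: {nl_task}\nPDDL problem:"
    )
    # 2. Sound plan search is delegated to the classical planner.
    plan = run_classical_planner(domain_pddl, problem_pddl)
    # 3. The symbolic plan is translated back into natural language for the user.
    return query_llm(f"Explain this plan in plain English:\n{plan}")
```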
708
+ {
709
+ "type": "text",
710
+ "text": "While LLMs effectively translate PDDL to natural language, a notable gap is evident in their limited understanding of real-world objects and the problem of grounding affordances, mainly when translating natural language to structured languages like PDDL. Addressing this gap calls for integrating neuro-symbolic approaches in LLMs, where the fusion of perceptual experience for concrete concept understanding from knowledge graphs complements LLMs' proficiency in distributional statistics (Lenat and Marcus 2023).",
711
+ "bbox": [
712
+ 527,
713
+ 513,
714
+ 903,
715
+ 630
716
+ ],
717
+ "page_idx": 4
718
+ },
719
+ {
720
+ "type": "text",
721
+ "text": "Plan Generation",
722
+ "text_level": 1,
723
+ "bbox": [
724
+ 516,
725
+ 648,
726
+ 648,
727
+ 664
728
+ ],
729
+ "page_idx": 4
730
+ },
731
+ {
732
+ "type": "text",
733
+ "text": "This category focuses on directly generating plans using LLMs. The research, primarily utilizing causal language models through in-context learning (Sermanet et al. 2023; Li et al. 2023b; Silver et al. 2023; Parakh et al. 2023; Zelikman et al. 2023; Besta et al. 2023; Huang et al. 2023a; Dalal et al. 2023; Wang et al. 2023b; Valmeekam et al. 2022; Valmeekam, Marquez, and Kambhampati 2023; Gramopadhye and Szafir 2022; Singh et al. 2023) $^{1}$ , demonstrates modest performance, indicating notable challenges in employing LLMs for effective plan generation. Novel in-context learning strategies, such as the Chain-of-Symbol and Tree of Thoughts, have been introduced to enhance LLMs' reasoning capabilities (Hu et al. 2023b; Yao et al. 2023). Ef",
734
+ "bbox": [
735
+ 514,
736
+ 667,
737
+ 913,
738
+ 849
739
+ ],
740
+ "page_idx": 4
741
+ },
742
+ {
743
+ "type": "page_footnote",
744
+ "text": "<sup>1</sup>Due to space constraints, only a select number of papers are cited in this section.",
745
+ "bbox": [
746
+ 514,
747
+ 861,
748
+ 911,
749
+ 888
750
+ ],
751
+ "page_idx": 4
752
+ },
753
+ {
754
+ "type": "image",
755
+ "img_path": "images/83d1a34e2a54e9903fd6b7dac7ba203146e201485f993f83594961a414049fc8.jpg",
756
+ "image_caption": [
757
+ "Figure 5: Taxonomy of recent research in the intersection of LLMs and Planning into categories (#). Each has scholarly papers based on their unique application or customization of LLMs in addressing various aspects of planning problems."
758
+ ],
759
+ "image_footnote": [],
760
+ "bbox": [
761
+ 84,
762
+ 77,
763
+ 916,
764
+ 834
765
+ ],
766
+ "page_idx": 5
767
+ },
768
+ {
769
+ "type": "text",
770
+ "text": "forts to generate multimodal, text, and image-based goal-conditioned plans are exemplified by (Lu et al. 2023b). Additionally, a subset of studies in this survey investigates the fine-tuning of seq2seq, code-based language models (Pallagani et al. 2022, 2023b), which are noted for their advanced syntactic encoding. These models show promise in improving plan generation within the confines of their training datasets (Logeswaran et al. 2023), yet exhibit limitations in generalizing to out-of-distribution domains (Pallagani et al. 2023a), highlighting a gap in their adaptability across diverse planning contexts.",
771
+ "bbox": [
772
+ 86,
773
+ 68,
774
+ 477,
775
+ 220
776
+ ],
777
+ "page_idx": 6
778
+ },
779
+ {
780
+ "type": "text",
781
+ "text": "Causal LLMs are predominantly used for plan generation, but their performance is often limited due to their design, which is focused on generating text based on preceding input. On the other hand, seq2seq LLMs can generate valid plans but struggle with generalization across diverse domains. This limitation highlights an opportunity for a synergistic approach: integrating even imperfect LLM outputs with symbolic planners can expedite heuristic searches, thereby enhancing efficiency and reducing search times (Fabiano et al. 2023).",
782
+ "bbox": [
783
+ 94,
784
+ 229,
785
+ 465,
786
+ 344
787
+ ],
788
+ "page_idx": 6
789
+ },
790
+ {
791
+ "type": "text",
792
+ "text": "Model Construction",
793
+ "text_level": 1,
794
+ "bbox": [
795
+ 86,
796
+ 363,
797
+ 240,
798
+ 376
799
+ ],
800
+ "page_idx": 6
801
+ },
802
+ {
803
+ "type": "text",
804
+ "text": "This category employs LLMs to build or refine world and domain models essential for accurate planning. Nottingham et al. (2023); Yuan et al. (2023a) leverage in-context learning with LLMs to develop an abstract world model in the Minecraft domain, highlighting the challenge of semantic grounding in LLMs. Similarly, Gragera and Pozanco (2023) explore the capability of LLMs in completing ill-defined PDDL domains. Efforts such as (Huang et al. 2023a; Brohan et al. 2023) delve into LLMs' grounding capabilities, with SayCan (Brohan et al. 2023) notably achieving $74\\%$ executable plans. Hao et al. (2023a); Yoneda et al. (2023) innovatively positions LLMs as both world models and reasoning agents, enabling the simulation of world states and prediction of action outcomes. Research by (Zhang and Soh 2023; Wong et al. 2023; Mandi, Jain, and Song 2023; Hu et al. 2023a; Zhao, Lee, and Hsu 2023; Ding et al. 2023b; Huang et al. 2023a; Wu et al. 2023b; Xu et al. 2023b; Brohan et al. 2023) shows that LLMs can effectively model high-level human states and behaviors using their commonsense knowledge. Yet, they face difficulties accurately processing low-level geometrical or shape features due to spatial and numerical reasoning constraints. Additionally, Kelly et al. (2023) investigates the potential of LLMs in conjunction with planners to craft narratives and logical story models, integrating human-in-the-loop for iterative edits.",
805
+ "bbox": [
806
+ 86,
807
+ 381,
808
+ 477,
809
+ 726
810
+ ],
811
+ "page_idx": 6
812
+ },
813
+ {
814
+ "type": "text",
815
+ "text": "LLMs often struggle with detailed spatial reasoning and processing low-level environmental features, limiting their effectiveness in model construction. Integrating world models presents a viable solution, offering advanced abstractions for reasoning that encompass human-like cognitive elements and interactions, thereby enhancing LLMs' capabilities in model construction (Hu and Shu 2023).",
816
+ "bbox": [
817
+ 96,
818
+ 736,
819
+ 465,
820
+ 824
821
+ ],
822
+ "page_idx": 6
823
+ },
824
+ {
825
+ "type": "text",
826
+ "text": "Multi-agent Planning",
827
+ "text_level": 1,
828
+ "bbox": [
829
+ 86,
830
+ 844,
831
+ 251,
832
+ 859
833
+ ],
834
+ "page_idx": 6
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "In multi-agent planning, LLMs play a vital role in scenarios involving interaction among multiple agents, typically mod",
839
+ "bbox": [
840
+ 86,
841
+ 861,
842
+ 477,
843
+ 888
844
+ ],
845
+ "page_idx": 6
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "eled using distinct LLMs. These models enhance coordination and cooperation, leading to more complex and effective multi-agent strategies. (Zhang et al. 2023b) introduces an innovative framework that employs LLMs to develop cooperative embodied agents. AutoGraph (Wei et al. 2023) leverages LLMs to generate autonomous agents adept at devising solutions for varied graph-structured data problems. Addressing scalability in multi-robot task planning, (Chen et al. 2023d) proposes frameworks for the collaborative function of different LLM-based agents. Furthermore, (Abdelnabi et al. 2023) and (Hua et al. 2023) collectively demonstrate the effectiveness of LLM agents in complex negotiation and decision-making environments.",
850
+ "bbox": [
851
+ 519,
852
+ 69,
853
+ 911,
854
+ 248
855
+ ],
856
+ "page_idx": 6
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "A key gap in multi-agent planning with LLMs lies in standardizing inter-agent communication and maintaining distinct belief states, including human aspects. Overcoming this requires advanced LLM algorithms for dynamic alignment of communication and belief states, drawing on epistemic reasoning and knowledge representation (de Zarza et al. 2023).",
861
+ "bbox": [
862
+ 529,
863
+ 260,
864
+ 900,
865
+ 335
866
+ ],
867
+ "page_idx": 6
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "Interactive Planning",
872
+ "text_level": 1,
873
+ "bbox": [
874
+ 519,
875
+ 356,
876
+ 676,
877
+ 372
878
+ ],
879
+ "page_idx": 6
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "In this category, LLMs are utilized in dynamic scenarios where real-time adaptability to user feedback or iterative planning is essential. The refinement of LLM outputs is typically achieved through four primary feedback variants: (a) External verifiers, such as VAL(Howey, Long, and Fox 2004) for PDDL or scene descriptors and success detectors in robotics (Guan et al. 2023; Arora and Kambhampati 2023; Jha et al. 2023; Huang et al. 2022b; Liu, Bahety, and Song 2023; Rana et al. 2023; Ren et al. 2023; Kim et al. 2023; Graule and Isler 2023; Driess et al. 2023; Zheng et al. 2023b); (b) Online reinforcement learning, which progressively updates the LLM about environmental changes (Carta et al. 2023); (c) Self-refinement by LLMs, where they provide feedback on their own outputs (Zhou et al. 2023; Hu et al. 2023c,b; Ding et al. 2023a; Sun et al. 2023; Naik et al. 2023); (d) Input from human experts (Raman et al. 2022; Wu, Ai, and Hsu 2023). Furthermore, (Chen et al. 2023b) introduces the \"Action Before Action\" method, enabling LLMs to proactively seek relevant information from external sources in natural language, thereby improving embodied decision-making in LLMs by $40\\%$ .",
884
+ "bbox": [
885
+ 519,
886
+ 375,
887
+ 911,
888
+ 666
889
+ ],
890
+ "page_idx": 6
891
+ },
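Feedback variant (a) above is essentially a generate-validate-refine loop. The hedged sketch below shows that control flow only; `query_llm` and `validate_plan` are hypothetical placeholders (the latter standing in for an external verifier such as VAL), not APIs from any of the cited works.

```python
# Interactive planning loop: propose a plan, validate it externally, re-prompt on errors.
def query_llm(prompt: str) -> str:
    raise NotImplementedError     # hypothetical call to a causal LLM

def validate_plan(plan: str) -> list:
    raise NotImplementedError     # hypothetical verifier (e.g., wrapping VAL); returns error messages

def plan_with_feedback(task: str, max_rounds: int = 3) -> str:
    plan = query_llm(f"Produce a step-by-step plan for the task:\n{task}")
    for _ in range(max_rounds):
        errors = validate_plan(plan)
        if not errors:            # the verifier accepts the plan
            return plan
        plan = query_llm(
            f"The following plan failed validation:\n{plan}\n"
            "Errors:\n" + "\n".join(errors) + "\nReturn a corrected plan."
        )
    return plan                   # best effort after max_rounds refinements
```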
892
+ {
893
+ "type": "text",
894
+ "text": "A key gap in interactive planning with LLMs lies in harmonizing the \"fast\" neural processing of LLMs with \"slow\" symbolic reasoning, as manifested in feedback mechanisms. This integration is key to maintaining the neural speed of LLMs while effectively embedding the depth and precision of feedback, which is vital for accuracy in dynamic planning scenarios (Zhang et al. 2023a).",
895
+ "bbox": [
896
+ 529,
897
+ 676,
898
+ 900,
899
+ 766
900
+ ],
901
+ "page_idx": 6
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "Heuristics Optimization",
906
+ "text_level": 1,
907
+ "bbox": [
908
+ 519,
909
+ 786,
910
+ 704,
911
+ 801
912
+ ],
913
+ "page_idx": 6
914
+ },
915
+ {
916
+ "type": "text",
917
+ "text": "In the realm of Heuristics Optimization, LLMs are leveraged to enhance planning processes, either by refining existing plans or aiding symbolic planners with heuristic guidance. Studies like (Hazra, Martires, and De Raedt 2023; Hao et al. 2023a; Dai et al. 2023; Feng et al. 2023) have effectively coupled LLMs with heuristic searches to identify optimal",
918
+ "bbox": [
919
+ 519,
920
+ 805,
921
+ 911,
922
+ 888
923
+ ],
924
+ "page_idx": 6
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "action sequences. Research by (Silver et al. 2022; Shah et al. 2023; Valmeekam et al. 2023b) reveals that LLMs' outputs, even if partially correct, can provide valuable direction for symbolic planners such as LPG (Gerevini and Serina 2002), especially in problems beyond the LLMs' solvable scope. Furthermore, (Raimondo et al. 2023) makes an intriguing observation that including workflows and action plans in LLM input prompts can notably enhance task-oriented dialogue generalization.",
929
+ "bbox": [
930
+ 86,
931
+ 68,
932
+ 477,
933
+ 193
934
+ ],
935
+ "page_idx": 7
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "This category marks significant progress towards realizing neuro-symbolic approaches in APS. Current methods emphasize plan validity, often at the expense of time efficiency. Future research should look at how to continually evolve LLMs for better plan generation, with its experience from complimenting symbolic planners (Du et al. 2023).",
940
+ "bbox": [
941
+ 96,
942
+ 207,
943
+ 465,
944
+ 282
945
+ ],
946
+ "page_idx": 7
947
+ },
948
+ {
949
+ "type": "text",
950
+ "text": "Tool Integration",
951
+ "text_level": 1,
952
+ "bbox": [
953
+ 86,
954
+ 309,
955
+ 210,
956
+ 324
957
+ ],
958
+ "page_idx": 7
959
+ },
960
+ {
961
+ "type": "text",
962
+ "text": "In tool integration, LLMs serve as coordinators within a diverse array of planning tools, enhancing functionality in complex scenarios. Studies like (Xu et al. 2023a; Lu et al. 2023a; Shen et al. 2023; Hao et al. 2023b; Ge et al. 2023) demonstrate that incorporating tools such as web search engines, Python functions, and API endpoints enhances LLM reasoning abilities. However, (Ruan et al. 2023) notes a tendency for LLMs to over-rely on specific tools, potentially prolonging the planning process. (Li et al. 2023a) introduces a benchmark for tool-augmented LLMs. While typical approaches involve teaching LLMs tool usage via multiple prompts, (Hsieh et al. 2023) suggests that leveraging tool documentation offers improved planning capabilities, circumventing the need for extensive demonstrations.",
963
+ "bbox": [
964
+ 86,
965
+ 333,
966
+ 477,
967
+ 527
968
+ ],
969
+ "page_idx": 7
970
+ },
971
+ {
972
+ "type": "text",
973
+ "text": "LLMs often hallucinate non-existent tools, overuse a single tool, and face scaling challenges with multiple tools. Overcoming these issues is key to enabling LLMs to effectively select and utilize various tools in complex planning scenarios (Elaraby et al. 2023).",
974
+ "bbox": [
975
+ 96,
976
+ 541,
977
+ 465,
978
+ 604
979
+ ],
980
+ "page_idx": 7
981
+ },
982
+ {
983
+ "type": "text",
984
+ "text": "Brain-Inspired Planning",
985
+ "text_level": 1,
986
+ "bbox": [
987
+ 86,
988
+ 631,
989
+ 274,
990
+ 646
991
+ ],
992
+ "page_idx": 7
993
+ },
994
+ {
995
+ "type": "text",
996
+ "text": "This area explores neurologically and cognitively inspired architectures in LLMs (Webb et al. 2023; Sumers et al. 2023; Momennejad et al. 2023; Hu et al. 2023d; Lin et al. 2023a), aiming to replicate human-like planning in enhancing problem-solving. However, while these methods rely on in-context learning, they frequently encounter challenges such as hallucination and grounding, as previously discussed, and tend to be more computationally intensive than in-context learning alone.",
997
+ "bbox": [
998
+ 86,
999
+ 655,
1000
+ 477,
1001
+ 780
1002
+ ],
1003
+ "page_idx": 7
1004
+ },
1005
+ {
1006
+ "type": "text",
1007
+ "text": "While LLMs attempt to mimic symbolic solvers through in-context learning for brain-inspired modules, this approach lacks adaptability and is a superficial understanding of complex cognitive processes. To overcome these issues, developing systems where neural and symbolic components are intrinsically intertwined is critical as it would accurately mirror human cognitive capabilities in planning (Fabiano et al. 2023).",
1008
+ "bbox": [
1009
+ 96,
1010
+ 792,
1011
+ 467,
1012
+ 883
1013
+ ],
1014
+ "page_idx": 7
1015
+ },
1016
+ {
1017
+ "type": "text",
1018
+ "text": "Discussion and Conclusion",
1019
+ "text_level": 1,
1020
+ "bbox": [
1021
+ 604,
1022
+ 68,
1023
+ 826,
1024
+ 82
1025
+ ],
1026
+ "page_idx": 7
1027
+ },
1028
+ {
1029
+ "type": "text",
1030
+ "text": "In this position paper, we comprehensively investigate the role of LLMs within the domain of APS, analyzing 126 scholarly articles across eight distinct categories. This extensive survey not only provides a detailed landscape of current LLM applications and their limitations but also highlights the volume of research in each category: Language Translation with 23 papers demonstrates LLMs' proficiency, whereas Plan Generation, the most researched category with 53 papers, reveals their shortcomings in optimality, completeness, and correctness compared to traditional combinatorial planners. Our exploration extends to Model Construction (17 papers), which examines LLMs in developing planning models, and the relatively unexplored area of Multiagent Planning (4 papers). Interactive Planning is well represented with 21 papers, illustrating LLMs' adaptability in feedback-centric scenarios. Despite being less researched, Heuristics Optimization and Tool Integration, each with 8 papers, provide valuable insights into efficiency enhancement and integration of LLMs with symbolic solvers. Lastly, Brain-inspired Planning, although least represented with 5 papers, opens innovative avenues for human-like planning processes in LLMs. By identifying the research distribution and gaps in these categories, our paper proposes how neuro-symbolic approaches can address these voids, thereby underscoring the varying degrees of LLM applications in APS and guiding future research towards enhancing their capabilities in complex planning tasks.",
1031
+ "bbox": [
1032
+ 519,
1033
+ 94,
1034
+ 911,
1035
+ 468
1036
+ ],
1037
+ "page_idx": 7
1038
+ },
1039
+ {
1040
+ "type": "text",
1041
+ "text": "It is important to acknowledge that while LLMs have shown promise, they are not a panacea for the inherent complexities of automated planning. The expectation that LLMs, operating within polynomial run-time bounds, could supplant the nuanced and often non-polynomial complexities of symbolic planners is not yet realizable. Indeed, the strengths of LLMs do not currently include generating sequences of actions akin to those devised by symbolic planners, which are essential for creating a coherent and practical plan for complex problems. However, this does not diminish the potential utility of LLMs within this space. When considering average-case scenarios, which are typically less complex than worst-case scenarios, LLMs could offer substantial efficiencies. They can be seen as akin to meta-heuristic approaches, capable of accelerating plan generation in a variety of settings. As such, their application, governed by cognitive-inspired frameworks like SOFAI(Fabiano et al. 2023), could delineate when and where their use is most advantageous.",
1042
+ "bbox": [
1043
+ 519,
1044
+ 470,
1045
+ 911,
1046
+ 733
1047
+ ],
1048
+ "page_idx": 7
1049
+ },
1050
+ {
1051
+ "type": "text",
1052
+ "text": "Future research should prioritize three areas: developing new LLM training paradigms that ensure coherence and goal alignment in outputs; delving into Henry Kautz's neurosymbolic taxonomies (Kautz 2022) to better integrate neural and symbolic methods; and establishing clear performance metrics for LLM-assisted planners. In conclusion, integrating LLMs into automated planning, while challenging, opens avenues for innovation. Embracing a symbiotic approach that combines the creative strengths of LLMs with the precision of symbolic planners can lead to more effective, sophisticated AI applications in planning.",
1053
+ "bbox": [
1054
+ 519,
1055
+ 736,
1056
+ 911,
1057
+ 888
1058
+ ],
1059
+ "page_idx": 7
1060
+ },
1061
+ {
1062
+ "type": "text",
1063
+ "text": "References",
1064
+ "text_level": 1,
1065
+ "bbox": [
1066
+ 233,
1067
+ 66,
1068
+ 330,
1069
+ 82
1070
+ ],
1071
+ "page_idx": 8
1072
+ },
1073
+ {
1074
+ "type": "list",
1075
+ "sub_type": "ref_text",
1076
+ "list_items": [
1077
+ "Abdelnabi, S.; Gomaa, A.; Sivaprasad, S.; Schonherr, L.; and Fritz, M. 2023. Llm-deliberation: Evaluating llms with interactive multi-agent negotiation games. arXiv preprint arXiv:2309.17234.",
1078
+ "Arora, D.; and Kambhampati, S. 2023. Learning and Leveraging Verifiers to Improve Planning Capabilities of Pre-trained Language Models. arXiv preprint arXiv:2305.17077.",
1079
+ "Asai, M. 2018. Photo-Realistic Blockworld Dataset. arXiv preprint arXiv:1812.01818.",
1080
+ "Besta, M.; Blach, N.; Kubicek, A.; Gerstenberger, R.; Gianinazzi, L.; Gajda, J.; Lehmann, T.; Podstawski, M.; Niewiadomski, H.; Nczyk, P.; et al. 2023. Graph of thoughts: Solving elaborate problems with large language models. arXiv preprint arXiv:2308.09687.",
1081
+ "Brohan, A.; Chebotar, Y.; Finn, C.; Hausman, K.; Herzog, A.; Ho, D.; Ibarz, J.; Irpan, A.; Jang, E.; Julian, R.; et al. 2023. Do as i can, not as i say: Grounding language in robotic affordances. In Conference on Robot Learning, 287-318. PMLR.",
1082
+ "Capitanelli, A.; and Mastrogiovanni, F. 2023. A Framework to Generate Neurosymbolic PDDL-compliant Planners. arXiv preprint arXiv:2303.00438.",
1083
+ "Carta, T.; Romac, C.; Wolf, T.; Lamprier, S.; Sigaud, O.; and Oudefyer, P.-Y. 2023. Grounding large language models in interactive environments with online reinforcement learning. arXiv preprint arXiv:2302.02662.",
1084
+ "Chalvatzaki, G.; Younes, A.; Nandha, D.; Le, A. T.; Ribeiro, L. F.; and Gurevych, I. 2023. Learning to reason over scene graphs: a case study of finetuning GPT-2 into a robot language model for grounded task planning. Frontiers in Robotics and AI, 10.",
1085
+ "Chen, B.; Xia, F.; Ichter, B.; Rao, K.; Gopalakrishnan, K.; Ryoo, M. S.; Stone, A.; and Kappler, D. 2023a. Open-vocabulary queryable scene representations for real world planning. In 2023 IEEE International Conference on Robotics and Automation (ICRA), 11509-11522. IEEE.",
1086
+ "Chen, X.; Zhang, S.; Zhang, P.; Zhao, L.; and Chen, J. 2023b. Asking Before Action: Gather Information in Embodied Decision Making with Language Models. arXiv preprint arXiv:2305.15695.",
1087
+ "Chen, Y.; Arkin, J.; Zhang, Y.; Roy, N.; and Fan, C. 2023c. AutoTAMP: Autoregressive Task and Motion Planning with LLMs as Translators and Checkers. arXiv preprint arXiv:2306.06531.",
1088
+ "Chen, Y.; Arkin, J.; Zhang, Y.; Roy, N.; and Fan, C. 2023d. Scalable Multi-Robot Collaboration with Large Language Models: Centralized or Decentralized Systems? arXiv preprint arXiv:2309.15943.",
1089
+ "Dagan, G.; Keller, F.; and Lascarides, A. 2023. Dynamic Planning with a LLM. arXiv preprint arXiv:2308.06391.",
1090
+ "Dai, Z.; Asgharivaskasi, A.; Duong, T.; Lin, S.; Tzes, M.-E.; Pappas, G.; and Atanasov, N. 2023. Optimal Scene Graph Planning with Large Language Model Guidance. arXiv preprint arXiv:2309.09182."
1091
+ ],
1092
+ "bbox": [
1093
+ 84,
1094
+ 85,
1095
+ 478,
1096
+ 888
1097
+ ],
1098
+ "page_idx": 8
1099
+ },
1100
+ {
1101
+ "type": "list",
1102
+ "sub_type": "ref_text",
1103
+ "list_items": [
1104
+ "Dalal, M.; Chiruvolu, T.; Chaplot, D. S.; and Salakhutdinov, R. 2023. Plan-Seq-Learn: Language Model Guided RL for Solving Long Horizon Robotics Tasks. In 2nd Workshop on Language and Robot Learning: Language as Grounding.",
1105
+ "de Zarza, I.; de Curto, J.; Roig, G.; Manzoni, P.; and Calafate, C. T. 2023. Emergent Cooperation and Strategy Adaptation in Multi-Agent Systems: An Extended Coevolutionary Theory with LLMs. *Electronics*, 12(12): 2722.",
1106
+ "Ding, Y.; Zhang, X.; Amiri, S.; Cao, N.; Yang, H.; Kaminski, A.; Esselink, C.; and Zhang, S. 2023a. Integrating Action Knowledge and LLMs for Task Planning and Situation Handling in Open Worlds. arXiv preprint arXiv:2305.17590.",
1107
+ "Ding, Y.; Zhang, X.; Paxton, C.; and Zhang, S. 2023b. Leveraging Commonsense Knowledge from Large Language Models for Task and Motion Planning. In RSS 2023 Workshop on Learning for Task and Motion Planning.",
1108
+ "Ding, Y.; Zhang, X.; Paxton, C.; and Zhang, S. 2023c. Task and motion planning with large language models for object rearrangement. arXiv preprint arXiv:2303.06247.",
1109
+ "Driess, D.; Xia, F.; Sajjadi, M. S.; Lynch, C.; Chowdhery, A.; Ichter, B.; Wahid, A.; Tompson, J.; Vuong, Q.; Yu, T.; et al. 2023. Palm-e: An embodied multimodal language model. arXiv preprint arXiv:2303.03378.",
1110
+ "Du, M.; Luu, A. T.; Ji, B.; and Ng, S.-k. 2023. From Static to Dynamic: A Continual Learning Framework for Large Language Models. arXiv preprint arXiv:2310.14248.",
1111
+ "Elaraby, M.; Lu, M.; Dunn, J.; Zhang, X.; Wang, Y.; and Liu, S. 2023. Halo: Estimation and reduction of hallucinations in open-source weak large language models. arXiv preprint arXiv:2308.11764.",
1112
+ "Fabiano, F.; Pallagani, V.; Ganapini, M. B.; Horesh, L.; Loreggia, A.; Murugesan, K.; Rossi, F.; and Srivastava, B. 2023. Fast and Slow Planning. arXiv preprint arXiv:2303.04283.",
1113
+ "Feng, X.; Wan, Z.; Wen, M.; Wen, Y.; Zhang, W.; and Wang, J. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179.",
1114
+ "Gandhi, K.; Sadigh, D.; and Goodman, N. D. 2023. Strategic Reasoning with Language Models. arXiv preprint arXiv:2305.19165.",
1115
+ "Ge, Y.; Hua, W.; Ji, J.; Tan, J.; Xu, S.; and Zhang, Y. 2023. Openagi: When llm meets domain experts. arXiv preprint arXiv:2304.04370.",
1116
+ "Georgievski, I.; and Aiello, M. 2015. HTN planning: Overview, comparison, and beyond. Artif. Intell., 222: 124-156.",
1117
+ "Gerevini, A.; and Serina, I. 2002. LPG: A Planner Based on Local Search for Planning Graphs with Action Costs. In Aips, volume 2, 281-290.",
1118
+ "Ghallab, M.; Nau, D.; and Traverso, P. 2004. Automated Planning: Theory and Practice. The Morgan Kaufmann Series in Artificial Intelligence. Amsterdam: Morgan Kaufmann. ISBN 978-1-55860-856-6."
1119
+ ],
1120
+ "bbox": [
1121
+ 517,
1122
+ 68,
1123
+ 911,
1124
+ 888
1125
+ ],
1126
+ "page_idx": 8
1127
+ },
1128
+ {
1129
+ "type": "list",
1130
+ "sub_type": "ref_text",
1131
+ "list_items": [
1132
+ "Gragera, A.; and Pozanco, A. 2023. Exploring the Limitations of using Large Language Models to Fix Planning Tasks.",
1133
+ "Gramopadhye, M.; and Szafir, D. 2022. Generating executable action plans with environmentally-aware language models. arXiv preprint arXiv:2210.04964.",
1134
+ "Graule, M. A.; and Isler, V. 2023. GG-LLM: Geometrically Grounding Large Language Models for Zero-shot Human Activity Forecasting in Human-Aware Task Planning. arXiv preprint arXiv:2310.20034.",
1135
+ "Gu, Q.; Kuwajerwala, A.; Morin, S.; Jatavallabhula, K. M.; Sen, B.; Agarwal, A.; Rivera, C.; Paul, W.; Ellis, K.; Chellappa, R.; et al. 2023. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. arXiv preprint arXiv:2309.16650.",
1136
+ "Guan, L.; Valmeekam, K.; Sreedharan, S.; and Kambhampati, S. 2023. Leveraging Pre-trained Large Language Models to Construct and Utilize World Models for Model-based Task Planning. arXiv preprint arXiv:2305.14909.",
1137
+ "Hao, S.; Gu, Y.; Ma, H.; Hong, J. J.; Wang, Z.; Wang, D. Z.; and Hu, Z. 2023a. Reasoning with language model is planning with world model. arXiv preprint arXiv:2305.14992.",
1138
+ "Hao, S.; Liu, T.; Wang, Z.; and Hu, Z. 2023b. ToolkenGPT: Augmenting Frozen Language Models with Massive Tools via Tool Embeddings. arXiv preprint arXiv:2305.11554.",
1139
+ "Hazra, R.; Martires, P. Z. D.; and De Raedt, L. 2023. SayCanPay: Heuristic Planning with Large Language Models using Learnable Domain Knowledge. arXiv preprint arXiv:2308.12682.",
1140
+ "Howey, R.; Long, D.; and Fox, M. 2004. VAL: automatic plan validation, continuous effects and mixed initiative planning using PDDL. In 16th IEEE International Conference on Tools with Artificial Intelligence, 294-301.",
1141
+ "Hsieh, C.-Y.; Chen, S.-A.; Li, C.-L.; Fujii, Y.; Ratner, A.; Lee, C.-Y.; Krishna, R.; and Pfister, T. 2023. Tool documentation enables zero-shot tool-usage with large language models. arXiv preprint arXiv:2308.00675.",
1142
+ "Hu, B.; Zhao, C.; Zhang, P.; Zhou, Z.; Yang, Y.; Xu, Z.; and Liu, B. 2023a. Enabling Efficient Interaction between an Algorithm Agent and an LLM: A Reinforcement Learning Approach. arXiv preprint arXiv:2306.03604.",
1143
+ "Hu, H.; Lu, H.; Zhang, H.; Lam, W.; and Zhang, Y. 2023b. Chain-of-Symbol Prompting Elicits Planning in Large Language Models. arXiv preprint arXiv:2305.10276.",
1144
+ "Hu, M.; Mu, Y.; Yu, X.; Ding, M.; Wu, S.; Shao, W.; Chen, Q.; Wang, B.; Qiao, Y.; and Luo, P. 2023c. Tree-Planner: Efficient Close-loop Task Planning with Large Language Models. arXiv preprint arXiv:2310.08582.",
1145
+ "Hu, P.; Qi, J.; Li, X.; Li, H.; Wang, X.; Quan, B.; Wang, R.; and Zhou, Y. 2023d. Tree-of-mixed-thought: Combining fast and slow thinking for multi-hop visual reasoning. arXiv preprint arXiv:2308.09658.",
1146
+ "Hu, Z.; and Shu, T. 2023. Language Models, Agent Models, and World Models: The LAW for Machine Reasoning and Planning. arXiv:2312.05230."
1147
+ ],
1148
+ "bbox": [
1149
+ 83,
1150
+ 68,
1151
+ 480,
1152
+ 888
1153
+ ],
1154
+ "page_idx": 9
1155
+ },
1156
+ {
1157
+ "type": "list",
1158
+ "sub_type": "ref_text",
1159
+ "list_items": [
1160
+ "Hua, W.; Fan, L.; Li, L.; Mei, K.; Ji, J.; Ge, Y.; Hemphill, L.; and Zhang, Y. 2023. War and peace (waragent): Large language model-based multi-agent simulation of world wars. arXiv preprint arXiv:2311.17227.",
1161
+ "Huang, W.; Abbeel, P.; Pathak, D.; and Mordatch, I. 2022a. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International Conference on Machine Learning, 9118-9147. PMLR.",
1162
+ "Huang, W.; Wang, C.; Zhang, R.; Li, Y.; Wu, J.; and Fei-Fei, L. 2023a. Voxposer: Composable 3d value maps for robotic manipulation with language models. arXiv preprint arXiv:2307.05973.",
1163
+ "Huang, W.; Xia, F.; Shah, D.; Driess, D.; Zeng, A.; Lu, Y.; Florence, P.; Mordatch, I.; Levine, S.; Hausman, K.; et al. 2023b. Grounded decoding: Guiding text generation with grounded models for robot control. arXiv preprint arXiv:2303.00855.",
1164
+ "Huang, W.; Xia, F.; Xiao, T.; Chan, H.; Liang, J.; Florence, P.; Zeng, A.; Tompson, J.; Mordatch, I.; Chebotar, Y.; et al. 2022b. Inner monologue: Embodied reasoning through planning with language models. arXiv preprint arXiv:2207.05608.",
1165
+ "Jha, S. K.; Jha, S.; Lincoln, P.; Bastian, N. D.; Velasquez, A.; Ewetz, R.; and Neema, S. 2023. Neuro Symbolic Reasoning for Planning: Counterexample Guided Inductive Synthesis using Large Language Models and Satisfiability Solving. arXiv preprint arXiv:2309.16436.",
1166
+ "Joublin, F.; Ceravola, A.; Smirnov, P.; Ocker, F.; Deigmoeller, J.; Belardinelli, A.; Wang, C.; Hasler, S.; Tanneberg, D.; and Gienger, M. 2023. CoPAL: Corrective Planning of Robot Actions with Large Language Models. arXiv preprint arXiv:2310.07263.",
1167
+ "Kannan, S. S.; Venkatesh, V. L.; and Min, B.-C. 2023. SMART-LLM: Smart Multi-Agent Robot Task Planning using Large Language Models. arXiv preprint arXiv:2309.10062.",
1168
+ "Kant, Y.; Ramachandran, A.; Yenamandra, S.; Gilitschenski, I.; Batra, D.; Szot, A.; and Agrawal, H. 2022. Housekeep: Tidying virtual households using commonsense reasoning. In European Conference on Computer Vision, 355-373. Springer.",
1169
+ "Kautz, H. A. 2022. The third AI summer: AAAI Robert S. Engelmore Memorial Lecture. AI Magazine, 43(1): 105-125.",
1170
+ "Kelly, J.; Calderwood, A.; Wardrip-Fruin, N.; and Mateas, M. 2023. There and back again: extracting formal domains for controllable neurosymbolic story authoring. In Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment, volume 19, 64-74.",
1171
+ "Kim, G.; Kim, T.; Kannan, S. S.; Venkatesh, V. L.; Kim, D.; and Min, B.-C. 2023. DynaCon: Dynamic Robot Planner with Contextual Awareness via LLMs. arXiv preprint arXiv:2309.16031.",
1172
+ "Kirk, J. R.; Wray, R. E.; and Laird, J. E. 2023. Exploiting Language Models as a Source of Knowledge for Cognitive Agents. arXiv preprint arXiv:2310.06846."
1173
+ ],
1174
+ "bbox": [
1175
+ 517,
1176
+ 68,
1177
+ 913,
1178
+ 888
1179
+ ],
1180
+ "page_idx": 9
1181
+ },
1182
+ {
1183
+ "type": "list",
1184
+ "sub_type": "ref_text",
1185
+ "list_items": [
1186
+ "Lenat, D.; and Marcus, G. 2023. Getting from generative ai to trustworthy ai: What llms might learn from cyc. arXiv preprint arXiv:2308.04445.",
1187
+ "Li, M.; Song, F.; Yu, B.; Yu, H.; Li, Z.; Huang, F.; and Li, Y. 2023a. Api-bank: A benchmark for tool-augmented llms. arXiv preprint arXiv:2304.08244.",
1188
+ "Li, Y.; Kamra, N.; Desai, R.; and Halevy, A. 2023b. Human-Centered Planning. arXiv preprint arXiv:2311.04403.",
1189
+ "Lin, B. Y.; Fu, Y.; Yang, K.; Ammanabrolu, P.; Brahman, F.; Huang, S.; Bhagavatula, C.; Choi, Y.; and Ren, X. 2023a. SwiftSage: A Generative Agent with Fast and Slow Thinking for Complex Interactive Tasks. arXiv preprint arXiv:2305.17390.",
1190
+ "Lin, H.; Zala, A.; Cho, J.; and Bansal, M. 2023b. Videodirectorgpt: Consistent multi-scene video generation via llm-guided planning. arXiv preprint arXiv:2309.15091.",
1191
+ "Lin, K.; Agia, C.; Migimatsu, T.; Pavone, M.; and Bohg, J. 2023c. Text2motion: From natural language instructions to feasible plans. arXiv preprint arXiv:2303.12153.",
1192
+ "Liu, B.; Jiang, Y.; Zhang, X.; Liu, Q.; Zhang, S.; Biswas, J.; and Stone, P. 2023. Llm+ p: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477.",
1193
+ "Liu, Z.; Bahety, A.; and Song, S. 2023. Reflect: Summarizing robot experiences for failure explanation and correction. arXiv preprint arXiv:2306.15724.",
1194
+ "Logeswaran, L.; Sohn, S.; Lyu, Y.; Liu, A. Z.; Kim, D.-K.; Shim, D.; Lee, M.; and Lee, H. 2023. Code Models are Zero-shot Precondition Reasoners. arXiv preprint arXiv:2311.09601.",
1195
+ "Lu, P.; Peng, B.; Cheng, H.; Galley, M.; Chang, K.-W.; Wu, Y. N.; Zhu, S.-C.; and Gao, J. 2023a. Chameleon: Plug-and-play compositional reasoning with large language models. arXiv preprint arXiv:2304.09842.",
1196
+ "Lu, Y.; Feng, W.; Zhu, W.; Xu, W.; Wang, X. E.; Eckstein, M.; and Wang, W. Y. 2022. Neuro-symbolic causal language planning with commonsense prompting. arXiv eprints, arXiv-2206.",
1197
+ "Lu, Y.; Lu, P.; Chen, Z.; Zhu, W.; Wang, X. E.; and Wang, W. Y. 2023b. Multimodal Procedural Planning via Dual Text-Image Prompting. arXiv preprint arXiv:2305.01795.",
1198
+ "Luo, L.; Li, Y.-F.; Haffari, G.; and Pan, S. 2023. Reasoning on graphs: Faithful and interpretable large language model reasoning. arXiv preprint arXiv:2310.01061.",
1199
+ "Mandi, Z.; Jain, S.; and Song, S. 2023. Roco: Dialectic multi-robot collaboration with large language models. arXiv preprint arXiv:2307.04738.",
1200
+ "McDermott, D.; Ghallab, M.; Howe, A.; Knoblock, C.; Ram, A.; Veloso, M.; Weld, D.; and Wilkins, D. 1998. PDDL-the planning domain definition language.",
1201
+ "Momennejad, I.; Hasanbeig, H.; Vieira, F.; Sharma, H.; Ness, R. O.; Jojic, N.; Palangi, H.; and Larson, J. 2023. Evaluating Cognitive Maps and Planning in Large Language Models with CogEval. arXiv preprint arXiv:2309.15129."
1202
+ ],
1203
+ "bbox": [
1204
+ 83,
1205
+ 68,
1206
+ 478,
1207
+ 888
1208
+ ],
1209
+ "page_idx": 10
1210
+ },
1211
+ {
1212
+ "type": "list",
1213
+ "sub_type": "ref_text",
1214
+ "list_items": [
1215
+ "Naik, R.; Chandrasekaran, V.; Yuksekgonul, M.; Palangi, H.; and Nushi, B. 2023. Diversity of Thought Improves Reasoning Abilities of Large Language Models. arXiv preprint arXiv:2310.07088.",
1216
+ "Nottingham, K.; Ammanabrolu, P.; Suhr, A.; Choi, Y.; Hajishirzi, H.; Singh, S.; and Fox, R. 2023. Do embodied agents dream of pixelated sheep?: Embodied decision making using language guided world modelling. arXiv preprint arXiv:2301.12050.",
1217
+ "Pallagani, V.; Muppasani, B.; Murugesan, K.; Rossi, F.; Horesh, L.; Srivastava, B.; Fabiano, F.; and Loreggia, A. 2022. Plansformer: Generating symbolic plans using transformers. arXiv preprint arXiv:2212.08681.",
1218
+ "Pallagani, V.; Muppasani, B.; Murugesan, K.; Rossi, F.; Srivastava, B.; Horesh, L.; Fabiano, F.; and Loreggia, A. 2023a. Understanding the Capabilities of Large Language Models for Automated Planning. arXiv preprint arXiv:2305.16151.",
1219
+ "Pallagani, V.; Muppasani, B.; Srivastava, B.; Rossi, F.; Horesh, L.; Murugesan, K.; Loreggia, A.; Fabiano, F.; Joseph, R.; Kethepalli, Y.; et al. 2023b. Plansformer Tool: Demonstrating Generation of Symbolic Plans Using Transformers. In *IJCAI*, volume 2023, 7158–7162. International Joint Conferences on Artificial Intelligence.",
1220
+ "Pan, L.; Albalak, A.; Wang, X.; and Wang, W. Y. 2023. Logic-lm: Empowering large language models with symbolic solvers for faithful logical reasoning. arXiv preprint arXiv:2305.12295.",
1221
+ "Parakh, M.; Fong, A.; Simeonov, A.; Gupta, A.; Chen, T.; and Agrawal, P. 2023. Human-Assisted Continual Robot Learning with Foundation Models. arXiv preprint arXiv:2309.14321.",
1222
+ "Raimondo, S.; Pal, C.; Liu, X.; Vazquez, D.; and Palacios, H. 2023. Improving Generalization in Task-oriented Dialogues with Workflows and Action Plans. arXiv preprint arXiv:2306.01729.",
1223
+ "Rajvanshi, A.; Sikka, K.; Lin, X.; Lee, B.; Chiu, H.-P.; and Velasquez, A. 2023. Saynav: Grounding large language models for dynamic planning to navigation in new environments. arXiv preprint arXiv:2309.04077.",
1224
+ "Raman, S. S.; Cohen, V.; Rosen, E.; Idrees, I.; Paulius, D.; and TELEX, S. 2022. Planning with large language models via corrective re-prompting. In NeurIPS 2022 Foundation Models for Decision Making Workshop.",
1225
+ "Rana, K.; Haviland, J.; Garg, S.; Abou-Chakra, J.; Reid, I.; and Suenderhauf, N. 2023. Sayplan: Grounding large language models using 3d scene graphs for scalable task planning. arXiv preprint arXiv:2307.06135.",
1226
+ "Ren, A. Z.; Dixit, A.; Bodrova, A.; Singh, S.; Tu, S.; Brown, N.; Xu, P.; Takayama, L.; Xia, F.; Varley, J.; et al. 2023. Robots that ask for help: Uncertainty alignment for large language model planners. arXiv preprint arXiv:2307.01928.",
1227
+ "Ruan, J.; Chen, Y.; Zhang, B.; Xu, Z.; Bao, T.; Du, G.; Shi, S.; Mao, H.; Zeng, X.; and Zhao, R. 2023. Tptu: Task planning and tool usage of large language model-based ai agents. arXiv preprint arXiv:2308.03427.",
1228
+ "Russell, S.; and Norvig, P. 2003. Artificial Intelligence, A Modern Approach. Second Edition."
1229
+ ],
1230
+ "bbox": [
1231
+ 517,
1232
+ 68,
1233
+ 911,
1234
+ 888
1235
+ ],
1236
+ "page_idx": 10
1237
+ },
1238
+ {
1239
+ "type": "list",
1240
+ "sub_type": "ref_text",
1241
+ "list_items": [
1242
+ "Sakib, M. S.; and Sun, Y. 2023. From Cooking Recipes to Robot Task Trees—Improving Planning Correctness and Task Efficiency by Leveraging LLMs with a Knowledge Network. arXiv preprint arXiv:2309.09181.",
1243
+ "Sarkisyan, C.; Korchemnyi, A.; Kovalev, A. K.; and Panov, A. I. 2023. Evaluation of Pretrained Large Language Models in Embodied Planning Tasks. In International Conference on Artificial General Intelligence, 222-232. Springer.",
1244
+ "Sermanet, P.; Ding, T.; Zhao, J.; Xia, F.; Dwibedi, D.; Gopalakrishnan, K.; Chan, C.; Dulac-Arnold, G.; Maddineni, S.; Joshi, N. J.; et al. 2023. RoboVQA: Multimodal Long-Horizon Reasoning for Robotics. arXiv preprint arXiv:2311.00899.",
1245
+ "Shah, D.; Equi, M.; Osinski, B.; Xia, F.; Ichter, B.; and Levine, S. 2023. Navigation with large language models: Semantic guesswork as a heuristic for planning. arXiv preprint arXiv:2310.10103.",
1246
+ "Shen, Y.; Song, K.; Tan, X.; Li, D.; Lu, W.; and Zhuang, Y. 2023. Huggingppt: Solving ai tasks with chatgpt and its friends in huggingface. arXiv preprint arXiv:2303.17580.",
1247
+ "Shirai, K.; Beltran-Hernandez, C. C.; Hamaya, M.; Hashimoto, A.; Tanaka, S.; Kawaharazuka, K.; Tanaka, K.; Ushiku, Y.; and Mori, S. 2023. Vision-Language Interpreter for Robot Task Planning. arXiv preprint arXiv:2311.00967.",
1248
+ "Silver, T.; Dan, S.; Srinivas, K.; Tenenbaum, J. B.; Kaelbling, L. P.; and Katz, M. 2023. Generalized Planning in PDDL Domains with Pretrained Large Language Models. arXiv preprint arXiv:2305.11014.",
1249
+ "Silver, T.; Hariprasad, V.; Shuttleworth, R. S.; Kumar, N.; Lozano-Pérez, T.; and Kaelbling, L. P. 2022. PDDL planning with pretrained large language models. In NeurIPS 2022 Foundation Models for Decision Making Workshop.",
1250
+ "Singh, I.; Blukis, V.; Mousavian, A.; Goyal, A.; Xu, D.; Tremblay, J.; Fox, D.; Thomason, J.; and Garg, A. 2023. ProgPrompt: program generation for situated robot task planning using large language models. Autonomous Robots, 1-14.",
1251
+ "Song, C. H.; Wu, J.; Washington, C.; Sadler, B. M.; Chao, W.-L.; and Su, Y. 2023. Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2998-3009.",
1252
+ "Sumers, T.; Yao, S.; Narasimhan, K.; and Griffiths, T. L. 2023. Cognitive architectures for language agents. arXiv preprint arXiv:2309.02427.",
1253
+ "Sun, H.; Zhuang, Y.; Kong, L.; Dai, B.; and Zhang, C. 2023. AdaPlanner: Adaptive Planning from Feedback with Language Models. arXiv preprint arXiv:2305.16653.",
1254
+ "Tang, X.; Zheng, Z.; Li, J.; Meng, F.; Zhu, S.-C.; Liang, Y.; and Zhang, M. 2023. Large Language Models are In-Context Semantic Reasoners rather than Symbolic Reasoners. arXiv preprint arXiv:2305.14825.",
1255
+ "Valmeekam, K.; Marquez, M.; and Kambhampati, S. 2023. Can Large Language Models Really Improve by Self-critiquing Their Own Plans? arXiv preprint arXiv:2310.08118."
1256
+ ],
1257
+ "bbox": [
1258
+ 83,
1259
+ 68,
1260
+ 480,
1261
+ 888
1262
+ ],
1263
+ "page_idx": 11
1264
+ },
1265
+ {
1266
+ "type": "list",
1267
+ "sub_type": "ref_text",
1268
+ "list_items": [
1269
+ "Valmeekam, K.; Marquez, M.; Olmo, A.; Sreedharan, S.; and Kambhampati, S. 2023a. PlanBench: An Extensible Benchmark for Evaluating Large Language Models on Planning and Reasoning about Change. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track.",
1270
+ "Valmeekam, K.; Olmo, A.; Sreedharan, S.; and Kambhampati, S. 2022. Large Language Models Still Can't Plan (A Benchmark for LLMs on Planning and Reasoning about Change). arXiv preprint arXiv:2206.10498.",
1271
+ "Valmeekam, K.; Sreedharan, S.; Marquez, M.; Olmo, A.; and Kambhampati, S. 2023b. On the planning abilities of large language models (a critical investigation with a proposed benchmark). arXiv preprint arXiv:2302.06706.",
1272
+ "Wang, J.; Tong, J.; Tan, K.; Vorobeychik, Y.; and Kantaros, Y. 2023a. Conformal Temporal Logic Planning using Large Language Models: Knowing When to Do What and When to Ask for Help. arXiv preprint arXiv:2309.10092.",
1273
+ "Wang, L.; Xu, W.; Lan, Y.; Hu, Z.; Lan, Y.; Lee, R. K.-W.; and Lim, E.-P. 2023b. Plan-and-solve prompting: Improving zero-shot chain-of-thought reasoning by large language models. arXiv preprint arXiv:2305.04091.",
1274
+ "Wang, X.; Caccia, L.; Ostapenko, O.; Yuan, X.; and Sordoni, A. 2023c. Guiding language model reasoning with planning tokens. arXiv preprint arXiv:2310.05707.",
1275
+ "Wang, Z.; Cai, S.; Liu, A.; Ma, X.; and Liang, Y. 2023d. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. arXiv preprint arXiv:2302.01560.",
1276
+ "Webb, T.; Mondal, S. S.; Wang, C.; Krabach, B.; and Momennejad, I. 2023. A Prefrontal Cortex-inspired Architecture for Planning in Large Language Models. arXiv preprint arXiv:2310.00194.",
1277
+ "Wei, L.; He, Z.; Zhao, H.; and Yao, Q. 2023. Unleashing the Power of Graph Learning through LLM-based Autonomous Agents. arXiv preprint arXiv:2309.04565.",
1278
+ "Wong, L.; Grand, G.; Lew, A. K.; Goodman, N. D.; Mansinghka, V. K.; Andreas, J.; and Tenenbaum, J. B. 2023. From Word Models to World Models: Translating from Natural Language to the Probabilistic Language of Thought. arXiv preprint arXiv:2306.12672.",
1279
+ "Wu, Y.; Min, S. Y.; Bisk, Y.; Salakhutdinov, R.; Azaria, A.; Li, Y.; Mitchell, T.; and Prabhumoye, S. 2023a. Plan, Eliminate, and Track-Language Models are Good Teachers for Embodied Agents. arXiv preprint arXiv:2305.02412.",
1280
+ "Wu, Z.; Ai, B.; and Hsu, D. 2023. Integrating Common Sense and Planning with Large Language Models for Room Tidying. In RSS 2023 Workshop on Learning for Task and Motion Planning.",
1281
+ "Wu, Z.; Wang, Z.; Xu, X.; Lu, J.; and Yan, H. 2023b. Embodied task planning with large language models. arXiv preprint arXiv:2307.01848.",
1282
+ "Xie, Y.; Yu, C.; Zhu, T.; Bai, J.; Gong, Z.; and Soh, H. 2023. Translating natural language to planning goals with large-language models. arXiv preprint arXiv:2302.05128."
1283
+ ],
1284
+ "bbox": [
1285
+ 517,
1286
+ 68,
1287
+ 913,
1288
+ 888
1289
+ ],
1290
+ "page_idx": 11
1291
+ },
1292
+ {
1293
+ "type": "list",
1294
+ "sub_type": "ref_text",
1295
+ "list_items": [
1296
+ "Xu, B.; Liu, X.; Shen, H.; Han, Z.; Li, Y.; Yue, M.; Peng, Z.; Liu, Y.; Yao, Z.; and Xu, D. 2023a. Gentopia: A collaborative platform for tool-augmented llms. arXiv preprint arXiv:2308.04030.",
1297
+ "Xu, L. 1995. Case based reasoning. IEEE Potentials, 13(5): 10-13.",
1298
+ "Xu, M.; Huang, P.; Yu, W.; Liu, S.; Zhang, X.; Niu, Y.; Zhang, T.; Xia, F.; Tan, J.; and Zhao, D. 2023b. Creative Robot Tool Use with Large Language Models. arXiv preprint arXiv:2310.13065.",
1299
+ "Yang, J.; Chen, X.; Qian, S.; Madaan, N.; Iyengar, M.; Fouhey, D. F.; and Chai, J. 2023a. LLM-Grounder: Open-Vocabulary 3D Visual Grounding with Large Language Model as an Agent. arXiv preprint arXiv:2309.12311.",
1300
+ "Yang, R.; Hou, M.; Wang, J.; and Zhang, F. 2023b. OceanChat: Piloting Autonomous Underwater Vehicles in Natural Language. arXiv preprint arXiv:2309.16052.",
1301
+ "Yang, Y.; Gaglione, J.-R.; and Topcu, U. 2022. Learning Automata-Based Task Knowledge Representation from Large-Scale Generative Language Models. arXiv preprint arXiv:2212.01944.",
1302
+ "Yang, Y.; and Tomar, A. 2023. On the Planning, Search, and Memorization Capabilities of Large Language Models. arXiv preprint arXiv:2309.01868.",
1303
+ "Yang, Z. 2023. Neuro-Symbolic AI Approaches to Enhance Deep Neural Networks with Logical Reasoning and Knowledge Integration. Ph.D. thesis, Arizona State University.",
1304
+ "Yang, Z.; Ishay, A.; and Lee, J. 2023. Coupling Large Language Models with Logic Programming for Robust and General Reasoning from Text. arXiv preprint arXiv:2307.07696.",
1305
+ "Yao, S.; Yu, D.; Zhao, J.; Shafran, I.; Griffiths, T. L.; Cao, Y.; and Narasimhan, K. 2023. Tree of thoughts: Deliberate problem solving with large language models. arXiv preprint arXiv:2305.10601.",
1306
+ "Yoneda, T.; Fang, J.; Li, P.; Zhang, H.; Jiang, T.; Lin, S.; Picker, B.; Yunis, D.; Mei, H.; and Walter, M. R. 2023. Statler: State-maintaining language models for embodied reasoning. arXiv preprint arXiv:2306.17840.",
1307
+ "You, W.; Wu, W.; Liang, Y.; Mao, S.; Wu, C.; Cao, M.; Cai, Y.; Guo, Y.; Xia, Y.; Wei, F.; et al. 2023. EIPE-text: Evaluation-Guided Iterative Plan Extraction for Long-Form Narrative Text Generation. arXiv preprint arXiv:2310.08185.",
1308
+ "Yuan, H.; Zhang, C.; Wang, H.; Xie, F.; Cai, P.; Dong, H.; and Lu, Z. 2023a. Plan4mc: Skill reinforcement learning and planning for open-world northwest tasks. arXiv preprint arXiv:2303.16563.",
1309
+ "Yuan, S.; Chen, J.; Fu, Z.; Ge, X.; Shah, S.; Jankowski, C. R.; Yang, D.; and Xiao, Y. 2023b. Distilling Script Knowledge from Large Language Models for Constrained Language Planning. arXiv preprint arXiv:2305.05252.",
1310
+ "Zelikman, E.; Huang, Q.; Poesia, G.; Goodman, N.; and Haber, N. 2023. Parsel: Algorithmic Reasoning with Language Models by Composing Decompositions. In Thirty-seventh Conference on Neural Information Processing Systems."
1311
+ ],
1312
+ "bbox": [
1313
+ 83,
1314
+ 68,
1315
+ 478,
1316
+ 888
1317
+ ],
1318
+ "page_idx": 12
1319
+ },
1320
+ {
1321
+ "type": "list",
1322
+ "sub_type": "ref_text",
1323
+ "list_items": [
1324
+ "Zhang, B.; and Soh, H. 2023. Large language models as zero-shot human models for human-robot interaction. arXiv preprint arXiv:2303.03548.",
1325
+ "Zhang, C.; Liu, L.; Wang, J.; Wang, C.; Sun, X.; Wang, H.; and Cai, M. 2023a. Prefer: Prompt ensemble learning via feedback-reflect-refine. arXiv preprint arXiv:2308.12033.",
1326
+ "Zhang, F.; Jin, K.; and Zhuo, H. H. 2023. Planning with Logical Graph-based Language Model for Instruction Generation. arXiv:2308.13782.",
1327
+ "Zhang, H.; Du, W.; Shan, J.; Zhou, Q.; Du, Y.; Tenenbaum, J. B.; Shu, T.; and Gan, C. 2023b. Building cooperative embodied agents modularly with large language models. arXiv preprint arXiv:2307.02485.",
1328
+ "Zhang, J.; Zhang, J.; Pertsch, K.; Liu, Z.; Ren, X.; Chang, M.; Sun, S.-H.; and Lim, J. J. 2023c. Bootstrap your own skills: Learning to solve new tasks with large language model guidance. arXiv preprint arXiv:2310.10021.",
1329
+ "Zhao, Z.; Lee, W. S.; and Hsu, D. 2023. Large Language Models as Commonsense Knowledge for Large-Scale Task Planning. arXiv preprint arXiv:2305.14078.",
1330
+ "Zheng, L.; Chiang, W.-L.; Sheng, Y.; Zhuang, S.; Wu, Z.; Zhuang, Y.; Lin, Z.; Li, Z.; Li, D.; Xing, E. P.; Zhang, H.; Gonzalez, J. E.; and Stoica, I. 2023a. Judging LLM-as-a-judge with MT-Bench and Chatbot Arena. arXiv:2306.05685.",
1331
+ "Zheng, S.; Liu, J.; Feng, Y.; and Lu, Z. 2023b. Steve-Eye: Equipping LLM-based Embodied Agents with Visual Perception in Open Worlds. arXiv preprint arXiv:2310.13255.",
1332
+ "Zhou, Z.; Song, J.; Yao, K.; Shu, Z.; and Ma, L. 2023. ISR-LLM: Iterative Self-Refined Large Language Model for Long-Horizon Sequential Task Planning. arXiv preprint arXiv:2308.13724."
1333
+ ],
1334
+ "bbox": [
1335
+ 517,
1336
+ 68,
1337
+ 911,
1338
+ 536
1339
+ ],
1340
+ "page_idx": 12
1341
+ }
1342
+ ]
2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02500/3d935656-7e01-479c-8229-83bff2b1f1b7_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9c48eae399fc371db8e03baf45024d82d90ba647cff2affabfe334097dff9dc
3
+ size 520275
2401.02xxx/2401.02500/full.md ADDED
@@ -0,0 +1,347 @@
1
+ # On the Prospects of Incorporating Large Language Models (LLMs) in Automated Planning and Scheduling (APS)
2
+
3
+ Vishal Pallagani<sup>1</sup>, Kaushik Roy<sup>1</sup>, Bharath Muppasani<sup>1</sup>, Francesco Fabiano<sup>2</sup>, Andrea Loreggia<sup>3</sup>, Keerthiram Murugesan<sup>4</sup>, Biplav Srivastava<sup>1</sup>, Francesca Rossi<sup>4</sup>, Lior Horesh<sup>4</sup>, Amit Sheth<sup>1</sup>
4
+
5
+ <sup>1</sup>University of South Carolina
6
+
7
+ $^{2}$ New Mexico State University
8
+
9
+ <sup>3</sup>University of Brescia
10
+
11
+ <sup>4</sup>IBM Research
12
+
13
+ # Abstract
14
+
15
+ Automated Planning and Scheduling is among the growing areas in Artificial Intelligence (AI) where mention of LLMs has gained popularity. Based on a comprehensive review of 126 papers, this paper investigates eight categories based on the unique applications of LLMs in addressing various aspects of planning problems: language translation, plan generation, model construction, multi-agent planning, interactive planning, heuristics optimization, tool integration, and brain-inspired planning. For each category, we articulate the issues considered and existing gaps. A critical insight resulting from our review is that the true potential of LLMs unfolds when they are integrated with traditional symbolic planners, pointing towards a promising neuro-symbolic approach. This approach effectively combines the generative aspects of LLMs with the precision of classical planning methods. By synthesizing insights from existing literature, we underline the potential of this integration to address complex planning challenges. Our goal is to encourage the ICAPS community to recognize the complementary strengths of LLMs and symbolic planners, advocating for a direction in automated planning that leverages these synergistic capabilities to develop more advanced and intelligent planning systems.
16
+
17
+ # Introduction
18
+
19
+ As a sub-field of Artificial Intelligence (Russell and Norvig 2003), Automated Planning and Scheduling (Ghallab, Nau, and Traverso 2004) refers to developing algorithms and systems to generate plans or sequences of actions to achieve specific goals in a given environment or problem domain. APS is a valuable tool in domains where there is a need for intelligent decision-making, goal achievement, and efficient resource utilization. It enables the automation of complex tasks, making systems more capable and adaptable in dynamic environments. Over time, APS has evolved from the early development of robust theoretical foundations to practical applications in diverse sectors like manufacturing, space exploration, and personal scheduling. This evolution underscores the versatility and critical significance of APS.
20
+
21
+ In parallel with advancements in APS, the development and proliferation of LLMs have marked a substantial leap in AI, particularly within computational linguistics. Evolving from early efforts in natural language processing (NLP), LLMs have undergone significant transformation. Initially focused on basic tasks like word prediction and syntax
22
+
23
+ analysis, newer models are characterized by their ability to generate coherent, contextually relevant text and perform diverse, complex linguistic tasks. Trained on extensive text corpora, LLMs have mastered human-like language patterns. Their recent success in various NLP tasks has prompted efforts to apply these models in APS. There is a notable shift towards using language constructs to specify aspects of planning, such as preconditions, effects, and goals, rather than relying solely on traditional planning domain languages like PDDL.
24
+
25
+ This paper presents an exhaustive literature review exploring the integration of LLMs in APS across eight categories: Language Translation, Plan Generation, Model Construction, Multi-agent Planning, Interactive Planning, Heuristics Optimization, Brain-Inspired Planning, and Tool Integration. Table 1 provides the description for the eight categories. Our comprehensive analysis of 126 papers not only categorizes LLMs' diverse contributions but also identifies significant gaps in each domain. Through our review, we put forward the following position:
26
+
27
+ # Position Statement
28
+
29
+ Integrating LLMs into APS marks a pivotal advancement, bridging the gap between the advanced reasoning of traditional APS and the nuanced language understanding of LLMs. Traditional APS systems excel in structured, logical planning but often lack flexibility and contextual adaptability, a gap readily filled by LLMs. Conversely, while LLMs offer unparalleled natural language processing and a vast knowledge base, they fail to generate precise, actionable plans where APS systems thrive. This integration surpasses the limitations of each standalone method, offering a dynamic and context-aware planning approach, while also scaling up the traditional use of data and past experiences in the planning process.
30
+
31
+ In the forthcoming sections, we delve into the background of LLMs and classical planning problem, accompanied by the identification of literature. This sets the stage for an in-depth exploration of the application of LLMs in APS, where we critically examine the strengths and limitations of LLMs. Our position on the emerging neuro-symbolic AI paradigm
32
+
33
+ <table><tr><td>Category</td><td>Description</td></tr><tr><td>Language Translation</td><td>Involves converting natural language into structured planning languages or formats like PDDL and vice-versa, enhancing the interface between human linguistic input and machine-understandable planning directives.</td></tr><tr><td>Plan Generation</td><td>Entails the creation of plans or strategies directly by LLMs, focusing on generating actionable sequences or decision-making processes.</td></tr><tr><td>Model Construction</td><td>Utilizes LLMs to construct or refine world and domain models essential for accurate and effective planning.</td></tr><tr><td>Multi-agent Planning</td><td>Focuses on scenarios involving multiple agents, where LLMs contribute to coordination and cooperative strategy development.</td></tr><tr><td>Interactive Planning</td><td>Centers on scenarios requiring iterative feedback or interactive planning with users, external verifiers, or environment, emphasizing the adaptability of LLMs to dynamic inputs.</td></tr><tr><td>Heuristics Optimization</td><td>Applies LLMs in optimizing planning processes through refining existing plans or providing heuristic assistance to symbolic planners.</td></tr><tr><td>Tool Integration</td><td>Encompasses studies where LLMs act as central orchestrators or coordinators in a tool ecosystem, interfacing with planners, theorem provers, and other systems.</td></tr><tr><td>Brain-Inspired Planning</td><td>Covers research focusing on LLM architectures inspired by neurological or cognitive processes, particularly to enhance planning capabilities.</td></tr></table>
34
+
35
+ Table 1: Comprehensive description of the eight categories utilizing LLMs in APS
36
+
37
+ is central to our discussion, highlighting its unique advantages over purely neural network-based (i.e., statistical AI) or symbolic AI approaches. Finally, we will discuss prospective developments, address potential challenges, and identify promising opportunities in the field.
38
+
39
+ # Background
40
+
41
+ # Large Language Models
42
+
43
+ Large language models are neural network models with upwards of $\sim 3$ billion parameters that are trained on extremely large corpora of natural language data (trillions of tokens/words). These models are proficient in interpreting, generating, and contextualizing human language, leading to applications ranging from text generation to language-driven reasoning tasks. The evolution of LLMs in NLP began with rule-based models, progressed through statistical models, and achieved a significant breakthrough with the introduction of neural network-based models. The shift to sequence-based neural networks, with Recurrent Neural Networks (RNNs) and Long Short-Term Memory (LSTM) networks, marked a notable advancement due to their capability to process information and context over long sequences. Shortcomings in RNNs and LSTMs due to vanishing gradients and, consequently, loss of very long sequence contexts lead to the transformer model, which introduced self-attention (SA) mechanisms. The SA mechanism enabled focus on different parts of a long input sequence in parallel, which enhanced understanding of contextual nuances in language patterns over extremely long sequences. The SA mechanism is also complemented with positional encodings in transformers to enable the model to maintain an awareness of word-token order, which is required to understand accurate
44
+
45
+ grammar and syntax. The self-attention mechanism, central to transformers, uses a query, key, and value system to contextualize dependencies in the input sequence. Informally, the SA concept is inspired by classical information retrieval systems where the query is the input sequence context, the key refers to a "database" contained within the parametric memory, and the value is the actual value present at that reference. The operation is mathematically expressed in Equation 1.
46
+
47
+ $$
48
+ \operatorname{Attention}(Q, K, V) = \operatorname{softmax}\left(\frac{QK^{T}}{\sqrt{d_{k}}}\right) V \tag{1}
49
+ $$
50
+
51
+ In this equation, $Q$ , $K$ , and $V$ denote the query, key, and value matrices. The scaling factor $\sqrt{d_k}$ , where $d_k$ is the dimension of the keys, is employed to standardize the vectors to unit variance for ensuring stable softmax gradients during training. Since the introduction of LLMs with self-attention, there have been several architectural variants depending on the downstream tasks.
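+
+ As a concrete, minimal illustration of Equation 1 (not drawn from any particular model), the NumPy sketch below computes scaled dot-product attention for a toy sequence; the dimensions and random matrices are placeholders.
+
+ ```python
+ import numpy as np
+
+ def softmax(x, axis=-1):
+     # Subtract the row-wise max before exponentiating for numerical stability.
+     x = x - x.max(axis=axis, keepdims=True)
+     e = np.exp(x)
+     return e / e.sum(axis=axis, keepdims=True)
+
+ def scaled_dot_product_attention(Q, K, V):
+     # Q, K, V: (seq_len, d_k) matrices, matching Equation 1.
+     d_k = K.shape[-1]
+     scores = Q @ K.T / np.sqrt(d_k)     # pairwise similarity, scaled by sqrt(d_k)
+     weights = softmax(scores, axis=-1)  # each row is a distribution over positions
+     return weights @ V                  # contextualized token representations
+
+ # Toy example: 4 tokens with 8-dimensional queries, keys, and values.
+ rng = np.random.default_rng(0)
+ Q, K, V = (rng.normal(size=(4, 8)) for _ in range(3))
+ print(scaled_dot_product_attention(Q, K, V).shape)  # (4, 8)
+ ```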
52
+
53
+ Causal Language Modeling (CLMs): CLMs, such as GPT-4, are designed for tasks where text generation is sequential and dependent on the preceding context. They predict each subsequent word based on the preceding words, modeling the probability of a word sequence in a forward direction. This process is mathematically formulated as shown in Equation 2.
54
+
55
+ $$
56
+ P (T) = \prod_ {i = 1} ^ {n} P \left(t _ {i} \mid t _ {< i}\right) \tag {2}
57
+ $$
58
+
59
+ In this formulation, $P(t_{i}|t_{< i})$ represents the probability of the $i$ -th token given all preceding tokens, $t_{< i}$ . This characteristic makes CLMs particularly suitable for applications
60
+
61
+ like content generation, where the flow and coherence of the text in the forward direction are crucial.
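+
+ As a worked toy example of the factorization in Equation 2 (illustrative only; `next_token_probs` is a hypothetical stand-in for the output distribution of a trained causal LM), the sketch below scores a short sequence left to right:
+
+ ```python
+ import math
+
+ def next_token_probs(prefix):
+     # Hypothetical P(t_i | t_<i) over a tiny vocabulary, standing in for a causal LM.
+     table = {
+         (): {"the": 0.6, "a": 0.4},
+         ("the",): {"robot": 0.7, "plan": 0.3},
+         ("the", "robot"): {"moves": 0.8, "plans": 0.2},
+     }
+     return table.get(tuple(prefix), {"<eos>": 1.0})
+
+ def sequence_log_prob(tokens):
+     # log P(T) = sum_i log P(t_i | t_<i), the log form of Equation 2.
+     log_p = 0.0
+     for i, tok in enumerate(tokens):
+         log_p += math.log(next_token_probs(tokens[:i]).get(tok, 1e-12))
+     return log_p
+
+ print(sequence_log_prob(["the", "robot", "moves"]))  # log(0.6 * 0.7 * 0.8)
+ ```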
62
+
63
+ Masked Language Modeling (MLMs): Unlike CLMs, MLMs like BERT are trained to understand the bidirectional context by predicting words randomly masked in a sentence. This approach allows the model to learn both forward and backward dependencies in language structure. The MLM prediction process can be represented as Equation 3.
64
+
65
+ $$
66
+ P\left(T_{\text{masked}} \mid T_{\text{context}}\right) = \prod_{i \in M} P\left(t_{i} \mid T_{\text{context}}\right) \tag{3}
67
+ $$
68
+
69
+ Here, $T_{\mathrm{masked}}$ is the set of masked tokens in the sentence, $T_{\mathrm{context}}$ represents the unmasked part of the sentence, and $M$ is the set of masked positions. MLMs have proven effective in NLP tasks such as sentiment analysis or question answering.
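+
+ The bidirectional conditioning in Equation 3 can be pictured with a deliberately tiny count-based sketch; the corpus and tokenization below are invented for illustration, whereas a real MLM learns these probabilities with a neural network:
+
+ ```python
+ from collections import Counter
+
+ corpus = [
+     "the robot picks the block",
+     "the robot places the block",
+     "the arm picks the block",
+ ]
+
+ def masked_token_probs(left, right):
+     # Estimate P(t_i | context) for one masked slot using BOTH neighbors,
+     # mimicking the bidirectional context an MLM conditions on.
+     counts = Counter()
+     for sentence in corpus:
+         toks = sentence.split()
+         for j in range(1, len(toks) - 1):
+             if toks[j - 1] == left and toks[j + 1] == right:
+                 counts[toks[j]] += 1
+     total = sum(counts.values())
+     return {tok: c / total for tok, c in counts.items()} if total else {}
+
+ # "the robot [MASK] the block": left and right context jointly constrain the slot.
+ print(masked_token_probs("robot", "the"))  # {'picks': 0.5, 'places': 0.5}
+ ```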
70
+
71
+ Sequence-to-Sequence (Seq2Seq) Modeling: Seq2Seq models, like T5, are designed to transform an input sequence into a related output sequence. They are often employed in tasks that require a mapping between different types of sequences, such as language translation or summarization. The Seq2Seq process is formulated as Equation 4.
72
+
73
+ $$
74
+ P\left(T_{\text{output}} \mid T_{\text{input}}\right) = \prod_{i=1}^{m} P\left(t_{\text{output}_{i}} \mid T_{\text{input}}, t_{\text{output}_{<i}}\right) \tag{4}
75
+ $$
76
+
77
+ In Equation 4, $T_{\mathrm{input}}$ is the input sequence, $T_{\mathrm{output}}$ is the output sequence, and $P(t_{\mathrm{output}_i}|T_{\mathrm{input}},t_{\mathrm{output}_{< i}})$ calculates the probability of generating each token in the output sequence, considering both the input sequence and the preceding tokens in the output sequence.
78
+
79
+ In addition to their architectural variants, the utility of LLMs is further enhanced by specific model utilization strategies, enabling their effective adaptation to various domains at scale. One key strategy is fine-tuning, which applies to pre-trained LLMs. Pre-trained LLMs are models already trained on large datasets to understand and generate language, acquiring a broad linguistic knowledge base. Fine-tuning involves further training pre-trained LLMs on a smaller, task-specific dataset, thereby adjusting the neural network weights for particular applications. This process is mathematically represented in Equation 5.
80
+
81
+ $$
82
+ \theta_{\text{fine-tuned}} = \theta_{\text{pre-trained}} - \eta \cdot \nabla_{\theta} L(\theta, D_{\text{task}}) \tag{5}
83
+ $$
84
+
85
+ Here, $\theta_{\mathrm{fine - tuned}}$ are the model parameters after fine-tuning, $\theta_{\mathrm{pre - trained}}$ are the parameters obtained from pre-training, $\eta$ is the learning rate, and $\nabla_{\theta}L(\theta ,D_{\mathrm{task}})$ denotes the gradient of the loss function $L$ with respect to the parameters $\theta$ on the task-specific dataset $D_{\mathrm{task}}$ .
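+
+ The sketch below shows Equation 5 as a single gradient-descent update on a toy squared-error loss; the linear model, data, and learning rate are placeholders rather than an actual LLM fine-tuning recipe.
+
+ ```python
+ import numpy as np
+
+ def loss_and_grad(theta, X, y):
+     # Toy task loss L(theta, D_task): mean squared error of a linear model.
+     residual = X @ theta - y
+     loss = float(np.mean(residual ** 2))
+     grad = 2.0 * X.T @ residual / len(y)  # gradient of L with respect to theta
+     return loss, grad
+
+ rng = np.random.default_rng(0)
+ X = rng.normal(size=(32, 4))           # stand-in task-specific inputs
+ y = rng.normal(size=32)                # stand-in task-specific targets
+ theta_pretrained = rng.normal(size=4)  # parameters "inherited" from pre-training
+
+ eta = 0.1
+ _, grad = loss_and_grad(theta_pretrained, X, y)
+ theta_finetuned = theta_pretrained - eta * grad  # one step of Equation 5
+ print(theta_finetuned)
+ ```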
86
+
87
+ $$
88
+ P (T | C) = \prod_ {i = 1} ^ {n} P \left(t _ {i} \mid t _ {< i}, C\right) \tag {6}
89
+ $$
90
+
91
+ Complementing the fine-tuning approach is in-context learning, an alternative strategy that is particularly characteristic of models like the GPT series. This method diverges from fine-tuning by enabling the model to adapt its
92
+
93
+ responses based on immediate context or prompts without necessitating further training. The efficacy of in-context learning is a direct consequence of the comprehensive pretraining phase, where models are exposed to diverse textual datasets, thereby acquiring a nuanced understanding of language and context. Given a context $C$ , the model generates text $T$ that is contextually relevant, as shown in Equation 6. Here, $P(T|C)$ is the probability of generating text $T$ given the context $C$ , and $P(t_i|t_{<i}, C)$ is the probability of generating the $i$ -th token $t_i$ given the preceding tokens $t_{<i}$ and the context $C$ .
94
+
95
+ ![](images/9c9354954b9e2f81a6619598516e2381ffb89e4bc821a098ab69f1eacdded832.jpg)
96
+ Figure 1: Radar chart showcasing the relative performance of six language models (GPT-4, Claude-v1, GPT-3.5-turbo, Vicuna-13B, Alpaca-13B, LLama-13B) across key domains: Writing, Roleplay, Reasoning, Math, Coding, Extraction, STEM, and Humanities from Zheng et al. (2023a).
97
+
98
+ These diverse model types and training methodologies under the umbrella of LLMs showcase the flexibility and adaptability of language models in handling a wide range of complex tasks. Figure 1 illustrates the comparative capabilities of different LLMs across various competency domains, such as Writing (evaluating text generation quality), Role-play (assessing conversational interaction), Reasoning (logical problem-solving), Math (numerical problem-solving), Coding (programming language understanding and generation), Extraction (information retrieval from text), STEM (proficiency in scientific and technical contexts), and Humanities (engagement with arts, history, and social sciences content). Across these domains, GPT-4 exhibits the strongest performance in the benchmark dataset evaluated by Zheng et al. (2023a), indicative of its superior training and extensive knowledge base. Expanding LLMs into applications such as code generation signifies their adaptability and potential for cross-disciplinary innovation. However, fine-tuning and in-context learning methodologies also bring challenges, such as potential data overfitting and reliance on the quality of input context. LLMs' continuous development and refinement promise to open new frontiers in various domains, including automated planning and scheduling, by bridging AI with human-like language understanding.
99
+
100
+ # Automated Planning and Scheduling
101
+
102
+ APS is a branch of AI that focuses on the creation of strategies or action sequences, typically for execution by intelligent agents, autonomous robots, and unmanned
103
+
104
+ ![](images/cae332ce1610600c60aeba97dbf09be3e8de45cc4817a92350418208f7015ca3.jpg)
105
+ Figure 2: Of the 126 papers surveyed in this study, 55 were accepted by peer-reviewed conferences. This chart illustrates the distribution of these papers across various conferences in the fields of LLMs and APS, highlighting the primary forums for scholarly contributions in these areas.
106
+
107
+ vehicles. A basic category in APS is a Classical Planning Problem (CPP) (Russell and Norvig 2003) which is a tuple $\mathcal{M} = \langle \mathcal{D},\mathcal{I},\mathcal{G}\rangle$ with domain $\mathcal{D} = \langle F,A\rangle$ - where $F$ is a set of fluents that define a state $s\subseteq F$ , and $A$ is a set of actions - and initial and goal states $\mathcal{I},\mathcal{G}\subseteq F$ . Action $a\in A$ is a tuple $(c_{a},pre(a),eff^{\pm}(a))$ where $c_{a}$ is the cost, and $pre(a),eff^{\pm}(a)\subseteq F$ are the preconditions and add/delete effects, i.e., $\delta_{\mathcal{M}}(s,a)\models \bot$ if $s\not\models pre(a)$ ; else $\delta_{\mathcal{M}}(s,a)\models s\cup \mathrm{eff}^{+}(a)\setminus \mathrm{eff}^{-}(a)$ where $\delta_{\mathcal{M}}(\cdot)$ is the transition function. The cumulative transition function is $\delta_{\mathcal{M}}(s,(a_1,a_2,\ldots ,a_n)) = \delta_{\mathcal{M}}(\delta_{\mathcal{M}}(s,a_1),(a_2,\ldots ,a_n))$ . A plan for a CPP is a sequence of actions $\langle a_1,a_2,\dots ,a_n\rangle$ that transforms the initial state $\mathcal{I}$ into the goal state $\mathcal{G}$ using the transition function $\delta_{\mathcal{M}}$ . Traditionally, a CPP is encoded using a symbolic representation, where states, actions, and transitions are explicitly enumerated. This symbolic approach, often implemented using Planning Domain Definition Language or PDDL (McDermott et al. 1998), ensures precise and unambiguous descriptions of planning problems. This formalism allows for applying search algorithms and heuristic methods to find a sequence of actions that lead to the goal state, which is the essence of the plan.
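+
+ To make the CPP tuple and transition function above concrete, here is a minimal Python sketch with an invented two-fluent, blocks-style domain (the fluents and actions are illustrative, not taken from any surveyed benchmark):
+
+ ```python
+ from dataclasses import dataclass
+
+ @dataclass(frozen=True)
+ class Action:
+     name: str
+     cost: int
+     pre: frozenset     # pre(a): fluents that must hold in the current state
+     add: frozenset     # eff+(a): fluents added by the action
+     delete: frozenset  # eff-(a): fluents deleted by the action
+
+ def transition(state, action):
+     # delta_M(s, a): undefined (None) if s does not satisfy pre(a),
+     # otherwise s ∪ eff+(a) \ eff-(a).
+     if not action.pre <= state:
+         return None
+     return (state | action.add) - action.delete
+
+ def apply_plan(state, plan):
+     # Cumulative transition function over a sequence of actions.
+     for a in plan:
+         state = transition(state, a)
+         if state is None:
+             return None
+     return state
+
+ pick = Action("pick", 1,
+               pre=frozenset({"hand-empty", "on-table"}),
+               add=frozenset({"holding"}),
+               delete=frozenset({"hand-empty", "on-table"}))
+ initial = frozenset({"hand-empty", "on-table"})
+ goal = frozenset({"holding"})
+ print(goal <= apply_plan(initial, [pick]))  # True: the one-step plan reaches the goal
+ ```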
108
+
109
+ The advent of LLMs has sparked a significant evolution in representation methods for CPPs, moving towards leveraging the expressive power of natural language (Valmeekam et al. 2023a) and the perceptual capabilities of vision (Asai 2018). These novel approaches, inherently more suited for LLM processing, use text and vision-based representations, allowing researchers to utilize the pre-existing knowledge within LLMs. This shift enables a more humanistic comprehension and reasoning about planning tasks, enhancing the flexibility and applicability of planning algorithms in complex, dynamic environments. LLMs, while distinct in being trained on vast datasets outside the traditional scope of planning, loosely connect to previous data-driven methodologies, such as case-based reasoning (Xu 1995) applied to planning and Hierarchical Task Network (HTN) (Georgievski and Aiello 2015) which make use of task knowledge. It is an open area how LLMs may be used
110
+
111
+ synergistically with prior methods.
112
+
113
+ # LLMs in APS - Literature selection
114
+
115
+ A comprehensive survey of existing literature was conducted to explore the application of LLMs for automated planning. This endeavor led to identifying 126 pertinent research papers showcasing various methodologies, applications, and theoretical insights into utilizing LLMs within this domain.
116
+
117
+ The selection of these papers was guided by stringent criteria, focusing primarily on their relevance to the core theme of LLMs in automated planning. The search, conducted across multiple academic databases and journals, was steered by keywords such as "Large Language Models", "Automated Planning", "LLMs in Planning", and "LLMs + Robotics". Figure 2 presents the distribution of these selected papers across various peer-reviewed conferences, underlining the breadth and diversity of forums addressing the intersection of LLMs and APS. Even if a paper originated from a workshop within a conference, only the conference name is listed. Out of 126 papers, 71 are under review or available on arXiv. The inclusion criteria prioritized the relevance and contribution of papers to automated planning with LLMs over the publication date. Nonetheless, all surveyed papers emerged from either 2022 or 2023, a trend depicted in Figure 3, underscoring the recent surge in LLM research. A word cloud was generated to visually capture the prevalent research themes reflected in these papers' titles, illustrated in Figure 4. This cloud highlights the frequent use of terms such as "Language Model" and "Planning", which dominate the current discourse. In contrast, the emergence of "Neuro-Symbolic" reflects a nascent yet growing interest in integrating neural and symbolic approaches within the field. This systematic approach ensured a comprehensive inclusion of seminal works and recent advancements.
118
+
119
+ Upon the accumulation of these papers, a meticulous manual categorization was undertaken. The papers were divided into four piles, each containing approximately 30 papers. Each pile was manually categorized by one author,
120
+
121
+ ![](images/073f64493115ced78d94b76f87c30654f0a47c4bfb31b9139f35eae7bdf742fc.jpg)
122
+ Figure 3: Annual distribution of the 126 surveyed papers, indicating a significant increase in publications from 12 in 2022 to 114 in 2023, highlighting the rapid growth of LLM research within a single year.
123
+
124
+ with the final categorization being reviewed by all authors. During this process, each paper could belong to multiple categories out of the eight established. The maximum number of categories assigned to a single paper was three, although the median was typically one category per paper. This process was pivotal in distilling the vast information into coherent, thematic groups. The categorization was conducted based on the specific application of LLMs in planning. This formed eight distinct categories, each representing a unique facet of LLM application in automated planning. These categories facilitate a structured analysis and highlight LLMs' diverse applications and theoretical underpinnings in this field.
125
+
126
+ ![](images/c6bce785d81e2113a9455e41ee3cb5796b607308a1b1b790012676aca8c9f4c9.jpg)
127
+ Figure 4: Word cloud of terms from the titles of papers surveyed in this study, displaying the prevalence of "Language Model" and "Planning" as central themes. The presence of "Neuro-Symbolic" indicates an emergent trend toward the fusion of neural and symbolic methodologies in the domain.
128
+
129
+ # LLMs in APS - Literature Discussion
130
+
131
+ This section delves into the diverse applications of LLMs in planning tasks. We have identified eight distinct categories based on the utility and application of LLMs in planning, which are concisely summarized in Table 1. Figure 5 provides a detailed taxonomy, illustrating the categorization of the identified research papers.
132
+
133
+ # Language Translation
134
+
135
+ Language translation in the context of LLMs and planning involves transforming natural language instructions into structured planning languages (Wong et al. 2023; Kelly et al. 2023; Yang 2023; Pan et al. 2023; Xie et al. 2023; Yang, Ishay, and Lee 2023; Lin et al. 2023c; Sakib and Sun 2023; Yang et al. 2023b; Parakh et al. 2023; Yang et al. 2023a; Dai et al. 2023; Ding et al. 2023b; Zelikman et al. 2023; Xu et al. 2023b; Chen et al. 2023a; You et al. 2023) such as PDDL, and vice versa, utilizing in-context learning techniques (Guan et al. 2023). This capability effectively bridges the gap between human linguistic expression and machine-understandable formats, enhancing intuitive and efficient planning processes. The LLM+P framework (Liu et al. 2023) exemplifies this by converting natural language descriptions of planning problems into PDDL using GPT-4, leveraging classical planners for solution finding, and then translating these solutions back into natural language, with a specific focus on robot planning scenarios. Additionally, Graph2NL (Chalvatzaki et al. 2023) generates natural language text from scene graphs for long-horizon robot reasoning tasks, while (Shirai et al. 2023) introduces a vision-to-language interpreter for robot task planning. Further, (Brohan et al. 2023) examines the grounding of LLM-generated natural language utterances in actionable robot tasks, and (Yang, Gaglione, and Topcu 2022) utilizes LLMs for creating finite-state automatons for sequential decision-making problems. Despite these advancements, a critical research gap emerges in the autonomous translation capabilities of LLMs, particularly in converting natural language to PDDL without external expert intervention.
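+
+ The LLM+P-style loop described above (natural language to PDDL, PDDL to a classical planner, and the symbolic plan back to natural language) can be sketched as glue code; `call_llm` and `run_classical_planner` below are hypothetical placeholders for an LLM API and a PDDL planner, not the actual LLM+P implementation:
+
+ ```python
+ def call_llm(prompt: str) -> str:
+     # Hypothetical stand-in for an LLM API call (e.g., a chat-completion endpoint).
+     raise NotImplementedError("plug in an LLM client here")
+
+ def run_classical_planner(domain_pddl: str, problem_pddl: str) -> str:
+     # Hypothetical stand-in for invoking a sound classical planner on PDDL input.
+     raise NotImplementedError("plug in a planner invocation here")
+
+ def llm_plus_p(nl_task: str, domain_pddl: str) -> str:
+     # 1) The LLM translates the natural-language task into a PDDL problem file.
+     problem_pddl = call_llm(
+         "Translate this planning task into a PDDL problem for the given domain.\n"
+         f"Domain:\n{domain_pddl}\n\nTask:\n{nl_task}"
+     )
+     # 2) A classical planner searches for a plan over the symbolic encoding.
+     plan = run_classical_planner(domain_pddl, problem_pddl)
+     # 3) The LLM translates the symbolic plan back into natural language.
+     return call_llm(f"Explain this plan in plain English:\n{plan}")
+ ```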
136
+
137
+ While LLMs effectively translate PDDL to natural language, a notable gap is evident in their limited understanding of real-world objects and the problem of grounding affordances, mainly when translating natural language to structured languages like PDDL. Addressing this gap calls for integrating neuro-symbolic approaches in LLMs, where the fusion of perceptual experience for concrete concept understanding from knowledge graphs complements LLMs' proficiency in distributional statistics (Lenat and Marcus 2023).
138
+
139
+ # Plan Generation
140
+
141
+ This category focuses on directly generating plans using LLMs. The research, primarily utilizing causal language models through in-context learning (Sermanet et al. 2023; Li et al. 2023b; Silver et al. 2023; Parakh et al. 2023; Zelikman et al. 2023; Besta et al. 2023; Huang et al. 2023a; Dalal et al. 2023; Wang et al. 2023b; Valmeekam et al. 2022; Valmeekam, Marquez, and Kambhampati 2023; Gramopadhye and Szafir 2022; Singh et al. 2023) $^{1}$ , demonstrates modest performance, indicating notable challenges in employing LLMs for effective plan generation. Novel in-context learning strategies, such as the Chain-of-Symbol and Tree of Thoughts, have been introduced to enhance LLMs' reasoning capabilities (Hu et al. 2023b; Yao et al. 2023).
142
+
143
+ ![](images/83d1a34e2a54e9903fd6b7dac7ba203146e201485f993f83594961a414049fc8.jpg)
144
+ Figure 5: Taxonomy of recent research in the intersection of LLMs and Planning into categories (#). Each has scholarly papers based on their unique application or customization of LLMs in addressing various aspects of planning problems.
145
+
146
+ Efforts to generate multimodal, text, and image-based goal-conditioned plans are exemplified by (Lu et al. 2023b). Additionally, a subset of studies in this survey investigates the fine-tuning of seq2seq, code-based language models (Pallagani et al. 2022, 2023b), which are noted for their advanced syntactic encoding. These models show promise in improving plan generation within the confines of their training datasets (Logeswaran et al. 2023), yet exhibit limitations in generalizing to out-of-distribution domains (Pallagani et al. 2023a), highlighting a gap in their adaptability across diverse planning contexts.
147
+
148
+ Causal LLMs are predominantly used for plan generation, but their performance is often limited due to their design, which is focused on generating text based on preceding input. On the other hand, seq2seq LLMs can generate valid plans but struggle with generalization across diverse domains. This limitation highlights an opportunity for a synergistic approach: integrating even imperfect LLM outputs with symbolic planners can expedite heuristic searches, thereby enhancing efficiency and reducing search times (Fabiano et al. 2023).
149
+
150
+ # Model Construction
151
+
152
+ This category employs LLMs to build or refine world and domain models essential for accurate planning. Nottingham et al. (2023); Yuan et al. (2023a) leverage in-context learning with LLMs to develop an abstract world model in the Minecraft domain, highlighting the challenge of semantic grounding in LLMs. Similarly, Gragera and Pozanco (2023) explore the capability of LLMs in completing ill-defined PDDL domains. Efforts such as (Huang et al. 2023a; Brohan et al. 2023) delve into LLMs' grounding capabilities, with SayCan (Brohan et al. 2023) notably achieving $74\%$ executable plans. Hao et al. (2023a); Yoneda et al. (2023) innovatively positions LLMs as both world models and reasoning agents, enabling the simulation of world states and prediction of action outcomes. Research by (Zhang and Soh 2023; Wong et al. 2023; Mandi, Jain, and Song 2023; Hu et al. 2023a; Zhao, Lee, and Hsu 2023; Ding et al. 2023b; Huang et al. 2023a; Wu et al. 2023b; Xu et al. 2023b; Brohan et al. 2023) shows that LLMs can effectively model high-level human states and behaviors using their commonsense knowledge. Yet, they face difficulties accurately processing low-level geometrical or shape features due to spatial and numerical reasoning constraints. Additionally, Kelly et al. (2023) investigates the potential of LLMs in conjunction with planners to craft narratives and logical story models, integrating human-in-the-loop for iterative edits.
153
+
154
+ LLMs often struggle with detailed spatial reasoning and processing low-level environmental features, limiting their effectiveness in model construction. Integrating world models presents a viable solution, offering advanced abstractions for reasoning that encompass human-like cognitive elements and interactions, thereby enhancing LLMs' capabilities in model construction (Hu and Shu 2023).
155
+
156
+ # Multi-agent Planning
157
+
158
+ In multi-agent planning, LLMs play a vital role in scenarios involving interaction among multiple agents, typically modeled
159
+
160
+ using distinct LLMs. These models enhance coordination and cooperation, leading to more complex and effective multi-agent strategies. (Zhang et al. 2023b) introduces an innovative framework that employs LLMs to develop cooperative embodied agents. AutoGraph (Wei et al. 2023) leverages LLMs to generate autonomous agents adept at devising solutions for varied graph-structured data problems. Addressing scalability in multi-robot task planning, (Chen et al. 2023d) proposes frameworks for the collaborative function of different LLM-based agents. Furthermore, (Abdelnabi et al. 2023) and (Hua et al. 2023) collectively demonstrate the effectiveness of LLM agents in complex negotiation and decision-making environments.
161
+
162
+ A key gap in multi-agent planning with LLMs lies in standardizing inter-agent communication and maintaining distinct belief states, including human aspects. Overcoming this requires advanced LLM algorithms for dynamic alignment of communication and belief states, drawing on epistemic reasoning and knowledge representation (de Zarza et al. 2023).
163
+
164
+ # Interactive Planning
165
+
166
+ In this category, LLMs are utilized in dynamic scenarios where real-time adaptability to user feedback or iterative planning is essential. The refinement of LLM outputs is typically achieved through four primary feedback variants: (a) External verifiers, such as VAL(Howey, Long, and Fox 2004) for PDDL or scene descriptors and success detectors in robotics (Guan et al. 2023; Arora and Kambhampati 2023; Jha et al. 2023; Huang et al. 2022b; Liu, Bahety, and Song 2023; Rana et al. 2023; Ren et al. 2023; Kim et al. 2023; Graule and Isler 2023; Driess et al. 2023; Zheng et al. 2023b); (b) Online reinforcement learning, which progressively updates the LLM about environmental changes (Carta et al. 2023); (c) Self-refinement by LLMs, where they provide feedback on their own outputs (Zhou et al. 2023; Hu et al. 2023c,b; Ding et al. 2023a; Sun et al. 2023; Naik et al. 2023); (d) Input from human experts (Raman et al. 2022; Wu, Ai, and Hsu 2023). Furthermore, (Chen et al. 2023b) introduces the "Action Before Action" method, enabling LLMs to proactively seek relevant information from external sources in natural language, thereby improving embodied decision-making in LLMs by $40\%$ .
167
+
168
+ A key gap in interactive planning with LLMs lies in harmonizing the "fast" neural processing of LLMs with "slow" symbolic reasoning, as manifested in feedback mechanisms. This integration is key to maintaining the neural speed of LLMs while effectively embedding the depth and precision of feedback, which is vital for accuracy in dynamic planning scenarios (Zhang et al. 2023a).
169
+
170
+ # Heuristics Optimization
171
+
172
+ In the realm of Heuristics Optimization, LLMs are leveraged to enhance planning processes, either by refining existing plans or aiding symbolic planners with heuristic guidance. Studies like (Hazra, Martires, and De Raedt 2023; Hao et al. 2023a; Dai et al. 2023; Feng et al. 2023) have effectively coupled LLMs with heuristic searches to identify optimal
173
+
174
+ action sequences. Research by (Silver et al. 2022; Shah et al. 2023; Valmeekam et al. 2023b) reveals that LLMs' outputs, even if partially correct, can provide valuable direction for symbolic planners such as LPG (Gerevini and Serina 2002), especially in problems beyond the LLMs' solvable scope. Furthermore, (Raimondo et al. 2023) makes an intriguing observation that including workflows and action plans in LLM input prompts can notably enhance task-oriented dialogue generalization.
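+
+ One way to picture this use of LLM output, sketched below under stated assumptions, is as tie-breaking guidance for an otherwise complete search: actions named in a (possibly partially wrong) LLM suggestion are tried first at each expansion, so a good suggestion steers the search while a bad one only costs extra expansions. This is an illustrative scheme reusing the toy `Action`/`transition` helpers sketched earlier, not the specific mechanism of LPG or the cited papers.
+
+ ```python
+ from collections import deque
+
+ def guided_bfs(initial, goal, actions, transition, llm_suggestion):
+     # Breadth-first search over frozenset states; actions appearing in the
+     # LLM suggestion are ordered first, but every applicable action is still
+     # expanded eventually, so completeness is preserved.
+     preferred = {name: i for i, name in enumerate(llm_suggestion)}
+     order = sorted(actions, key=lambda a: preferred.get(a.name, len(preferred)))
+     frontier = deque([(initial, [])])
+     visited = {initial}
+     while frontier:
+         state, plan = frontier.popleft()
+         if goal <= state:
+             return plan
+         for a in order:
+             nxt = transition(state, a)
+             if nxt is not None and nxt not in visited:
+                 visited.add(nxt)
+                 frontier.append((nxt, plan + [a.name]))
+     return None
+ ```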
175
+
176
+ This category marks significant progress towards realizing neuro-symbolic approaches in APS. Current methods emphasize plan validity, often at the expense of time efficiency. Future research should look at how to continually evolve LLMs for better plan generation, drawing on their experience from complementing symbolic planners (Du et al. 2023).
177
+
178
+ # Tool Integration
179
+
180
+ In tool integration, LLMs serve as coordinators within a diverse array of planning tools, enhancing functionality in complex scenarios. Studies like (Xu et al. 2023a; Lu et al. 2023a; Shen et al. 2023; Hao et al. 2023b; Ge et al. 2023) demonstrate that incorporating tools such as web search engines, Python functions, and API endpoints enhances LLM reasoning abilities. However, (Ruan et al. 2023) notes a tendency for LLMs to over-rely on specific tools, potentially prolonging the planning process. (Li et al. 2023a) introduces a benchmark for tool-augmented LLMs. While typical approaches involve teaching LLMs tool usage via multiple prompts, (Hsieh et al. 2023) suggests that leveraging tool documentation offers improved planning capabilities, circumventing the need for extensive demonstrations.
181
+
182
+ LLMs often hallucinate non-existent tools, overuse a single tool, and face scaling challenges with multiple tools. Overcoming these issues is key to enabling LLMs to effectively select and utilize various tools in complex planning scenarios (Elaraby et al. 2023).
183
+
184
+ # Brain-Inspired Planning
185
+
186
+ This area explores neurologically and cognitively inspired architectures in LLMs (Webb et al. 2023; Sumers et al. 2023; Momennejad et al. 2023; Hu et al. 2023d; Lin et al. 2023a), aiming to replicate human-like planning in enhancing problem-solving. However, these methods still rely on in-context learning, frequently encounter challenges such as hallucination and grounding, as previously discussed, and tend to be more computationally intensive than in-context learning alone.
187
+
188
+ While LLMs attempt to mimic symbolic solvers through in-context learning for brain-inspired modules, this approach lacks adaptability and reflects only a superficial grasp of complex cognitive processes. To overcome these issues, developing systems where neural and symbolic components are intrinsically intertwined is critical, as it would more accurately mirror human cognitive capabilities in planning (Fabiano et al. 2023).
189
+
190
+ # Discussion and Conclusion
191
+
192
+ In this position paper, we comprehensively investigate the role of LLMs within the domain of APS, analyzing 126 scholarly articles across eight distinct categories. This extensive survey not only provides a detailed landscape of current LLM applications and their limitations but also highlights the volume of research in each category: Language Translation with 23 papers demonstrates LLMs' proficiency, whereas Plan Generation, the most researched category with 53 papers, reveals their shortcomings in optimality, completeness, and correctness compared to traditional combinatorial planners. Our exploration extends to Model Construction (17 papers), which examines LLMs in developing planning models, and the relatively unexplored area of Multiagent Planning (4 papers). Interactive Planning is well represented with 21 papers, illustrating LLMs' adaptability in feedback-centric scenarios. Despite being less researched, Heuristics Optimization and Tool Integration, each with 8 papers, provide valuable insights into efficiency enhancement and integration of LLMs with symbolic solvers. Lastly, Brain-inspired Planning, although least represented with 5 papers, opens innovative avenues for human-like planning processes in LLMs. By identifying the research distribution and gaps in these categories, our paper proposes how neuro-symbolic approaches can address these voids, thereby underscoring the varying degrees of LLM applications in APS and guiding future research towards enhancing their capabilities in complex planning tasks.
193
+
194
+ It is important to acknowledge that while LLMs have shown promise, they are not a panacea for the inherent complexities of automated planning. The expectation that LLMs, operating within polynomial run-time bounds, could supplant the nuanced and often non-polynomial complexities of symbolic planners is not yet realizable. Indeed, the strengths of LLMs do not currently include generating sequences of actions akin to those devised by symbolic planners, which are essential for creating a coherent and practical plan for complex problems. However, this does not diminish the potential utility of LLMs within this space. When considering average-case scenarios, which are typically less complex than worst-case scenarios, LLMs could offer substantial efficiencies. They can be seen as akin to meta-heuristic approaches, capable of accelerating plan generation in a variety of settings. As such, their application, governed by cognitive-inspired frameworks like SOFAI(Fabiano et al. 2023), could delineate when and where their use is most advantageous.
195
+
196
+ Future research should prioritize three areas: developing new LLM training paradigms that ensure coherence and goal alignment in outputs; delving into Henry Kautz's neurosymbolic taxonomies (Kautz 2022) to better integrate neural and symbolic methods; and establishing clear performance metrics for LLM-assisted planners. In conclusion, integrating LLMs into automated planning, while challenging, opens avenues for innovation. Embracing a symbiotic approach that combines the creative strengths of LLMs with the precision of symbolic planners can lead to more effective, sophisticated AI applications in planning.
197
+
198
+ # References
199
+
200
+ Abdelnabi, S.; Gomaa, A.; Sivaprasad, S.; Schonherr, L.; and Fritz, M. 2023. Llm-deliberation: Evaluating llms with interactive multi-agent negotiation games. arXiv preprint arXiv:2309.17234.
201
+ Arora, D.; and Kambhampati, S. 2023. Learning and Leveraging Verifiers to Improve Planning Capabilities of Pre-trained Language Models. arXiv preprint arXiv:2305.17077.
202
+ Asai, M. 2018. Photo-Realistic Blockworld Dataset. arXiv preprint arXiv:1812.01818.
203
+ Besta, M.; Blach, N.; Kubicek, A.; Gerstenberger, R.; Gianinazzi, L.; Gajda, J.; Lehmann, T.; Podstawski, M.; Niewiadomski, H.; Nyczyk, P.; et al. 2023. Graph of thoughts: Solving elaborate problems with large language models. arXiv preprint arXiv:2308.09687.
204
+ Brohan, A.; Chebotar, Y.; Finn, C.; Hausman, K.; Herzog, A.; Ho, D.; Ibarz, J.; Irpan, A.; Jang, E.; Julian, R.; et al. 2023. Do as i can, not as i say: Grounding language in robotic affordances. In Conference on Robot Learning, 287-318. PMLR.
205
+ Capitanelli, A.; and Mastrogiovanni, F. 2023. A Framework to Generate Neurosymbolic PDDL-compliant Planners. arXiv preprint arXiv:2303.00438.
206
+ Carta, T.; Romac, C.; Wolf, T.; Lamprier, S.; Sigaud, O.; and Oudeyer, P.-Y. 2023. Grounding large language models in interactive environments with online reinforcement learning. arXiv preprint arXiv:2302.02662.
207
+ Chalvatzaki, G.; Younes, A.; Nandha, D.; Le, A. T.; Ribeiro, L. F.; and Gurevych, I. 2023. Learning to reason over scene graphs: a case study of finetuning GPT-2 into a robot language model for grounded task planning. Frontiers in Robotics and AI, 10.
208
+ Chen, B.; Xia, F.; Ichter, B.; Rao, K.; Gopalakrishnan, K.; Ryoo, M. S.; Stone, A.; and Kappler, D. 2023a. Open-vocabulary queryable scene representations for real world planning. In 2023 IEEE International Conference on Robotics and Automation (ICRA), 11509-11522. IEEE.
209
+ Chen, X.; Zhang, S.; Zhang, P.; Zhao, L.; and Chen, J. 2023b. Asking Before Action: Gather Information in Embodied Decision Making with Language Models. arXiv preprint arXiv:2305.15695.
210
+ Chen, Y.; Arkin, J.; Zhang, Y.; Roy, N.; and Fan, C. 2023c. AutoTAMP: Autoregressive Task and Motion Planning with LLMs as Translators and Checkers. arXiv preprint arXiv:2306.06531.
211
+ Chen, Y.; Arkin, J.; Zhang, Y.; Roy, N.; and Fan, C. 2023d. Scalable Multi-Robot Collaboration with Large Language Models: Centralized or Decentralized Systems? arXiv preprint arXiv:2309.15943.
212
+ Dagan, G.; Keller, F.; and Lascarides, A. 2023. Dynamic Planning with a LLM. arXiv preprint arXiv:2308.06391.
213
+ Dai, Z.; Asgharivaskasi, A.; Duong, T.; Lin, S.; Tzes, M.-E.; Pappas, G.; and Atanasov, N. 2023. Optimal Scene Graph Planning with Large Language Model Guidance. arXiv preprint arXiv:2309.09182.
214
+
215
+ Dalal, M.; Chiruvolu, T.; Chaplot, D. S.; and Salakhutdinov, R. 2023. Plan-Seq-Learn: Language Model Guided RL for Solving Long Horizon Robotics Tasks. In 2nd Workshop on Language and Robot Learning: Language as Grounding.
216
+ de Zarza, I.; de Curto, J.; Roig, G.; Manzoni, P.; and Calafate, C. T. 2023. Emergent Cooperation and Strategy Adaptation in Multi-Agent Systems: An Extended Coevolutionary Theory with LLMs. *Electronics*, 12(12): 2722.
217
+ Ding, Y.; Zhang, X.; Amiri, S.; Cao, N.; Yang, H.; Kaminski, A.; Esselink, C.; and Zhang, S. 2023a. Integrating Action Knowledge and LLMs for Task Planning and Situation Handling in Open Worlds. arXiv preprint arXiv:2305.17590.
218
+ Ding, Y.; Zhang, X.; Paxton, C.; and Zhang, S. 2023b. Leveraging Commonsense Knowledge from Large Language Models for Task and Motion Planning. In RSS 2023 Workshop on Learning for Task and Motion Planning.
219
+ Ding, Y.; Zhang, X.; Paxton, C.; and Zhang, S. 2023c. Task and motion planning with large language models for object rearrangement. arXiv preprint arXiv:2303.06247.
220
+ Driess, D.; Xia, F.; Sajjadi, M. S.; Lynch, C.; Chowdhery, A.; Ichter, B.; Wahid, A.; Tompson, J.; Vuong, Q.; Yu, T.; et al. 2023. Palm-e: An embodied multimodal language model. arXiv preprint arXiv:2303.03378.
221
+ Du, M.; Luu, A. T.; Ji, B.; and Ng, S.-k. 2023. From Static to Dynamic: A Continual Learning Framework for Large Language Models. arXiv preprint arXiv:2310.14248.
222
+ Elaraby, M.; Lu, M.; Dunn, J.; Zhang, X.; Wang, Y.; and Liu, S. 2023. Halo: Estimation and reduction of hallucinations in open-source weak large language models. arXiv preprint arXiv:2308.11764.
223
+ Fabiano, F.; Pallagani, V.; Ganapini, M. B.; Horesh, L.; Loreggia, A.; Murugesan, K.; Rossi, F.; and Srivastava, B. 2023. Fast and Slow Planning. arXiv preprint arXiv:2303.04283.
224
+ Feng, X.; Wan, Z.; Wen, M.; Wen, Y.; Zhang, W.; and Wang, J. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179.
225
+ Gandhi, K.; Sadigh, D.; and Goodman, N. D. 2023. Strategic Reasoning with Language Models. arXiv preprint arXiv:2305.19165.
226
+ Ge, Y.; Hua, W.; Ji, J.; Tan, J.; Xu, S.; and Zhang, Y. 2023. Openagi: When llm meets domain experts. arXiv preprint arXiv:2304.04370.
227
+ Georgievski, I.; and Aiello, M. 2015. HTN planning: Overview, comparison, and beyond. Artificial Intelligence, 222: 124-156.
228
+ Gerevini, A.; and Serina, I. 2002. LPG: A Planner Based on Local Search for Planning Graphs with Action Costs. In AIPS, volume 2, 281-290.
229
+ Ghallab, M.; Nau, D.; and Traverso, P. 2004. Automated Planning: Theory and Practice. The Morgan Kaufmann Series in Artificial Intelligence. Amsterdam: Morgan Kaufmann. ISBN 978-1-55860-856-6.
230
+
231
+ Gragera, A.; and Pozanco, A. 2023. Exploring the Limitations of using Large Language Models to Fix Planning Tasks.
232
+ Gramopadhye, M.; and Szafir, D. 2022. Generating executable action plans with environmentally-aware language models. arXiv preprint arXiv:2210.04964.
233
+ Graule, M. A.; and Isler, V. 2023. GG-LLM: Geometrically Grounding Large Language Models for Zero-shot Human Activity Forecasting in Human-Aware Task Planning. arXiv preprint arXiv:2310.20034.
234
+ Gu, Q.; Kuwajerwala, A.; Morin, S.; Jatavallabhula, K. M.; Sen, B.; Agarwal, A.; Rivera, C.; Paul, W.; Ellis, K.; Chellappa, R.; et al. 2023. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. arXiv preprint arXiv:2309.16650.
235
+ Guan, L.; Valmeekam, K.; Sreedharan, S.; and Kambhampati, S. 2023. Leveraging Pre-trained Large Language Models to Construct and Utilize World Models for Model-based Task Planning. arXiv preprint arXiv:2305.14909.
236
+ Hao, S.; Gu, Y.; Ma, H.; Hong, J. J.; Wang, Z.; Wang, D. Z.; and Hu, Z. 2023a. Reasoning with language model is planning with world model. arXiv preprint arXiv:2305.14992.
237
+ Hao, S.; Liu, T.; Wang, Z.; and Hu, Z. 2023b. ToolkenGPT: Augmenting Frozen Language Models with Massive Tools via Tool Embeddings. arXiv preprint arXiv:2305.11554.
238
+ Hazra, R.; Martires, P. Z. D.; and De Raedt, L. 2023. SayCanPay: Heuristic Planning with Large Language Models using Learnable Domain Knowledge. arXiv preprint arXiv:2308.12682.
239
+ Howey, R.; Long, D.; and Fox, M. 2004. VAL: automatic plan validation, continuous effects and mixed initiative planning using PDDL. In 16th IEEE International Conference on Tools with Artificial Intelligence, 294-301.
240
+ Hsieh, C.-Y.; Chen, S.-A.; Li, C.-L.; Fujii, Y.; Ratner, A.; Lee, C.-Y.; Krishna, R.; and Pfister, T. 2023. Tool documentation enables zero-shot tool-usage with large language models. arXiv preprint arXiv:2308.00675.
241
+ Hu, B.; Zhao, C.; Zhang, P.; Zhou, Z.; Yang, Y.; Xu, Z.; and Liu, B. 2023a. Enabling Efficient Interaction between an Algorithm Agent and an LLM: A Reinforcement Learning Approach. arXiv preprint arXiv:2306.03604.
242
+ Hu, H.; Lu, H.; Zhang, H.; Lam, W.; and Zhang, Y. 2023b. Chain-of-Symbol Prompting Elicits Planning in Large Language Models. arXiv preprint arXiv:2305.10276.
243
+ Hu, M.; Mu, Y.; Yu, X.; Ding, M.; Wu, S.; Shao, W.; Chen, Q.; Wang, B.; Qiao, Y.; and Luo, P. 2023c. Tree-Planner: Efficient Close-loop Task Planning with Large Language Models. arXiv preprint arXiv:2310.08582.
244
+ Hu, P.; Qi, J.; Li, X.; Li, H.; Wang, X.; Quan, B.; Wang, R.; and Zhou, Y. 2023d. Tree-of-mixed-thought: Combining fast and slow thinking for multi-hop visual reasoning. arXiv preprint arXiv:2308.09658.
245
+ Hu, Z.; and Shu, T. 2023. Language Models, Agent Models, and World Models: The LAW for Machine Reasoning and Planning. arXiv:2312.05230.
246
+
247
+ Hua, W.; Fan, L.; Li, L.; Mei, K.; Ji, J.; Ge, Y.; Hemphill, L.; and Zhang, Y. 2023. War and peace (waragent): Large language model-based multi-agent simulation of world wars. arXiv preprint arXiv:2311.17227.
248
+ Huang, W.; Abbeel, P.; Pathak, D.; and Mordatch, I. 2022a. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International Conference on Machine Learning, 9118-9147. PMLR.
249
+ Huang, W.; Wang, C.; Zhang, R.; Li, Y.; Wu, J.; and Fei-Fei, L. 2023a. Voxposer: Composable 3d value maps for robotic manipulation with language models. arXiv preprint arXiv:2307.05973.
250
+ Huang, W.; Xia, F.; Shah, D.; Driess, D.; Zeng, A.; Lu, Y.; Florence, P.; Mordatch, I.; Levine, S.; Hausman, K.; et al. 2023b. Grounded decoding: Guiding text generation with grounded models for robot control. arXiv preprint arXiv:2303.00855.
251
+ Huang, W.; Xia, F.; Xiao, T.; Chan, H.; Liang, J.; Florence, P.; Zeng, A.; Tompson, J.; Mordatch, I.; Chebotar, Y.; et al. 2022b. Inner monologue: Embodied reasoning through planning with language models. arXiv preprint arXiv:2207.05608.
252
+ Jha, S. K.; Jha, S.; Lincoln, P.; Bastian, N. D.; Velasquez, A.; Ewetz, R.; and Neema, S. 2023. Neuro Symbolic Reasoning for Planning: Counterexample Guided Inductive Synthesis using Large Language Models and Satisfiability Solving. arXiv preprint arXiv:2309.16436.
253
+ Joublin, F.; Ceravola, A.; Smirnov, P.; Ocker, F.; Deigmoeller, J.; Belardinelli, A.; Wang, C.; Hasler, S.; Tanneberg, D.; and Gienger, M. 2023. CoPAL: Corrective Planning of Robot Actions with Large Language Models. arXiv preprint arXiv:2310.07263.
254
+ Kannan, S. S.; Venkatesh, V. L.; and Min, B.-C. 2023. SMART-LLM: Smart Multi-Agent Robot Task Planning using Large Language Models. arXiv preprint arXiv:2309.10062.
255
+ Kant, Y.; Ramachandran, A.; Yenamandra, S.; Gilitschenski, I.; Batra, D.; Szot, A.; and Agrawal, H. 2022. Housekeep: Tidying virtual households using commonsense reasoning. In European Conference on Computer Vision, 355-373. Springer.
256
+ Kautz, H. A. 2022. The third AI summer: AAAI Robert S. Engelmore Memorial Lecture. AI Magazine, 43(1): 105-125.
257
+ Kelly, J.; Calderwood, A.; Wardrip-Fruin, N.; and Mateas, M. 2023. There and back again: extracting formal domains for controllable neurosymbolic story authoring. In Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment, volume 19, 64-74.
258
+ Kim, G.; Kim, T.; Kannan, S. S.; Venkatesh, V. L.; Kim, D.; and Min, B.-C. 2023. DynaCon: Dynamic Robot Planner with Contextual Awareness via LLMs. arXiv preprint arXiv:2309.16031.
259
+ Kirk, J. R.; Wray, R. E.; and Laird, J. E. 2023. Exploiting Language Models as a Source of Knowledge for Cognitive Agents. arXiv preprint arXiv:2310.06846.
260
+
261
+ Lenat, D.; and Marcus, G. 2023. Getting from generative ai to trustworthy ai: What llms might learn from cyc. arXiv preprint arXiv:2308.04445.
262
+ Li, M.; Song, F.; Yu, B.; Yu, H.; Li, Z.; Huang, F.; and Li, Y. 2023a. Api-bank: A benchmark for tool-augmented llms. arXiv preprint arXiv:2304.08244.
263
+ Li, Y.; Kamra, N.; Desai, R.; and Halevy, A. 2023b. Human-Centered Planning. arXiv preprint arXiv:2311.04403.
264
+ Lin, B. Y.; Fu, Y.; Yang, K.; Ammanabrolu, P.; Brahman, F.; Huang, S.; Bhagavatula, C.; Choi, Y.; and Ren, X. 2023a. SwiftSage: A Generative Agent with Fast and Slow Thinking for Complex Interactive Tasks. arXiv preprint arXiv:2305.17390.
265
+ Lin, H.; Zala, A.; Cho, J.; and Bansal, M. 2023b. Videodirectorgpt: Consistent multi-scene video generation via llm-guided planning. arXiv preprint arXiv:2309.15091.
266
+ Lin, K.; Agia, C.; Migimatsu, T.; Pavone, M.; and Bohg, J. 2023c. Text2motion: From natural language instructions to feasible plans. arXiv preprint arXiv:2303.12153.
267
+ Liu, B.; Jiang, Y.; Zhang, X.; Liu, Q.; Zhang, S.; Biswas, J.; and Stone, P. 2023. Llm+ p: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477.
268
+ Liu, Z.; Bahety, A.; and Song, S. 2023. Reflect: Summarizing robot experiences for failure explanation and correction. arXiv preprint arXiv:2306.15724.
269
+ Logeswaran, L.; Sohn, S.; Lyu, Y.; Liu, A. Z.; Kim, D.-K.; Shim, D.; Lee, M.; and Lee, H. 2023. Code Models are Zero-shot Precondition Reasoners. arXiv preprint arXiv:2311.09601.
270
+ Lu, P.; Peng, B.; Cheng, H.; Galley, M.; Chang, K.-W.; Wu, Y. N.; Zhu, S.-C.; and Gao, J. 2023a. Chameleon: Plug-and-play compositional reasoning with large language models. arXiv preprint arXiv:2304.09842.
271
+ Lu, Y.; Feng, W.; Zhu, W.; Xu, W.; Wang, X. E.; Eckstein, M.; and Wang, W. Y. 2022. Neuro-symbolic causal language planning with commonsense prompting. arXiv eprints, arXiv-2206.
272
+ Lu, Y.; Lu, P.; Chen, Z.; Zhu, W.; Wang, X. E.; and Wang, W. Y. 2023b. Multimodal Procedural Planning via Dual Text-Image Prompting. arXiv preprint arXiv:2305.01795.
273
+ Luo, L.; Li, Y.-F.; Haffari, G.; and Pan, S. 2023. Reasoning on graphs: Faithful and interpretable large language model reasoning. arXiv preprint arXiv:2310.01061.
274
+ Mandi, Z.; Jain, S.; and Song, S. 2023. Roco: Dialectic multi-robot collaboration with large language models. arXiv preprint arXiv:2307.04738.
275
+ McDermott, D.; Ghallab, M.; Howe, A.; Knoblock, C.; Ram, A.; Veloso, M.; Weld, D.; and Wilkins, D. 1998. PDDL-the planning domain definition language.
276
+ Momennejad, I.; Hasanbeig, H.; Vieira, F.; Sharma, H.; Ness, R. O.; Jojic, N.; Palangi, H.; and Larson, J. 2023. Evaluating Cognitive Maps and Planning in Large Language Models with CogEval. arXiv preprint arXiv:2309.15129.
277
+
278
+ Naik, R.; Chandrasekaran, V.; Yuksekgonul, M.; Palangi, H.; and Nushi, B. 2023. Diversity of Thought Improves Reasoning Abilities of Large Language Models. arXiv preprint arXiv:2310.07088.
279
+ Nottingham, K.; Ammanabrolu, P.; Suhr, A.; Choi, Y.; Hajishirzi, H.; Singh, S.; and Fox, R. 2023. Do embodied agents dream of pixelated sheep?: Embodied decision making using language guided world modelling. arXiv preprint arXiv:2301.12050.
280
+ Pallagani, V.; Muppasani, B.; Murugesan, K.; Rossi, F.; Horesh, L.; Srivastava, B.; Fabiano, F.; and Loreggia, A. 2022. Plansformer: Generating symbolic plans using transformers. arXiv preprint arXiv:2212.08681.
281
+ Pallagani, V.; Muppasani, B.; Murugesan, K.; Rossi, F.; Srivastava, B.; Horesh, L.; Fabiano, F.; and Loreggia, A. 2023a. Understanding the Capabilities of Large Language Models for Automated Planning. arXiv preprint arXiv:2305.16151.
282
+ Pallagani, V.; Muppasani, B.; Srivastava, B.; Rossi, F.; Horesh, L.; Murugesan, K.; Loreggia, A.; Fabiano, F.; Joseph, R.; Kethepalli, Y.; et al. 2023b. Plansformer Tool: Demonstrating Generation of Symbolic Plans Using Transformers. In IJCAI, volume 2023, 7158-7162. International Joint Conferences on Artificial Intelligence.
283
+ Pan, L.; Albalak, A.; Wang, X.; and Wang, W. Y. 2023. Logic-lm: Empowering large language models with symbolic solvers for faithful logical reasoning. arXiv preprint arXiv:2305.12295.
284
+ Parakh, M.; Fong, A.; Simeonov, A.; Gupta, A.; Chen, T.; and Agrawal, P. 2023. Human-Assisted Continual Robot Learning with Foundation Models. arXiv preprint arXiv:2309.14321.
285
+ Raimondo, S.; Pal, C.; Liu, X.; Vazquez, D.; and Palacios, H. 2023. Improving Generalization in Task-oriented Dialogues with Workflows and Action Plans. arXiv preprint arXiv:2306.01729.
286
+ Rajvanshi, A.; Sikka, K.; Lin, X.; Lee, B.; Chiu, H.-P.; and Velasquez, A. 2023. Saynav: Grounding large language models for dynamic planning to navigation in new environments. arXiv preprint arXiv:2309.04077.
287
+ Raman, S. S.; Cohen, V.; Rosen, E.; Idrees, I.; Paulius, D.; and Tellex, S. 2022. Planning with large language models via corrective re-prompting. In NeurIPS 2022 Foundation Models for Decision Making Workshop.
288
+ Rana, K.; Haviland, J.; Garg, S.; Abou-Chakra, J.; Reid, I.; and Suenderhauf, N. 2023. Sayplan: Grounding large language models using 3d scene graphs for scalable task planning. arXiv preprint arXiv:2307.06135.
289
+ Ren, A. Z.; Dixit, A.; Bodrova, A.; Singh, S.; Tu, S.; Brown, N.; Xu, P.; Takayama, L.; Xia, F.; Varley, J.; et al. 2023. Robots that ask for help: Uncertainty alignment for large language model planners. arXiv preprint arXiv:2307.01928.
290
+ Ruan, J.; Chen, Y.; Zhang, B.; Xu, Z.; Bao, T.; Du, G.; Shi, S.; Mao, H.; Zeng, X.; and Zhao, R. 2023. Tptu: Task planning and tool usage of large language model-based ai agents. arXiv preprint arXiv:2308.03427.
291
+ Russell, S.; and Norvig, P. 2003. Artificial Intelligence: A Modern Approach. Second Edition.
292
+
293
+ Sakib, M. S.; and Sun, Y. 2023. From Cooking Recipes to Robot Task Trees—Improving Planning Correctness and Task Efficiency by Leveraging LLMs with a Knowledge Network. arXiv preprint arXiv:2309.09181.
294
+ Sarkisyan, C.; Korchemnyi, A.; Kovalev, A. K.; and Panov, A. I. 2023. Evaluation of Pretrained Large Language Models in Embodied Planning Tasks. In International Conference on Artificial General Intelligence, 222-232. Springer.
295
+ Sermanet, P.; Ding, T.; Zhao, J.; Xia, F.; Dwibedi, D.; Gopalakrishnan, K.; Chan, C.; Dulac-Arnold, G.; Maddineni, S.; Joshi, N. J.; et al. 2023. RoboVQA: Multimodal Long-Horizon Reasoning for Robotics. arXiv preprint arXiv:2311.00899.
296
+ Shah, D.; Equi, M.; Osinski, B.; Xia, F.; Ichter, B.; and Levine, S. 2023. Navigation with large language models: Semantic guesswork as a heuristic for planning. arXiv preprint arXiv:2310.10103.
297
+ Shen, Y.; Song, K.; Tan, X.; Li, D.; Lu, W.; and Zhuang, Y. 2023. Hugginggpt: Solving ai tasks with chatgpt and its friends in huggingface. arXiv preprint arXiv:2303.17580.
298
+ Shirai, K.; Beltran-Hernandez, C. C.; Hamaya, M.; Hashimoto, A.; Tanaka, S.; Kawaharazuka, K.; Tanaka, K.; Ushiku, Y.; and Mori, S. 2023. Vision-Language Interpreter for Robot Task Planning. arXiv preprint arXiv:2311.00967.
299
+ Silver, T.; Dan, S.; Srinivas, K.; Tenenbaum, J. B.; Kaelbling, L. P.; and Katz, M. 2023. Generalized Planning in PDDL Domains with Pretrained Large Language Models. arXiv preprint arXiv:2305.11014.
300
+ Silver, T.; Hariprasad, V.; Shuttleworth, R. S.; Kumar, N.; Lozano-Pérez, T.; and Kaelbling, L. P. 2022. PDDL planning with pretrained large language models. In NeurIPS 2022 Foundation Models for Decision Making Workshop.
301
+ Singh, I.; Blukis, V.; Mousavian, A.; Goyal, A.; Xu, D.; Tremblay, J.; Fox, D.; Thomason, J.; and Garg, A. 2023. ProgPrompt: program generation for situated robot task planning using large language models. Autonomous Robots, 1-14.
302
+ Song, C. H.; Wu, J.; Washington, C.; Sadler, B. M.; Chao, W.-L.; and Su, Y. 2023. Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2998-3009.
303
+ Sumers, T.; Yao, S.; Narasimhan, K.; and Griffiths, T. L. 2023. Cognitive architectures for language agents. arXiv preprint arXiv:2309.02427.
304
+ Sun, H.; Zhuang, Y.; Kong, L.; Dai, B.; and Zhang, C. 2023. AdaPlanner: Adaptive Planning from Feedback with Language Models. arXiv preprint arXiv:2305.16653.
305
+ Tang, X.; Zheng, Z.; Li, J.; Meng, F.; Zhu, S.-C.; Liang, Y.; and Zhang, M. 2023. Large Language Models are In-Context Semantic Reasoners rather than Symbolic Reasoners. arXiv preprint arXiv:2305.14825.
306
+ Valmeekam, K.; Marquez, M.; and Kambhampati, S. 2023. Can Large Language Models Really Improve by Self-critiquing Their Own Plans? arXiv preprint arXiv:2310.08118.
307
+
308
+ Valmeekam, K.; Marquez, M.; Olmo, A.; Sreedharan, S.; and Kambhampati, S. 2023a. PlanBench: An Extensible Benchmark for Evaluating Large Language Models on Planning and Reasoning about Change. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track.
309
+ Valmeekam, K.; Olmo, A.; Sreedharan, S.; and Kambhampati, S. 2022. Large Language Models Still Can't Plan (A Benchmark for LLMs on Planning and Reasoning about Change). arXiv preprint arXiv:2206.10498.
310
+ Valmeekam, K.; Sreedharan, S.; Marquez, M.; Olmo, A.; and Kambhampati, S. 2023b. On the planning abilities of large language models (a critical investigation with a proposed benchmark). arXiv preprint arXiv:2302.06706.
311
+ Wang, J.; Tong, J.; Tan, K.; Vorobeychik, Y.; and Kantaros, Y. 2023a. Conformal Temporal Logic Planning using Large Language Models: Knowing When to Do What and When to Ask for Help. arXiv preprint arXiv:2309.10092.
312
+ Wang, L.; Xu, W.; Lan, Y.; Hu, Z.; Lan, Y.; Lee, R. K.-W.; and Lim, E.-P. 2023b. Plan-and-solve prompting: Improving zero-shot chain-of-thought reasoning by large language models. arXiv preprint arXiv:2305.04091.
313
+ Wang, X.; Caccia, L.; Ostapenko, O.; Yuan, X.; and Sordoni, A. 2023c. Guiding language model reasoning with planning tokens. arXiv preprint arXiv:2310.05707.
314
+ Wang, Z.; Cai, S.; Liu, A.; Ma, X.; and Liang, Y. 2023d. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. arXiv preprint arXiv:2302.01560.
315
+ Webb, T.; Mondal, S. S.; Wang, C.; Krabach, B.; and Momennejad, I. 2023. A Prefrontal Cortex-inspired Architecture for Planning in Large Language Models. arXiv preprint arXiv:2310.00194.
316
+ Wei, L.; He, Z.; Zhao, H.; and Yao, Q. 2023. Unleashing the Power of Graph Learning through LLM-based Autonomous Agents. arXiv preprint arXiv:2309.04565.
317
+ Wong, L.; Grand, G.; Lew, A. K.; Goodman, N. D.; Mansinghka, V. K.; Andreas, J.; and Tenenbaum, J. B. 2023. From Word Models to World Models: Translating from Natural Language to the Probabilistic Language of Thought. arXiv preprint arXiv:2306.12672.
318
+ Wu, Y.; Min, S. Y.; Bisk, Y.; Salakhutdinov, R.; Azaria, A.; Li, Y.; Mitchell, T.; and Prabhumoye, S. 2023a. Plan, Eliminate, and Track -- Language Models are Good Teachers for Embodied Agents. arXiv preprint arXiv:2305.02412.
319
+ Wu, Z.; Ai, B.; and Hsu, D. 2023. Integrating Common Sense and Planning with Large Language Models for Room Tidying. In RSS 2023 Workshop on Learning for Task and Motion Planning.
320
+ Wu, Z.; Wang, Z.; Xu, X.; Lu, J.; and Yan, H. 2023b. Embodied task planning with large language models. arXiv preprint arXiv:2307.01848.
321
+ Xie, Y.; Yu, C.; Zhu, T.; Bai, J.; Gong, Z.; and Soh, H. 2023. Translating natural language to planning goals with large-language models. arXiv preprint arXiv:2302.05128.
322
+
323
+ Xu, B.; Liu, X.; Shen, H.; Han, Z.; Li, Y.; Yue, M.; Peng, Z.; Liu, Y.; Yao, Z.; and Xu, D. 2023a. Gentopia: A collaborative platform for tool-augmented llms. arXiv preprint arXiv:2308.04030.
324
+ Xu, L. 1995. Case based reasoning. IEEE Potentials, 13(5): 10-13.
325
+ Xu, M.; Huang, P.; Yu, W.; Liu, S.; Zhang, X.; Niu, Y.; Zhang, T.; Xia, F.; Tan, J.; and Zhao, D. 2023b. Creative Robot Tool Use with Large Language Models. arXiv preprint arXiv:2310.13065.
326
+ Yang, J.; Chen, X.; Qian, S.; Madaan, N.; Iyengar, M.; Fouhey, D. F.; and Chai, J. 2023a. LLM-Grounder: Open-Vocabulary 3D Visual Grounding with Large Language Model as an Agent. arXiv preprint arXiv:2309.12311.
327
+ Yang, R.; Hou, M.; Wang, J.; and Zhang, F. 2023b. OceanChat: Piloting Autonomous Underwater Vehicles in Natural Language. arXiv preprint arXiv:2309.16052.
328
+ Yang, Y.; Gaglione, J.-R.; and Topcu, U. 2022. Learning Automata-Based Task Knowledge Representation from Large-Scale Generative Language Models. arXiv preprint arXiv:2212.01944.
329
+ Yang, Y.; and Tomar, A. 2023. On the Planning, Search, and Memorization Capabilities of Large Language Models. arXiv preprint arXiv:2309.01868.
330
+ Yang, Z. 2023. Neuro-Symbolic AI Approaches to Enhance Deep Neural Networks with Logical Reasoning and Knowledge Integration. Ph.D. thesis, Arizona State University.
331
+ Yang, Z.; Ishay, A.; and Lee, J. 2023. Coupling Large Language Models with Logic Programming for Robust and General Reasoning from Text. arXiv preprint arXiv:2307.07696.
332
+ Yao, S.; Yu, D.; Zhao, J.; Shafran, I.; Griffiths, T. L.; Cao, Y.; and Narasimhan, K. 2023. Tree of thoughts: Deliberate problem solving with large language models. arXiv preprint arXiv:2305.10601.
333
+ Yoneda, T.; Fang, J.; Li, P.; Zhang, H.; Jiang, T.; Lin, S.; Picker, B.; Yunis, D.; Mei, H.; and Walter, M. R. 2023. Statler: State-maintaining language models for embodied reasoning. arXiv preprint arXiv:2306.17840.
334
+ You, W.; Wu, W.; Liang, Y.; Mao, S.; Wu, C.; Cao, M.; Cai, Y.; Guo, Y.; Xia, Y.; Wei, F.; et al. 2023. EIPE-text: Evaluation-Guided Iterative Plan Extraction for Long-Form Narrative Text Generation. arXiv preprint arXiv:2310.08185.
335
+ Yuan, H.; Zhang, C.; Wang, H.; Xie, F.; Cai, P.; Dong, H.; and Lu, Z. 2023a. Plan4mc: Skill reinforcement learning and planning for open-world Minecraft tasks. arXiv preprint arXiv:2303.16563.
336
+ Yuan, S.; Chen, J.; Fu, Z.; Ge, X.; Shah, S.; Jankowski, C. R.; Yang, D.; and Xiao, Y. 2023b. Distilling Script Knowledge from Large Language Models for Constrained Language Planning. arXiv preprint arXiv:2305.05252.
337
+ Zelikman, E.; Huang, Q.; Poesia, G.; Goodman, N.; and Haber, N. 2023. Parsel: Algorithmic Reasoning with Language Models by Composing Decompositions. In Thirty-seventh Conference on Neural Information Processing Systems.
338
+
339
+ Zhang, B.; and Soh, H. 2023. Large language models as zero-shot human models for human-robot interaction. arXiv preprint arXiv:2303.03548.
340
+ Zhang, C.; Liu, L.; Wang, J.; Wang, C.; Sun, X.; Wang, H.; and Cai, M. 2023a. Prefer: Prompt ensemble learning via feedback-reflect-refine. arXiv preprint arXiv:2308.12033.
341
+ Zhang, F.; Jin, K.; and Zhuo, H. H. 2023. Planning with Logical Graph-based Language Model for Instruction Generation. arXiv:2308.13782.
342
+ Zhang, H.; Du, W.; Shan, J.; Zhou, Q.; Du, Y.; Tenenbaum, J. B.; Shu, T.; and Gan, C. 2023b. Building cooperative embodied agents modularly with large language models. arXiv preprint arXiv:2307.02485.
343
+ Zhang, J.; Zhang, J.; Pertsch, K.; Liu, Z.; Ren, X.; Chang, M.; Sun, S.-H.; and Lim, J. J. 2023c. Bootstrap your own skills: Learning to solve new tasks with large language model guidance. arXiv preprint arXiv:2310.10021.
344
+ Zhao, Z.; Lee, W. S.; and Hsu, D. 2023. Large Language Models as Commonsense Knowledge for Large-Scale Task Planning. arXiv preprint arXiv:2305.14078.
345
+ Zheng, L.; Chiang, W.-L.; Sheng, Y.; Zhuang, S.; Wu, Z.; Zhuang, Y.; Lin, Z.; Li, Z.; Li, D.; Xing, E. P.; Zhang, H.; Gonzalez, J. E.; and Stoica, I. 2023a. Judging LLM-as-a-judge with MT-Bench and Chatbot Arena. arXiv:2306.05685.
346
+ Zheng, S.; Liu, J.; Feng, Y.; and Lu, Z. 2023b. Steve-Eye: Equipping LLM-based Embodied Agents with Visual Perception in Open Worlds. arXiv preprint arXiv:2310.13255.
347
+ Zhou, Z.; Song, J.; Yao, K.; Shu, Z.; and Ma, L. 2023. ISR-LLM: Iterative Self-Refined Large Language Model for Long-Horizon Sequential Task Planning. arXiv preprint arXiv:2308.13724.
2401.02xxx/2401.02500/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d72f06394093404f6ea439fff4a8e9c617d43794e211da4699ba6a95abb1344
3
+ size 617756
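
The three added lines above are a standard Git LFS pointer recorded in place of the `images.zip` archive itself: a spec version line, a `sha256` object id, and the byte size. As a minimal, hedged sketch (not part of this dataset's tooling; the helper names `read_lfs_pointer` and `matches_pointer` and the paths in the example are hypothetical), such a pointer could be parsed and a separately downloaded blob checked against it like this:

```python
# Illustrative sketch only: parse a Git LFS pointer file (three "key value"
# lines, as shown above) and verify a downloaded blob against it.
import hashlib
from pathlib import Path


def read_lfs_pointer(pointer_path: str) -> dict:
    """Return {'version', 'oid', 'size'} parsed from a pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value.strip()
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }


def matches_pointer(blob_path: str, pointer: dict) -> bool:
    """True if the blob's size and sha256 digest match the pointer."""
    data = Path(blob_path).read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["oid"])


if __name__ == "__main__":
    # Hypothetical paths: the pointer as checked out without LFS smudging,
    # and the real archive fetched separately.
    ptr = read_lfs_pointer("2401.02xxx/2401.02500/images.zip")
    print(ptr["oid"], ptr["size"], matches_pointer("downloads/images.zip", ptr))
```

For the pointer shown above, the parsed `oid` and `size` would simply echo the sha256 and 617756 bytes listed in the diff; the verification step only matters once the actual archive has been pulled.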
2401.02xxx/2401.02500/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02524/5908bf5d-8a3c-4dd5-b251-368b8dbc0e8b_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67e63cbad2d31801c6ae178b56415e21e836a70fa2d1730906dba7218f6ba8ee
3
+ size 15427270
2401.02xxx/2401.02524/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02524/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:069b4b8bc78346e898827b50b319fd74cbe4b354ca01c96768845071ff119cab
3
+ size 14794448
2401.02xxx/2401.02524/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_content_list.json ADDED
@@ -0,0 +1,1406 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "COCOT: CONTRASTIVE CHAIN-OF-THOUGHT PROMPTING FOR LARGE MULTIMODAL MODELS WITH MULTIPLE IMAGE INPUTS",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 84,
8
+ 117,
9
+ 911,
10
+ 152
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Daoan Zhang $^{1,\\ast}$ , Junming Yang $^{2,\\ast}$ , Hanjia Lyu $^{1,\\ast}$ , Zijian Jin $^{3}$ , Yuan Yao $^{1}$ , Mingkai Chen $^{4}$ , Jiebo Luo $^{1}$",
17
+ "bbox": [
18
+ 102,
19
+ 172,
20
+ 890,
21
+ 191
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "<sup>1</sup> University of Rochester, <sup>2</sup> Nanjing University of Posts and Telecommunications,",
28
+ "bbox": [
29
+ 174,
30
+ 208,
31
+ 820,
32
+ 224
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "$^{3}$ New York University, $^{4}$ Stony Brook University,",
39
+ "bbox": [
40
+ 303,
41
+ 226,
42
+ 694,
43
+ 243
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "* These authors contributed equally to this work.",
50
+ "bbox": [
51
+ 307,
52
+ 244,
53
+ 689,
54
+ 261
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "ABSTRACT",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 240,
64
+ 295,
65
+ 331,
66
+ 309
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "When exploring the development of Artificial General Intelligence (AGI), a critical task for these models involves interpreting and processing information from multiple image inputs. However, Large Multimodal Models (LMMs) encounter two issues in such scenarios: (1) a lack of fine-grained perception, and (2) a tendency to blend information across multiple images. We first extensively investigate the capability of LMMs to perceive fine-grained visual details when dealing with multiple input images. The research focuses on two aspects: first, image-to-image matching (to evaluate whether LMMs can effectively reason and pair relevant images), and second, multi-image-to-text matching (to assess whether LMMs can accurately capture and summarize detailed image information). We conduct evaluations on a range of both open-source and closed-source large models, including GPT-4V, GEMINI, OPENFLAMINGO, and MMICL. To enhance model performance, we further develop a Contrastive Chain-of-Thought (CoCoT) prompting approach based on multi-input multimodal models. This method requires LMMs to compare the similarities and differences among multiple image inputs, and then guide the models to answer detailed questions about multi-image inputs based on the identified similarities and differences. Our experimental results showcase CoCoT's proficiency in enhancing the multi-image comprehension capabilities of large multimodal models.",
73
+ "bbox": [
74
+ 86,
75
+ 315,
76
+ 486,
77
+ 691
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Index Terms—Large Multimodal Model, Multimodal Prompting, Large Language Model",
84
+ "bbox": [
85
+ 83,
86
+ 699,
87
+ 486,
88
+ 729
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "1. INTRODUCTION",
95
+ "text_level": 1,
96
+ "bbox": [
97
+ 207,
98
+ 750,
99
+ 364,
100
+ 762
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "Recent advancements in Large Language Models (LLMs) [1, 2, 3] have sparked optimism in the pursuit of Artificial General Intelligence (AGI). Given the pivotal role of vision in human information acquisition, its integration is crucial for AGI's perceptual capabilities. To bridge the gap between textual and visual modalities, researchers are experimenting with aligning language with vision [4, 5] and directly encoding visual inputs into discrete tokens [6, 7]. These efforts have demonstrated the substantial potential of large multimodal",
107
+ "bbox": [
108
+ 81,
109
+ 776,
110
+ 486,
111
+ 912
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "models in processing multimodal content. However, they still fall short of human-like perception of the world [8, 9]. One significant challenge is the loss of image detail when using natural language, a medium less precise than visual data. Complex visual information, such as subtle lighting shifts or intricate patterns, often requires comprehensive verbal description. Another hurdle is understanding the relationship between multiple image inputs. Language-based descriptions of relationships and interactions within and across images can become challenging, necessitating explanations of both individual elements and their spatial and contextual ties. This complexity often results in ambiguous or overly extensive explanations, highlighting the limitations of current models in emulating human-like perception. To address these two issues, researchers have developed various multimodal prompting strategies [10, 11] in an attempt to guide LMMs in extracting essential information from the visual content for effective multi-image understanding. Although these methods have shown proficiency in comprehending single-image contexts, they encounter obstacles when it comes to discerning relationships between multiple images. This difficulty primarily stems from an insufficient focus on key information, which requires joint consideration of all images involved.",
118
+ "bbox": [
119
+ 508,
120
+ 295,
121
+ 913,
122
+ 642
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "In our study, we introduce Contrastive Chain-of-Thought (CoCoT), a novel multimodal prompting strategy designed to overcome the limitations of existing prompting strategies and enhance LMMs' performance in multi-image tasks. CoCoT prompts LMMs to discern and articulate the similarities and differences among various inputs, laying the groundwork for answering detailed, multi-image-based questions (Fig. 1). This method sharpens the models' focus, particularly on the distinctions between inputs, ensuring comprehensive capture of nuanced, question-relevant information during summarization. We rigorously evaluate CoCoT in two multi-image tasks. The first task, image-to-image matching, assesses the LMMs' competence in logically associating and pairing relevant images. The second task, image-to-text matching, tests the LMMs' ability to accurately capture and summarize detailed image data. Our evaluations span both widely-used open-source models and leading-edge closed-source models.",
129
+ "bbox": [
130
+ 508,
131
+ 656,
132
+ 913,
133
+ 912
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "aside_text",
139
+ "text": "arXiv:2401.02582v1 [cs.CV] 5 Jan 2024",
140
+ "bbox": [
141
+ 22,
142
+ 273,
143
+ 57,
144
+ 700
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "image",
150
+ "img_path": "images/41f476dff395b09b957c09628c1bbbe733413d75719c1818c7f7ef1f02813621.jpg",
151
+ "image_caption": [
152
+ "Fig. 1. Comparison between different multimodal prompting strategies. The unique components in each prompting strategy's corresponding response are highlighted in varied colors. Note that GPT-4V is used in this example."
153
+ ],
154
+ "image_footnote": [],
155
+ "bbox": [
156
+ 102,
157
+ 87,
158
+ 890,
159
+ 425
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "To summarize, our main contributions are:",
166
+ "bbox": [
167
+ 84,
168
+ 474,
169
+ 367,
170
+ 489
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "list",
176
+ "sub_type": "text",
177
+ "list_items": [
178
+ "- We find that most current models do not perform well in fine-grained multi-image tasks.",
179
+ "- To address the issues with existing methods, we propose a novel Contrastive Chain-of-Thought (CoCoT) prompting strategy to enhance models' understanding of the relationships between multiple image inputs.",
180
+ "- Our proposed method produces significant improvement for both open-source and closed-source models."
181
+ ],
182
+ "bbox": [
183
+ 109,
184
+ 500,
185
+ 486,
186
+ 640
187
+ ],
188
+ "page_idx": 1
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "2. RELATED WORK",
193
+ "text_level": 1,
194
+ "bbox": [
195
+ 205,
196
+ 657,
197
+ 366,
198
+ 672
199
+ ],
200
+ "page_idx": 1
201
+ },
202
+ {
203
+ "type": "text",
204
+ "text": "Large Multimodal Models. Inspired by the advancements of LLMs (e.g., LLAMA [1]), LMMs offer a promising way towards AGI with multimodal information. These models blend the textual reasoning prowess of LLMs with the image and video comprehension of Vision-and-Language models. This fusion enables LMMs to handle complex tasks requiring both a profound understanding and expressive generation across various modalities. Several open-source LMMs like LLAVA [4] have emerged, demonstrating competence in tasks such as image captioning and visual question-answering. However, their architectural limitations restrict their understanding and reasoning to a single image. Conversely, models like OPENFLAMINGO [12], and MMICL [13] employ specialized architectures enabling the processing of multiple image features, which better mirrors",
205
+ "bbox": [
206
+ 81,
207
+ 686,
208
+ 488,
209
+ 914
210
+ ],
211
+ "page_idx": 1
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "real-world scenarios. Closed-source LMMs such as GPT-4V [14] and GEMINI [15] go beyond basic object descriptions to capture the scene's context [11], emotions [13], and relationships [16]. A common technique to enhance performance is fine-tuning, but applying similar methods to LMMs presents computation challenges [17]. To overcome this, we propose a novel approach to directly enable detailed analysis and reasoning on images without additional training data.",
216
+ "bbox": [
217
+ 508,
218
+ 474,
219
+ 915,
220
+ 597
221
+ ],
222
+ "page_idx": 1
223
+ },
224
+ {
225
+ "type": "text",
226
+ "text": "Multimodal Prompting Methods. Within the domain of LLMs, several language prompt methods have been established to enhance inference capabilities and ensure accurate results during prediction. These include zero-shot [18], few-shot [19], and Chain-of-Thought (CoT) [10, 11] approaches. Recently, research has begun exploring the application of prompting techniques in the multimodal domain to improve the comprehension and reasoning abilities of LMMs for image data. Current multimodal prompts employed in LMMs often exhibit limitations in capturing the intricate interrelationships between visual and language information, particularly when faced with multi-image inputs. As shown in the example in Fig. 1, they are not able to identify the critical action of the boy throwing the ball. To overcome this challenge, we propose a novel prompting method that directs LMMs to extract and analyze essential information, requiring a holistic consideration of all the input images.",
227
+ "bbox": [
228
+ 508,
229
+ 656,
230
+ 915,
231
+ 914
232
+ ],
233
+ "page_idx": 1
234
+ },
235
+ {
236
+ "type": "image",
237
+ "img_path": "images/6f65d9f25776d137f57f0cc0861c039b515d4915da044a73807dad7b532ab52c.jpg",
238
+ "image_caption": [
239
+ "Fig. 2. Different CoT-based methods and their performance in extracting information from images under various conditions, with GPT-4V being used in the experiments. Left: Utilizing CCoT to generate image information; Middle: CoCoT prompting between images with a big domain gap; Right: CoCoT prompting between images with a small domain gap."
240
+ ],
241
+ "image_footnote": [],
242
+ "bbox": [
243
+ 102,
244
+ 97,
245
+ 890,
246
+ 425
247
+ ],
248
+ "page_idx": 2
249
+ },
250
+ {
251
+ "type": "text",
252
+ "text": "3. CONTRASTIVE CHAIN-OF-THOUGHT",
253
+ "text_level": 1,
254
+ "bbox": [
255
+ 124,
256
+ 489,
257
+ 447,
258
+ 503
259
+ ],
260
+ "page_idx": 2
261
+ },
262
+ {
263
+ "type": "text",
264
+ "text": "3.1. Motivation Analysis",
265
+ "text_level": 1,
266
+ "bbox": [
267
+ 83,
268
+ 545,
269
+ 264,
270
+ 560
271
+ ],
272
+ "page_idx": 2
273
+ },
274
+ {
275
+ "type": "text",
276
+ "text": "Traditional CoT-based prompting methods for LMMs can be categorized into two types. The first type is based on text understanding, such as DDCoT (i.e., Duty-Distinct Chain-of-Thought) [10], which decomposes a question into subquestions for a step-by-step response. The second type is based on image understanding, like CCoT (i.e., Compositional Chain-of-Thought) [11], which generates a scene graph of the image to provide answers. However, while processing images, the text-based CoT does not enable LMMs to directly acquire and comprehend the detailed information in images. As shown in Fig. 1, DDCoT does not enable the LMM to recognize that the kid in the second image is not throwing a basketball. The image-based CCoT merely extracts basic information about the main objects in the image, also overlooking significant details. As shown in Fig. 1, CCoT generates a series of scene graphs unrelated to the question. Existing CoT-based prompting methods struggle to notice the details when answering questions about images rich in detail. Therefore, an effective prompting method should enable LMMs to discern and understand the details in images, and subsequently answer questions based on this understanding.",
277
+ "bbox": [
278
+ 81,
279
+ 595,
280
+ 488,
281
+ 912
282
+ ],
283
+ "page_idx": 2
284
+ },
285
+ {
286
+ "type": "text",
287
+ "text": "3.2. Methodology",
288
+ "text_level": 1,
289
+ "bbox": [
290
+ 509,
291
+ 489,
292
+ 640,
293
+ 506
294
+ ],
295
+ "page_idx": 2
296
+ },
297
+ {
298
+ "type": "text",
299
+ "text": "We focus on how to enable LMMs to extract more detailed information from images, especially when the images are very similar. Initially, we examine the extent to which LMMs based on CCoT can extract information from images, as illustrated in Fig. 2. GPT-4V, utilizing CCoT, is limited to identifying entities, their characteristics, and straightforward details like events and relationships between entities. Drawing inspiration from contrastive learning, our approach encourages LMMs to discern similarities and differences within images. We discover that these models are capable of engaging with more complex information, such as reasoning, even when there is a considerable difference in the domain between the images being compared and the original. For instance, they might deduce that an image's scene likely follows a storm and recognize a negative emotional tone in it. When comparing similar images, focusing on the similarities and differences of images effectively highlights the contrasts, such as recognizing more severe damage in one image compared to another, or differentiating the causes of car damage between two images, thereby effectively facilitating causal reasoning. Consequently, we develop the Contrastive Chain-of-Thought prompting. As shown in Fig. 1, this approach, similarly starting from an image perspective, initially compares the similarities and differences between various image inputs. It then directs LMMs to answer questions based on the insights gathered from such comparisons.",
300
+ "bbox": [
301
+ 508,
302
+ 520,
303
+ 916,
304
+ 912
305
+ ],
306
+ "page_idx": 2
307
+ },
308
+ {
309
+ "type": "text",
310
+ "text": "4. EXPERIMENTS AND RESULTS",
311
+ "text_level": 1,
312
+ "bbox": [
313
+ 153,
314
+ 90,
315
+ 418,
316
+ 104
317
+ ],
318
+ "page_idx": 3
319
+ },
320
+ {
321
+ "type": "text",
322
+ "text": "4.1. Experiment Setup",
323
+ "text_level": 1,
324
+ "bbox": [
325
+ 83,
326
+ 119,
327
+ 250,
328
+ 136
329
+ ],
330
+ "page_idx": 3
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "Datasets. We evaluate the effectiveness of CoCoT on two fine-grained multi-image tasks: (1) image-to-image matching and (2) multi-image-to-text matching. Both tasks are well-suited for assessing whether the CoT-based method enables LMMs to acquire more fine-grained information from multiple image inputs. The image-to-image matching task employs the Raven-50 [20, 21] and Factify2 [22] datasets. This task tests the models' ability to identify and interpret visual details, requiring them to determine the degree of match between different images. For the multi-image-to-text matching task, we use Winoground [16]. This task requires LMMs to effectively pair similar images with their corresponding textual descriptions, or alternatively, to align similar texts with the corresponding images. Details including dataset statistics and preprocessing specifics are discussed in Appendix A.",
335
+ "bbox": [
336
+ 81,
337
+ 143,
338
+ 488,
339
+ 372
340
+ ],
341
+ "page_idx": 3
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "Baselines. We compare CoCoT prompting to two state-of-the-art methods in CoT-based multimodal prompting. This includes DDCoT [10] and CCoT [11]. Additionally, we benchmark CoCoT against the standard prompting baseline, which does not incorporate any CoT instructions. Note that all the experiments are conducted under the zero-shot setting. Example prompts and answers can be found in Fig. 1.",
346
+ "bbox": [
347
+ 81,
348
+ 372,
349
+ 488,
350
+ 478
351
+ ],
352
+ "page_idx": 3
353
+ },
354
+ {
355
+ "type": "text",
356
+ "text": "Language Models. We evaluate different prompting strategies on two open-source LMMs: OPENFLAMINGO [12] and MMICL [13], as well as two proprietary models including GPT-4V [14] and GEMINI [15]. Due to API restrictions of GPT4-V, we only evaluate the standard and CoCoT prompting for it. For the setting of generation, we use the default configuration for each model. We use beam search with beam width of 3 for OPENFLAMINGO. In the case of MMICL, the beam width is set to 8. For GEMINI, we opt for the API of Gemini Pro Vision under the default settings which include a temperature of 0.4, TopK set to 32, TopP at 1, and a maximum length of 4,096. For GPT-4V, we use the default settings of the web version as of December 30, 2023.",
357
+ "bbox": [
358
+ 81,
359
+ 479,
360
+ 488,
361
+ 675
362
+ ],
363
+ "page_idx": 3
364
+ },
365
+ {
366
+ "type": "text",
367
+ "text": "4.2. Main Results",
368
+ "text_level": 1,
369
+ "bbox": [
370
+ 83,
371
+ 696,
372
+ 217,
373
+ 709
374
+ ],
375
+ "page_idx": 3
376
+ },
377
+ {
378
+ "type": "text",
379
+ "text": "4.2.1. Image-to-image Matching",
380
+ "text_level": 1,
381
+ "bbox": [
382
+ 83,
383
+ 722,
384
+ 305,
385
+ 736
386
+ ],
387
+ "page_idx": 3
388
+ },
389
+ {
390
+ "type": "text",
391
+ "text": "The task of image-to-image matching requires the model to extract information from two images simultaneously and then determine under a prompt whether the information from both images matches, as exemplified in Fig. 3. LLMs are expected to select the correct answer from the given choices. In addition to the aforementioned methods, we include another random choice baseline for comparative reference. Accuracy of LMMs with different prompting methods is shown in Table 1.",
392
+ "bbox": [
393
+ 81,
394
+ 746,
395
+ 488,
396
+ 867
397
+ ],
398
+ "page_idx": 3
399
+ },
400
+ {
401
+ "type": "text",
402
+ "text": "CoCoT significantly improves LMMs' performance in the image-to-image matching task. Most models show improved performance when DDCoT and CCoT are employed,",
403
+ "bbox": [
404
+ 81,
405
+ 868,
406
+ 488,
407
+ 914
408
+ ],
409
+ "page_idx": 3
410
+ },
411
+ {
412
+ "type": "image",
413
+ "img_path": "images/e8949dcb1f9ae7f977c139746e6958aef8c7c362b24d2d439501c5a23b644a83.jpg",
414
+ "image_caption": [
415
+ "Fig. 3. An example question from the image-to-image matching task, sourced from the Raven-50 [20, 21] dataset."
416
+ ],
417
+ "image_footnote": [],
418
+ "bbox": [
419
+ 513,
420
+ 90,
421
+ 908,
422
+ 176
423
+ ],
424
+ "page_idx": 3
425
+ },
426
+ {
427
+ "type": "table",
428
+ "img_path": "images/b7f029d25e65f58554a48d5688dde55d44a8206081c64136eb1e0e419b3355ec.jpg",
429
+ "table_caption": [
430
+ "Table 1. Accuracy of LMMs employing different prompting strategies in the image-to-image matching task. The best performance within each LMM is highlighted in bold."
431
+ ],
432
+ "table_footnote": [],
433
+ "table_body": "<table><tr><td></td><td>Raven-50</td><td>Factify-V</td></tr><tr><td>Random Choice</td><td>17.00</td><td>50.00</td></tr><tr><td>OPENFLAMINGO</td><td>24.00</td><td>54.00</td></tr><tr><td>OPENFLAMINGO + DDCoT</td><td>24.00</td><td>58.40</td></tr><tr><td>OPENFLAMINGO + CCoT</td><td>24.00</td><td>63.20</td></tr><tr><td>OPENFLAMINGO + CoCoT</td><td>26.00</td><td>65.00</td></tr><tr><td>MMICL</td><td>22.00</td><td>64.60</td></tr><tr><td>MMICL + DDCoT</td><td>10.00</td><td>68.40</td></tr><tr><td>MMICL + CCoT</td><td>26.00</td><td>73.20</td></tr><tr><td>MMICL + CoCoT</td><td>26.00</td><td>77.00</td></tr><tr><td>GEMINI</td><td>18.00</td><td>58.00</td></tr><tr><td>GEMINI + DDCoT</td><td>12.00</td><td>65.40</td></tr><tr><td>GEMINI + CCoT</td><td>20.00</td><td>80.20</td></tr><tr><td>GEMINI + CoCoT</td><td>22.00</td><td>77.80</td></tr><tr><td>GPT-4V</td><td>30.00</td><td>74.00</td></tr><tr><td>GPT-4V + CoCoT</td><td>45.00</td><td>80.60</td></tr></table>",
434
+ "bbox": [
435
+ 535,
436
+ 285,
437
+ 890,
438
+ 506
439
+ ],
440
+ "page_idx": 3
441
+ },
442
+ {
443
+ "type": "text",
444
+ "text": "but the extent of improvement is not as significant as with CoCoT in most cases. Furthermore, regarding the Raven-50 dataset, which comprises non-natural images made up of various shapes, surprisingly, GEMINI emerges as the model with the poorest performance in our evaluations when GPT-4V performs the best which surpasses all models, including the open-source ones like OPENFLAMINGO and MMICL.",
445
+ "bbox": [
446
+ 506,
447
+ 518,
448
+ 913,
449
+ 623
450
+ ],
451
+ "page_idx": 3
452
+ },
453
+ {
454
+ "type": "text",
455
+ "text": "For the Factify-V dataset featuring natural images, GEMINI without CoT outperforms OPENFLAMINGO in similar conditions. However, when CoT is incorporated, GEMINI's performance is almost on par with that of GPT4-V under similar conditions. This outcome differs from the results on the Raven-50 dataset, suggesting that GEMINI inherently possesses the capability to extract detailed information from natural images. Its full potential in this aspect is not fully demonstrated without the use of prompts.",
456
+ "bbox": [
457
+ 506,
458
+ 625,
459
+ 913,
460
+ 761
461
+ ],
462
+ "page_idx": 3
463
+ },
464
+ {
465
+ "type": "text",
466
+ "text": "In summary, our analysis of the image-to-image matching task reveals a consistent enhancement in performance across most models upon integrating various types of CoT-based prompting. This improvement underscores the ability of the visual components within LMMs to concentrate on details in terms of the task at hand. These details are subsequently processed by the LMMs for in-depth analysis, following the CoT-based prompting approach. Notably, in a majority of cases, CoCoT prompting elicits LMMs to achieve state-of-the-art performance on both natural and artificial datasets, surpass",
467
+ "bbox": [
468
+ 506,
469
+ 762,
470
+ 913,
471
+ 912
472
+ ],
473
+ "page_idx": 3
474
+ },
475
+ {
476
+ "type": "text",
477
+ "text": "ing other CoT-based strategies. This showcases the efficacy of CoCoT in guiding LMMs to accurately extract and analyze task-relevant information from images, facilitating enhanced comparative and analytical reasoning within these models.",
478
+ "bbox": [
479
+ 83,
480
+ 90,
481
+ 486,
482
+ 151
483
+ ],
484
+ "page_idx": 4
485
+ },
486
+ {
487
+ "type": "text",
488
+ "text": "4.2.2. Multi-image-to-text Matching",
489
+ "text_level": 1,
490
+ "bbox": [
491
+ 83,
492
+ 183,
493
+ 330,
494
+ 199
495
+ ],
496
+ "page_idx": 4
497
+ },
498
+ {
499
+ "type": "text",
500
+ "text": "Compared to the image-to-image matching task, the multi-image-to-text matching task requires models to precisely extract information from images and match it with text. An exemplar question can be found in Fig. 1. In particular, Winoground dataset is used for this task. Performance on Winoground (shown in Table 2) is assessed using three distinct metrics, each examining a different facet of the models' abilities to reason with both vision and language. The first metric, known as the text score, evaluates the model's capability to accurately choose the right caption when provided with an image. The second metric is the image score, assessing a model's ability to correctly identify the appropriate image when presented with a caption. The last metric is a composite score that integrates the first two metrics. In this group score, a case is considered correct if the model successfully achieves both the accurate text score and image score.",
501
+ "bbox": [
502
+ 81,
503
+ 210,
504
+ 488,
505
+ 452
506
+ ],
507
+ "page_idx": 4
508
+ },
509
+ {
510
+ "type": "text",
511
+ "text": "CoCoT boosts LMMs' performance in the multi-image-to-text matching task, achieving substantial gains. It outperforms other CoT-based methods in the majority of scenarios. This indicates that when comparing the similarities and differences of images, LMMs can better match with the text by identifying subtle differences in the input image pairs. The example in Fig. 1 also shows that methods like DDCoT and CCoT may miss key information, possibly as a result of misdirected focus.",
512
+ "bbox": [
513
+ 81,
514
+ 455,
515
+ 488,
516
+ 590
517
+ ],
518
+ "page_idx": 4
519
+ },
520
+ {
521
+ "type": "text",
522
+ "text": "GEMINI's performance is still the worst, indicating that although GEMINI's visual encoder can extract detailed information from the image, the model is not able to effectively summarize the information in the image, resulting in a poor match with the text. GPT-4V's performance on this task is also inferior to MMICL, indicating that GPT-4V also struggles to effectively summarize detailed information within images, particularly when the input image pairs are very similar.",
523
+ "bbox": [
524
+ 81,
525
+ 594,
526
+ 488,
527
+ 715
528
+ ],
529
+ "page_idx": 4
530
+ },
531
+ {
532
+ "type": "text",
533
+ "text": "All current models exhibit a significant gap compared to human performance. The reasons for this situation could include several possibilities: 1) The visual encoder's capabilities are insufficient; 2) Large models are unable to extract detailed information from the visual encoder, thereby failing in reasoning; and 3) Large models themselves are inadequate, leading to incorrect reasoning. Based on current experimental results, the visual encoder of LMMs is actually capable of recognizing some detailed information. However, due to the differences in the latent spaces between the visual encoder and the large language models, as well as the generalization issues of LLMs, LMMs are unable to fully understand images, resulting in mediocre performance across various tasks.",
534
+ "bbox": [
535
+ 81,
536
+ 717,
537
+ 488,
538
+ 912
539
+ ],
540
+ "page_idx": 4
541
+ },
542
+ {
543
+ "type": "table",
544
+ "img_path": "images/535ff15bd7350e7051341cb62ddab10ec67a5f7efbe6aebca8519fdc4b74f5dc.jpg",
545
+ "table_caption": [
546
+ "Table 2. Accuracy of LMMs employing different prompting strategies in the multi-image-to-text matching task. The best performance within each LMM is highlighted in bold."
547
+ ],
548
+ "table_footnote": [],
549
+ "table_body": "<table><tr><td></td><td>Text</td><td>Image</td><td>Group</td></tr><tr><td>MTurk Human</td><td>89.50</td><td>88.50</td><td>85.50</td></tr><tr><td>Random Choice</td><td>25.00</td><td>25.00</td><td>16.67</td></tr><tr><td>OPENFLAMINGO</td><td>39.00</td><td>41.25</td><td>33.25</td></tr><tr><td>OPENFLAMINGO + DDCoT</td><td>47.50</td><td>47.25</td><td>39.00</td></tr><tr><td>OPENFLAMINGO + CCoT</td><td>42.50</td><td>27.50</td><td>20.00</td></tr><tr><td>OPENFLAMINGO + CoCoT</td><td>58.25</td><td>55.25</td><td>41.50</td></tr><tr><td>MMICL</td><td>46.50</td><td>40.75</td><td>37.75</td></tr><tr><td>MMICL + DDCoT</td><td>46.75</td><td>45.00</td><td>36.75</td></tr><tr><td>MMICL + CCoT</td><td>51.00</td><td>48.00</td><td>47.50</td></tr><tr><td>MMICL + CoCoT</td><td>64.25</td><td>52.50</td><td>50.75</td></tr><tr><td>GEMINI</td><td>30.75</td><td>26.00</td><td>25.00</td></tr><tr><td>GEMINI + DDCoT</td><td>45.00</td><td>25.00</td><td>23.75</td></tr><tr><td>GEMINI + CCoT</td><td>22.50</td><td>33.00</td><td>20.75</td></tr><tr><td>GEMINI + CoCoT</td><td>40.00</td><td>32.50</td><td>27.75</td></tr><tr><td>GPT-4V</td><td>54.50</td><td>42.50</td><td>37.75</td></tr><tr><td>GPT-4V + CoCoT</td><td>58.50</td><td>49.50</td><td>44.50</td></tr></table>",
550
+ "bbox": [
551
+ 535,
552
+ 143,
553
+ 893,
554
+ 385
555
+ ],
556
+ "page_idx": 4
557
+ },
558
+ {
559
+ "type": "table",
560
+ "img_path": "images/faf615457218278f53a55627c99819e4f3924a85df8624f0e428bd64e442b3fb.jpg",
561
+ "table_caption": [
562
+ "Table 3. Ablation study of the similarities and differences varaints of CoCoT on the Factify-V dataset."
563
+ ],
564
+ "table_footnote": [],
565
+ "table_body": "<table><tr><td></td><td>MMICL</td><td>GEMINI</td></tr><tr><td>No prompt</td><td>64.60</td><td>58.00</td></tr><tr><td>+ Similarities</td><td>75.60</td><td>60.80</td></tr><tr><td>+ Differences</td><td>63.40</td><td>65.40</td></tr><tr><td>+ CoCoT</td><td>77.00</td><td>77.80</td></tr></table>",
566
+ "bbox": [
567
+ 578,
568
+ 434,
569
+ 846,
570
+ 513
571
+ ],
572
+ "page_idx": 4
573
+ },
574
+ {
575
+ "type": "text",
576
+ "text": "4.3. Ablation Study",
577
+ "text_level": 1,
578
+ "bbox": [
579
+ 509,
580
+ 534,
581
+ 656,
582
+ 550
583
+ ],
584
+ "page_idx": 4
585
+ },
586
+ {
587
+ "type": "text",
588
+ "text": "CoCoT instructs LMMs to identify the similarities and differences across multiple image inputs first before providing an answer. In our ablation study, we break down the prompts into two distinct components: 1) a prompt that only requests the identification of similarities, and 2) a prompt that solely focuses on extracting the differences. As shown in Table. 3, we can observe that for GEMINI, the performance improves to some extent with the addition of either similarities or differences prompts alone, but not as much as when all prompts are included. For MMICL, adding only the differences prompts leads to a minimal decrease in performance, but the best results are achieved when both prompts are incorporated.",
589
+ "bbox": [
590
+ 506,
591
+ 558,
592
+ 915,
593
+ 741
594
+ ],
595
+ "page_idx": 4
596
+ },
597
+ {
598
+ "type": "text",
599
+ "text": "5. DISCUSSIONS AND CONCLUSIONS",
600
+ "text_level": 1,
601
+ "bbox": [
602
+ 562,
603
+ 762,
604
+ 862,
605
+ 777
606
+ ],
607
+ "page_idx": 4
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "In this study, we address the challenges faced by large multimodal models in processing detailed visual information from multiple images. We have developed the Contrastive Chain-of-Thought (CoCoT) approach, a novel prompting strategy that significantly enhances LMMs' ability to discern fine-grained details in multi-image tasks. Our experiments with various models, including GPT-4V, GEMINI, OPENFLAMINGO, and MMICL, demonstrate that CoCoT im",
612
+ "bbox": [
613
+ 506,
614
+ 792,
615
+ 915,
616
+ 912
617
+ ],
618
+ "page_idx": 4
619
+ },
620
+ {
621
+ "type": "text",
622
+ "text": "proves performance in image-to-image matching and multi-image-to-text tasks. This study contributes to the field of Artificial General Intelligence (AGI), offering new possibilities in areas requiring precise image interpretation. However, CoCoT, due to its requirement for other images for comparison, might introduce unnecessary noise. Future research should focus on refining CoCoT for more complex scenarios and integrating it with other AI technologies to further advance multimodal understanding and AGI development.",
623
+ "bbox": [
624
+ 86,
625
+ 90,
626
+ 486,
627
+ 224
628
+ ],
629
+ "page_idx": 5
630
+ },
631
+ {
632
+ "type": "text",
633
+ "text": "6. REFERENCES",
634
+ "text_level": 1,
635
+ "bbox": [
636
+ 220,
637
+ 253,
638
+ 352,
639
+ 265
640
+ ],
641
+ "page_idx": 5
642
+ },
643
+ {
644
+ "type": "list",
645
+ "sub_type": "ref_text",
646
+ "list_items": [
647
+ "[1] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al., \"Llama: Open and efficient foundation language models,\" arXiv preprint arXiv:2302.13971, 2023.",
648
+ "[2] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al., \"Training language models to follow instructions with human feedback,\" Advances in Neural Information Processing Systems, vol. 35, pp. 27730-27744, 2022.",
649
+ "[3] Daoan Zhang, Weitong Zhang, Bing He, Jianguo Zhang, Chenchen Qin, and Jianhua Yao, \"Dnagpt: A generalized pretrained tool for multiple dna sequence analysis tasks,\" bioRxiv, pp. 2023-07, 2023.",
650
+ "[4] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee, “Visual instruction tuning,” arXiv preprint arXiv:2304.08485, 2023.",
651
+ "[5] Yunlong Tang, Jinrui Zhang, Xiangchen Wang, Teng Wang, and Feng Zheng, “Llmva-gebc: Large language model with video adapter for generic event boundary captioning,” arXiv preprint arXiv:2306.10354, 2023.",
652
+ "[6] Yushi Hu, Hang Hua, Zhengyuan Yang, Weijia Shi, Noah A Smith, and Jiebo Luo, “Promptcap: Prompt-guided task-aware image captioning,” arXiv preprint arXiv:2211.09699, 2022.",
653
+ "[7] Hang Hua, Xingjian Li, Dejing Dou, Cheng-Zhong Xu, and Jiebo Luo, “Fine-tuning pre-trained language models with noise stability regularization,” arXiv preprint arXiv:2206.05658, 2022.",
654
+ "[8] Hanjia Lyu, Jina Huang, Daoan Zhang, Yongsheng Yu, Xinyi Mou, Jinsheng Pan, Zhengyuan Yang, Zhongyu Wei, and Jiebo Luo, “Gpt-4v (ision) as a social media analysis engine,” arXiv preprint arXiv:2311.07547, 2023.",
655
+ "[9] Yongsheng Yu, Dawei Du, Libo Zhang, and Tiejian Luo, \"Unbiased multi-modality guidance for image inpainting,\" in European Conference on Computer Vision. Springer, 2022, pp. 668-684.",
656
+ "[10] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang, \"Ddcot: Duty-distinct chain-of-thought"
657
+ ],
658
+ "bbox": [
659
+ 88,
660
+ 281,
661
+ 486,
662
+ 912
663
+ ],
664
+ "page_idx": 5
665
+ },
666
+ {
667
+ "type": "list",
668
+ "sub_type": "ref_text",
669
+ "list_items": [
670
+ "prompting for multimodal reasoning in language models,\" arXiv preprint arXiv:2310.16436, 2023.",
671
+ "[11] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig, \"Compositional chain-of-thought prompting for large multimodal models,\" arXiv preprint arXiv:2311.17076, 2023.",
672
+ "[12] Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, et al., “Open-flamingo: An open-source framework for training large autoregressive vision-language models,” arXiv preprint arXiv:2308.01390, 2023.",
673
+ "[13] Haozhe Zhao, Zefan Cai, Shuzheng Si, Xiaojian Ma, Kaikai An, Liang Chen, Zixuan Liu, Sheng Wang, Wenjuan Han, and Baobao Chang, \"Mmicl: Empowering vision-language model with multi-modal in-context learning,\" arXiv preprint arXiv:2309.07915, 2023.",
674
+ "[14] OpenAI, “GPT-4 technical report,” CoRR, vol. abs/2303.08774, 2023.",
675
+ "[15] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al., \"Gemini: A family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023.",
676
+ "[16] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross, “Winoground: Probing vision and language models for visio-linguistic compositionality,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 5238–5248.",
677
+ "[17] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al., “Mmbench: Is your multi-modal model an all-around player?” arXiv preprint arXiv:2307.06281, 2023.",
678
+ "[18] Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa, “Large language models are zero-shot reasoners,” Advances in neural information processing systems, vol. 35, pp. 22199-22213, 2022.",
679
+ "[19] Renrui Zhang, Xiangfei Hu, Bohao Li, Siyuan Huang, Hanqiu Deng, Yu Qiao, Peng Gao, and Hongsheng Li, \"Prompt, generate, then cache: Cascade of foundation models makes strong few-shot learners,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 15211-15222.",
680
+ "[20] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu, “Raven: A dataset for relational and analogical visual reasoning,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 5317-5327.",
681
+ "[21] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al., \"Language"
682
+ ],
683
+ "bbox": [
684
+ 511,
685
+ 92,
686
+ 913,
687
+ 912
688
+ ],
689
+ "page_idx": 5
690
+ },
691
+ {
692
+ "type": "list",
693
+ "sub_type": "ref_text",
694
+ "list_items": [
695
+ "is not all you need: Aligning perception with language models,\" arXiv preprint arXiv:2302.14045, 2023.",
696
+ "[22] S Suryavardan, Shreyash Mishra, Parth Patwa, Megha Chakraborty, Anku Rani, Aishwarya Reganti, Aman Chadha, Amitava Das, Amit Sheth, Manoj Chinnakotla, et al., \"Factify 2: A multimodal fake news and satire news dataset,\" arXiv preprint arXiv:2304.03897, 2023."
697
+ ],
698
+ "bbox": [
699
+ 86,
700
+ 90,
701
+ 488,
702
+ 196
703
+ ],
704
+ "page_idx": 6
705
+ },
706
+ {
707
+ "type": "text",
708
+ "text": "A. DATASET DESCRIPTION",
709
+ "text_level": 1,
710
+ "bbox": [
711
+ 176,
712
+ 90,
713
+ 397,
714
+ 104
715
+ ],
716
+ "page_idx": 7
717
+ },
718
+ {
719
+ "type": "text",
720
+ "text": "A.1. Winoground",
721
+ "text_level": 1,
722
+ "bbox": [
723
+ 84,
724
+ 119,
725
+ 217,
726
+ 133
727
+ ],
728
+ "page_idx": 7
729
+ },
730
+ {
731
+ "type": "text",
732
+ "text": "The Winoground [16] task involves matching images and captions which contains 400 groups of image-caption pairs. Each group contains two similar image-caption pairs. This task is challenging because the captions have the same words but in different sequences. LMMs must analyze both images and texts to identify subtle differences and understand the implied references. The Winoground is chosen to test if LMMs can comprehend fine-grained image information to text. Example questions are shown in Fig. 4. There are two tasks in the Winoground dataset: 1) given two images, the model is required to find out which image can match the given caption; 2) given two pieces of text, the model is required to find out which text can match the given image.",
733
+ "bbox": [
734
+ 86,
735
+ 143,
736
+ 488,
737
+ 339
738
+ ],
739
+ "page_idx": 7
740
+ },
741
+ {
742
+ "type": "text",
743
+ "text": "A.2. Raven-50",
744
+ "text_level": 1,
745
+ "bbox": [
746
+ 84,
747
+ 359,
748
+ 192,
749
+ 372
750
+ ],
751
+ "page_idx": 7
752
+ },
753
+ {
754
+ "type": "text",
755
+ "text": "The Raven-50 [20, 21] test is a common tool for assessing the nonverbal reasoning capabilities of LMMs. This test demands both visual acuity and logical reasoning to decipher the connections between images. In each scenario, participants are presented with either 3 or 8 images as inputs, alongside 6 potential answer images, each with a distinct solution. The goal is to correctly identify the appropriate image. Example questions are shown in Fig. 4. Note that the evaluation metric for OPENFLAMINGO and MMICL on Raven-50 dataset is to calculate the logits of the output for each image pair; while for GPT-4V and GEMINI, we directly let the model choose the correct result and calculate the accuracy.",
756
+ "bbox": [
757
+ 86,
758
+ 383,
759
+ 488,
760
+ 564
761
+ ],
762
+ "page_idx": 7
763
+ },
764
+ {
765
+ "type": "text",
766
+ "text": "A.3. Facty-V",
767
+ "text_level": 1,
768
+ "bbox": [
769
+ 84,
770
+ 583,
771
+ 192,
772
+ 598
773
+ ],
774
+ "page_idx": 7
775
+ },
776
+ {
777
+ "type": "text",
778
+ "text": "The Factify2 [22] dataset features 35,000 data pairs for training, and 7,500 pairs each for validation and testing. Every data pair includes a claim and a corresponding document, both of which are made up of an image, text, and OCR-generated text from the image. These pairs are categorized into one of five labels: \"support multimodal\", \"support text\", \"refute\", \"insufficient multimodal\", or \"insufficient text\". Specifically, we randomly sample 500 cases in the test set, 100 for each of the 5 categories. We only use the images in the dataset in our experiments where the labels are reorganized into \"support image\" and \"refute\". The generated subset is called Factify-V. Example questions are shown in Fig. 4. The task involves prompting the model to determine whether the pair of input images are contextually entailed.",
779
+ "bbox": [
780
+ 86,
781
+ 607,
782
+ 488,
783
+ 819
784
+ ],
785
+ "page_idx": 7
786
+ },
787
+ {
788
+ "type": "text",
789
+ "text": "B. ADDITIONAL DISCUSSIONS OF MOTIVATION",
790
+ "text_level": 1,
791
+ "bbox": [
792
+ 93,
793
+ 839,
794
+ 478,
795
+ 853
796
+ ],
797
+ "page_idx": 7
798
+ },
799
+ {
800
+ "type": "text",
801
+ "text": "As shown in Fig. 1, DDCoT, leaning towards a language perspective in handling images, first decouples the original question and image information into sub-questions. It then",
802
+ "bbox": [
803
+ 83,
804
+ 868,
805
+ 488,
806
+ 914
807
+ ],
808
+ "page_idx": 7
809
+ },
810
+ {
811
+ "type": "text",
812
+ "text": "prompts LMMs to answer these sub-questions, generating sub-answers, and finally, LMMs use these sub-questions and sub-answers to respond to the original question. CCoT, more image-oriented, initially directs LMMs to generate a Scene Graph (SG) based on image information. LMMs then use the SG's image information in conjunction with the user's question to find an answer. Given that the above methods are not effective in catching detailed information, we focus on how to enable LMMs to extract more detailed information from images, especially when the images are very similar. To address this, CoCoT is designed to guide LMMs in identifying both the similarities and the nuanced differences between images, facilitating a more in-depth and accurate interpretation of visual content.",
813
+ "bbox": [
814
+ 509,
815
+ 90,
816
+ 916,
817
+ 301
818
+ ],
819
+ "page_idx": 7
820
+ },
821
+ {
822
+ "type": "text",
823
+ "text": "C. MORE EXAMPLES",
824
+ "text_level": 1,
825
+ "bbox": [
826
+ 624,
827
+ 321,
828
+ 800,
829
+ 335
830
+ ],
831
+ "page_idx": 7
832
+ },
833
+ {
834
+ "type": "text",
835
+ "text": "We further provide example responses of CoCoT based on GPT-4V on different datasets, as shown in Fig. 5 and Fig. 6.",
836
+ "bbox": [
837
+ 509,
838
+ 349,
839
+ 913,
840
+ 381
841
+ ],
842
+ "page_idx": 7
843
+ },
844
+ {
845
+ "type": "image",
846
+ "img_path": "images/9819ad3d613e673cea754bdc3a62ec6e694819972d980e360293799f42488bf9.jpg",
847
+ "image_caption": [
848
+ "Winoground Dataset"
849
+ ],
850
+ "image_footnote": [
851
+ "Q: Which image match the caption: \"some plants surrounding a lightbulb\"? A: The first one."
852
+ ],
853
+ "bbox": [
854
+ 132,
855
+ 351,
856
+ 243,
857
+ 481
858
+ ],
859
+ "page_idx": 8
860
+ },
861
+ {
862
+ "type": "image",
863
+ "img_path": "images/2e989e8ffbb08e3fc016ff0696e903fc12abddf1e0893e614b81919e58c54183.jpg",
864
+ "image_caption": [],
865
+ "image_footnote": [],
866
+ "bbox": [
867
+ 251,
868
+ 351,
869
+ 411,
870
+ 439
871
+ ],
872
+ "page_idx": 8
873
+ },
874
+ {
875
+ "type": "text",
876
+ "text": "Raven-50 Dataset",
877
+ "text_level": 1,
878
+ "bbox": [
879
+ 563,
880
+ 352,
881
+ 686,
882
+ 364
883
+ ],
884
+ "page_idx": 8
885
+ },
886
+ {
887
+ "type": "image",
888
+ "img_path": "images/ff4748cebed9a1ffc52e1d19d97c0009842a729fcf94d52fbea788b32064edf3.jpg",
889
+ "image_caption": [],
890
+ "image_footnote": [],
891
+ "bbox": [
892
+ 431,
893
+ 373,
894
+ 460,
895
+ 396
896
+ ],
897
+ "page_idx": 8
898
+ },
899
+ {
900
+ "type": "image",
901
+ "img_path": "images/7802c79e0891b880b288a4cd347f8bbcccfbdca788e0a35dcd1067dcc9fd8fce.jpg",
902
+ "image_caption": [],
903
+ "image_footnote": [],
904
+ "bbox": [
905
+ 470,
906
+ 369,
907
+ 506,
908
+ 398
909
+ ],
910
+ "page_idx": 8
911
+ },
912
+ {
913
+ "type": "image",
914
+ "img_path": "images/d870364626723eab7f2f855664dd708a3bd1582e5df3bfc3fa549d399bd1f0de.jpg",
915
+ "image_caption": [],
916
+ "image_footnote": [],
917
+ "bbox": [
918
+ 433,
919
+ 407,
920
+ 457,
921
+ 428
922
+ ],
923
+ "page_idx": 8
924
+ },
925
+ {
926
+ "type": "image",
927
+ "img_path": "images/e0a0ddbfc9a6d2b7e20b47ffa0124292ff869d680a4bbdd3e3acc5f7befa47f9.jpg",
928
+ "image_caption": [],
929
+ "image_footnote": [
930
+ "Correct?"
931
+ ],
932
+ "bbox": [
933
+ 468,
934
+ 401,
935
+ 506,
936
+ 431
937
+ ],
938
+ "page_idx": 8
939
+ },
940
+ {
941
+ "type": "image",
942
+ "img_path": "images/b98561e4fd4ca592fd66b5c5cde62f5fd40f167f0360804482f176f50c86086d.jpg",
943
+ "image_caption": [],
944
+ "image_footnote": [],
945
+ "bbox": [
946
+ 436,
947
+ 440,
948
+ 455,
949
+ 458
950
+ ],
951
+ "page_idx": 8
952
+ },
953
+ {
954
+ "type": "image",
955
+ "img_path": "images/ac865b1ff40bf5c1cd77edf5216e580bc68f5e2def169e7d9709e19e9f45961e.jpg",
956
+ "image_caption": [],
957
+ "image_footnote": [],
958
+ "bbox": [
959
+ 468,
960
+ 434,
961
+ 508,
962
+ 464
963
+ ],
964
+ "page_idx": 8
965
+ },
966
+ {
967
+ "type": "image",
968
+ "img_path": "images/7845c56ab0a99ae46f753a1797b6855d3fbcb20360a5ffd2e55a45ec84bc4624.jpg",
969
+ "image_caption": [],
970
+ "image_footnote": [],
971
+ "bbox": [
972
+ 519,
973
+ 371,
974
+ 557,
975
+ 401
976
+ ],
977
+ "page_idx": 8
978
+ },
979
+ {
980
+ "type": "image",
981
+ "img_path": "images/c30bf4084612694f7d4170775c882d7c6a26db7038c410a68d700ebd1b9fb3bc.jpg",
982
+ "image_caption": [],
983
+ "image_footnote": [],
984
+ "bbox": [
985
+ 517,
986
+ 402,
987
+ 555,
988
+ 431
989
+ ],
990
+ "page_idx": 8
991
+ },
992
+ {
993
+ "type": "image",
994
+ "img_path": "images/56ed5790a1e6b07141999021cbedf8b407856ba3da958540cb81dfe9159eadc0.jpg",
995
+ "image_caption": [],
996
+ "image_footnote": [],
997
+ "bbox": [
998
+ 526,
999
+ 436,
1000
+ 547,
1001
+ 460
1002
+ ],
1003
+ "page_idx": 8
1004
+ },
1005
+ {
1006
+ "type": "image",
1007
+ "img_path": "images/a9b5cc7754fae19ae6b6e5d40efa24b8fadddcb47a2bc5635e5d63f6b826e2f2.jpg",
1008
+ "image_caption": [
1009
+ "A"
1010
+ ],
1011
+ "image_footnote": [
1012
+ "Do you agree the following image is:"
1013
+ ],
1014
+ "bbox": [
1015
+ 589,
1016
+ 400,
1017
+ 614,
1018
+ 421
1019
+ ],
1020
+ "page_idx": 8
1021
+ },
1022
+ {
1023
+ "type": "image",
1024
+ "img_path": "images/9d4a18fe50cdd0bf3d777e6377be5c5db6f61db2099a2d67e1b573db649c49c1.jpg",
1025
+ "image_caption": [
1026
+ "B"
1027
+ ],
1028
+ "image_footnote": [],
1029
+ "bbox": [
1030
+ 630,
1031
+ 392,
1032
+ 669,
1033
+ 422
1034
+ ],
1035
+ "page_idx": 8
1036
+ },
1037
+ {
1038
+ "type": "image",
1039
+ "img_path": "images/4550a5969e563eb3f99d300ac0cfd9eb960b216f0d5a2087c38f34b20d4a07ee.jpg",
1040
+ "image_caption": [],
1041
+ "image_footnote": [],
1042
+ "bbox": [
1043
+ 681,
1044
+ 392,
1045
+ 720,
1046
+ 422
1047
+ ],
1048
+ "page_idx": 8
1049
+ },
1050
+ {
1051
+ "type": "image",
1052
+ "img_path": "images/c514ca84bfc42059e5e23bfc6758e67fd6a988c1d131eabfe54e38a2bcecaa64.jpg",
1053
+ "image_caption": [
1054
+ "D"
1055
+ ],
1056
+ "image_footnote": [],
1057
+ "bbox": [
1058
+ 723,
1059
+ 392,
1060
+ 771,
1061
+ 422
1062
+ ],
1063
+ "page_idx": 8
1064
+ },
1065
+ {
1066
+ "type": "image",
1067
+ "img_path": "images/0435442be1fd1f8551bbe0af344b306a3f00fcd7487a8f1d10aa94fc02913a5a.jpg",
1068
+ "image_caption": [
1069
+ "E"
1070
+ ],
1071
+ "image_footnote": [],
1072
+ "bbox": [
1073
+ 782,
1074
+ 392,
1075
+ 805,
1076
+ 422
1077
+ ],
1078
+ "page_idx": 8
1079
+ },
1080
+ {
1081
+ "type": "image",
1082
+ "img_path": "images/3c718387264e80cedd5b30954a9e5ae994965691622f55eabe9f0dca4b6c69bd.jpg",
1083
+ "image_caption": [
1084
+ "F"
1085
+ ],
1086
+ "image_footnote": [
1087
+ "Correct?"
1088
+ ],
1089
+ "bbox": [
1090
+ 823,
1091
+ 391,
1092
+ 862,
1093
+ 422
1094
+ ],
1095
+ "page_idx": 8
1096
+ },
1097
+ {
1098
+ "type": "image",
1099
+ "img_path": "images/3dfbc2ede994ae594690ca411336b720a1e030e5a21f9a5864720d33b660a7f3.jpg",
1100
+ "image_caption": [],
1101
+ "image_footnote": [
1102
+ "Correct?"
1103
+ ],
1104
+ "bbox": [
1105
+ 653,
1106
+ 444,
1107
+ 666,
1108
+ 452
1109
+ ],
1110
+ "page_idx": 8
1111
+ },
1112
+ {
1113
+ "type": "image",
1114
+ "img_path": "images/f754fd6e85932b99fc6d252c28f25e1fbd39fc738edf71d8d3ae1eaccd95d004.jpg",
1115
+ "image_caption": [],
1116
+ "image_footnote": [
1117
+ "ect? Correct"
1118
+ ],
1119
+ "bbox": [
1120
+ 730,
1121
+ 444,
1122
+ 746,
1123
+ 452
1124
+ ],
1125
+ "page_idx": 8
1126
+ },
1127
+ {
1128
+ "type": "text",
1129
+ "text": "Factify-V Dataset",
1130
+ "text_level": 1,
1131
+ "bbox": [
1132
+ 584,
1133
+ 487,
1134
+ 702,
1135
+ 500
1136
+ ],
1137
+ "page_idx": 8
1138
+ },
1139
+ {
1140
+ "type": "text",
1141
+ "text": "Does the second image entail with the first image?",
1142
+ "bbox": [
1143
+ 480,
1144
+ 502,
1145
+ 815,
1146
+ 516
1147
+ ],
1148
+ "page_idx": 8
1149
+ },
1150
+ {
1151
+ "type": "image",
1152
+ "img_path": "images/73ba1bf357e9a70abac01cb280dcad7e3e37bc6a1e87f53870b67a00ce1130b3.jpg",
1153
+ "image_caption": [
1154
+ "Fig. 4. Sampled questions from the Raven-50, Factify-V, and Winoground datasets."
1155
+ ],
1156
+ "image_footnote": [],
1157
+ "bbox": [
1158
+ 148,
1159
+ 521,
1160
+ 496,
1161
+ 625
1162
+ ],
1163
+ "page_idx": 8
1164
+ },
1165
+ {
1166
+ "type": "image",
1167
+ "img_path": "images/4cc3d0345db176004b99dfbc058e795d63597b749c68f7aba7e917a131a62857.jpg",
1168
+ "image_caption": [],
1169
+ "image_footnote": [],
1170
+ "bbox": [
1171
+ 501,
1172
+ 521,
1173
+ 848,
1174
+ 625
1175
+ ],
1176
+ "page_idx": 8
1177
+ },
1178
+ {
1179
+ "type": "image",
1180
+ "img_path": "images/5aab48c1442e1122dfa016732a6b6626c0da2df58eff150365507a3ca4185cda.jpg",
1181
+ "image_caption": [],
1182
+ "image_footnote": [],
1183
+ "bbox": [
1184
+ 246,
1185
+ 236,
1186
+ 343,
1187
+ 311
1188
+ ],
1189
+ "page_idx": 9
1190
+ },
1191
+ {
1192
+ "type": "image",
1193
+ "img_path": "images/8404064298ed89fc6629b4d0063d343d9e333f4657982b032becd134d06befab.jpg",
1194
+ "image_caption": [],
1195
+ "image_footnote": [],
1196
+ "bbox": [
1197
+ 359,
1198
+ 252,
1199
+ 756,
1200
+ 300
1201
+ ],
1202
+ "page_idx": 9
1203
+ },
1204
+ {
1205
+ "type": "text",
1206
+ "text": "User: Please tell me the similarities and differences of these two images, base on the information in the first image, please select the missing shape from the second image and tell me which shape is your choice, counting from left to right, DO NOT use code.",
1207
+ "bbox": [
1208
+ 202,
1209
+ 319,
1210
+ 781,
1211
+ 364
1212
+ ],
1213
+ "page_idx": 9
1214
+ },
1215
+ {
1216
+ "type": "text",
1217
+ "text": "GPT4-V: The two images you've provided seem to be part of a visual pattern recognition or IQ test, where the goal is to determine the rule that applies to the transformations of the shapes in the first image, and then apply that rule to select the missing shape from the options in the second image.",
1218
+ "bbox": [
1219
+ 202,
1220
+ 383,
1221
+ 782,
1222
+ 445
1223
+ ],
1224
+ "page_idx": 9
1225
+ },
1226
+ {
1227
+ "type": "text",
1228
+ "text": "In the first image, there are three shapes: the first is a diamond with a cross inside, the second is a diamond that is rotated 45 degrees without any cross, and the third is a square with a smaller square inside. In the second image, we have a sequence of four shapes with one missing (the third position is blank). The first shape is a square with both a cross and an X inside, the second is a diamond with a cross inside, the fourth is a square with a smaller square inside, and the fifth is a diamond with both a cross and an X inside.",
1229
+ "bbox": [
1230
+ 202,
1231
+ 460,
1232
+ 785,
1233
+ 566
1234
+ ],
1235
+ "page_idx": 9
1236
+ },
1237
+ {
1238
+ "type": "text",
1239
+ "text": "To find the similarities and differences, we'll look at patterns in the transformations of the shapes:",
1240
+ "bbox": [
1241
+ 202,
1242
+ 582,
1243
+ 782,
1244
+ 612
1245
+ ],
1246
+ "page_idx": 9
1247
+ },
1248
+ {
1249
+ "type": "list",
1250
+ "sub_type": "text",
1251
+ "list_items": [
1252
+ "The shapes alternate between a square and a diamond.",
1253
+ "- The internal lines (cross and X) seem to alternate in presence and absence."
1254
+ ],
1255
+ "bbox": [
1256
+ 204,
1257
+ 613,
1258
+ 712,
1259
+ 643
1260
+ ],
1261
+ "page_idx": 9
1262
+ },
1263
+ {
1264
+ "type": "text",
1265
+ "text": "Based on these observations from the first image, we can infer that the missing shape in the second image should be a shape that is a square since it follows a diamond and does not have any internal lines, as the pattern suggests an alternation in the presence of internal lines. Considering the sequence from the second image, the missing shape would then be the third shape and should be a square without any internal lines to maintain the pattern. Counting from left to right, the missing shape is the third one in the sequence.",
1266
+ "bbox": [
1267
+ 202,
1268
+ 659,
1269
+ 789,
1270
+ 765
1271
+ ],
1272
+ "page_idx": 9
1273
+ },
1274
+ {
1275
+ "type": "text",
1276
+ "text": "Fig. 5. An example response generated by GPT-4V via CoCoT on the Raven-50 dataset.",
1277
+ "bbox": [
1278
+ 202,
1279
+ 811,
1280
+ 792,
1281
+ 828
1282
+ ],
1283
+ "page_idx": 9
1284
+ },
1285
+ {
1286
+ "type": "image",
1287
+ "img_path": "images/8312ee044b7d341ad834f97736789a0b659aea16078c1ee3aec4a3010490d276.jpg",
1288
+ "image_caption": [
1289
+ "Fig. 6. An example response generated by GPT-4V via CoCoT on the Factify-V dataset."
1290
+ ],
1291
+ "image_footnote": [],
1292
+ "bbox": [
1293
+ 282,
1294
+ 224,
1295
+ 470,
1296
+ 310
1297
+ ],
1298
+ "page_idx": 10
1299
+ },
1300
+ {
1301
+ "type": "image",
1302
+ "img_path": "images/4c3edd3018c74499ee368b1203ca10733a7e818267a54873a6f84753fb564e53.jpg",
1303
+ "image_caption": [],
1304
+ "image_footnote": [],
1305
+ "bbox": [
1306
+ 508,
1307
+ 227,
1308
+ 700,
1309
+ 314
1310
+ ],
1311
+ "page_idx": 10
1312
+ },
1313
+ {
1314
+ "type": "text",
1315
+ "text": "User: Please tell me the similarities and differences of these two images, and tell me whether these two images are entailed?",
1316
+ "bbox": [
1317
+ 191,
1318
+ 314,
1319
+ 764,
1320
+ 344
1321
+ ],
1322
+ "page_idx": 10
1323
+ },
1324
+ {
1325
+ "type": "text",
1326
+ "text": "GPT4-V: The two images you've provided appear to be election-related graphics from major news networks, specifically ABC News and CNN, declaring Joe Biden as the projected winner of Colorado and New York, respectively, during the 2020 United States presidential election. Here are the similarities and differences:",
1327
+ "bbox": [
1328
+ 191,
1329
+ 354,
1330
+ 785,
1331
+ 415
1332
+ ],
1333
+ "page_idx": 10
1334
+ },
1335
+ {
1336
+ "type": "text",
1337
+ "text": "Similarities:",
1338
+ "text_level": 1,
1339
+ "bbox": [
1340
+ 192,
1341
+ 431,
1342
+ 277,
1343
+ 444
1344
+ ],
1345
+ "page_idx": 10
1346
+ },
1347
+ {
1348
+ "type": "list",
1349
+ "sub_type": "text",
1350
+ "list_items": [
1351
+ "1.Both images feature Joe Biden.",
1352
+ "2.Both declare Biden as the projected winner of a state.",
1353
+ "3.The graphics use a blue and white color scheme associated with the Democratic Party.",
1354
+ "4.Both images include the news network's logo and the year of the election.",
1355
+ "5.Both use a checkmark symbol to indicate the projection of a winner."
1356
+ ],
1357
+ "bbox": [
1358
+ 192,
1359
+ 446,
1360
+ 782,
1361
+ 522
1362
+ ],
1363
+ "page_idx": 10
1364
+ },
1365
+ {
1366
+ "type": "text",
1367
+ "text": "Differences:",
1368
+ "text_level": 1,
1369
+ "bbox": [
1370
+ 192,
1371
+ 537,
1372
+ 279,
1373
+ 550
1374
+ ],
1375
+ "page_idx": 10
1376
+ },
1377
+ {
1378
+ "type": "list",
1379
+ "sub_type": "text",
1380
+ "list_items": [
1381
+ "1. The images are from different news outlets, ABC News and CNN, as indicated by their logos.",
1382
+ "2. They declare Biden as the winner in different states, Colorado in the first and New York in the second.",
1383
+ "3.The images have different designs and layouts due to varying graphic standards of the two networks.",
1384
+ "4. The first image has a graphic element that looks like a voting ballot, while the second one does not.",
1385
+ "5.The text font and style differ between the two images."
1386
+ ],
1387
+ "bbox": [
1388
+ 192,
1389
+ 553,
1390
+ 789,
1391
+ 689
1392
+ ],
1393
+ "page_idx": 10
1394
+ },
1395
+ {
1396
+ "type": "text",
1397
+ "text": "Regarding whether the second image entails the first one, the answer is no. The concept of entailment in this context would mean that the second image could logically imply the information in the first. Since the images are projections for different states, the second does not imply that Biden is the projected winner of Colorado, only that he is projected to win New York. They are separate statements about the outcomes in different states.",
1398
+ "bbox": [
1399
+ 191,
1400
+ 691,
1401
+ 797,
1402
+ 767
1403
+ ],
1404
+ "page_idx": 10
1405
+ }
1406
+ ]
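Each *_content_list.json added in this batch, like the one ending above, is a flat JSON array of block records carrying "type", "text", "text_level", "bbox", and "page_idx" fields, with "table_body", "img_path"/"image_caption", and "list_items" used by table, image, and list blocks. The Python sketch below is illustrative only; the helper names are not part of the dataset, and it assumes a well-formed file whose path is passed on the command line. It shows one way to group blocks by page and pull out text, tables, and image records using only the standard library.

    import json
    import sys
    from collections import defaultdict

    def load_content_list(path):
        """Load a *_content_list.json file: a flat list of block records."""
        with open(path, encoding="utf-8") as fh:
            return json.load(fh)

    def blocks_by_page(blocks):
        """Group block records by their page_idx field, preserving document order."""
        pages = defaultdict(list)
        for block in blocks:
            pages[block.get("page_idx", 0)].append(block)
        return pages

    def page_text(blocks):
        """Concatenate the textual content of one page's blocks.

        Headings are plain "text" blocks carrying text_level == 1;
        "list" blocks keep their entries under list_items.
        """
        lines = []
        for block in blocks:
            if block["type"] == "text":
                lines.append(block["text"])
            elif block["type"] == "list":
                lines.extend(block.get("list_items", []))
        return "\n".join(lines)

    if __name__ == "__main__":
        blocks = load_content_list(sys.argv[1])  # path to any *_content_list.json
        pages = blocks_by_page(blocks)
        tables = [b for b in blocks if b["type"] == "table"]  # HTML kept in table_body
        images = [b for b in blocks if b["type"] == "image"]  # files referenced by img_path
        print(f"{len(blocks)} blocks, {len(pages)} pages, {len(tables)} tables, {len(images)} images")
        print(page_text(pages[max(pages)])[:300])  # preview of the last page's text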
2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_model.json ADDED
@@ -0,0 +1,1894 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.275,
8
+ 0.058,
9
+ 0.701
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2401.02582v1 [cs.CV] 5 Jan 2024"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.086,
18
+ 0.118,
19
+ 0.913,
20
+ 0.154
21
+ ],
22
+ "angle": 0,
23
+ "content": "COCOT: CONTRASTIVE CHAIN-OF-THOUGHT PROMPTING FOR LARGE MULTIMODAL MODELS WITH MULTIPLE IMAGE INPUTS"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.103,
29
+ 0.173,
30
+ 0.892,
31
+ 0.192
32
+ ],
33
+ "angle": 0,
34
+ "content": "Daoan Zhang\\(^{1,\\ast}\\), Junming Yang\\(^{2,\\ast}\\), Hanjia Lyu\\(^{1,\\ast}\\), Zijian Jin\\(^{3}\\), Yuan Yao\\(^{1}\\), Mingkai Chen\\(^{4}\\), Jiebo Luo\\(^{1}\\)"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.175,
40
+ 0.209,
41
+ 0.821,
42
+ 0.226
43
+ ],
44
+ "angle": 0,
45
+ "content": "<sup>1</sup> University of Rochester, <sup>2</sup> Nanjing University of Posts and Telecommunications,"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.305,
51
+ 0.227,
52
+ 0.695,
53
+ 0.244
54
+ ],
55
+ "angle": 0,
56
+ "content": "\\(^{3}\\) New York University, \\(^{4}\\) Stony Brook University,"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.308,
62
+ 0.246,
63
+ 0.691,
64
+ 0.262
65
+ ],
66
+ "angle": 0,
67
+ "content": "* These authors contributed equally to this work."
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.241,
73
+ 0.296,
74
+ 0.333,
75
+ 0.31
76
+ ],
77
+ "angle": 0,
78
+ "content": "ABSTRACT"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.087,
84
+ 0.316,
85
+ 0.488,
86
+ 0.693
87
+ ],
88
+ "angle": 0,
89
+ "content": "When exploring the development of Artificial General Intelligence (AGI), a critical task for these models involves interpreting and processing information from multiple image inputs. However, Large Multimodal Models (LMMs) encounter two issues in such scenarios: (1) a lack of fine-grained perception, and (2) a tendency to blend information across multiple images. We first extensively investigate the capability of LMMs to perceive fine-grained visual details when dealing with multiple input images. The research focuses on two aspects: first, image-to-image matching (to evaluate whether LMMs can effectively reason and pair relevant images), and second, multi-image-to-text matching (to assess whether LMMs can accurately capture and summarize detailed image information). We conduct evaluations on a range of both open-source and closed-source large models, including GPT-4V, GEMINI, OPENFLAMINGO, and MMICL. To enhance model performance, we further develop a Contrastive Chain-of-Thought (CoCoT) prompting approach based on multi-input multimodal models. This method requires LMMs to compare the similarities and differences among multiple image inputs, and then guide the models to answer detailed questions about multi-image inputs based on the identified similarities and differences. Our experimental results showcase CoCoT's proficiency in enhancing the multi-image comprehension capabilities of large multimodal models."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.084,
95
+ 0.7,
96
+ 0.488,
97
+ 0.731
98
+ ],
99
+ "angle": 0,
100
+ "content": "Index Terms—Large Multimodal Model, Multimodal Prompting, Large Language Model"
101
+ },
102
+ {
103
+ "type": "title",
104
+ "bbox": [
105
+ 0.209,
106
+ 0.75,
107
+ 0.365,
108
+ 0.763
109
+ ],
110
+ "angle": 0,
111
+ "content": "1. INTRODUCTION"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.083,
117
+ 0.777,
118
+ 0.488,
119
+ 0.914
120
+ ],
121
+ "angle": 0,
122
+ "content": "Recent advancements in Large Language Models (LLMs) [1, 2, 3] have sparked optimism in the pursuit of Artificial General Intelligence (AGI). Given the pivotal role of vision in human information acquisition, its integration is crucial for AGI's perceptual capabilities. To bridge the gap between textual and visual modalities, researchers are experimenting with aligning language with vision [4, 5] and directly encoding visual inputs into discrete tokens [6, 7]. These efforts have demonstrated the substantial potential of large multimodal"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.509,
128
+ 0.296,
129
+ 0.915,
130
+ 0.643
131
+ ],
132
+ "angle": 0,
133
+ "content": "models in processing multimodal content. However, they still fall short of human-like perception of the world [8, 9]. One significant challenge is the loss of image detail when using natural language, a medium less precise than visual data. Complex visual information, such as subtle lighting shifts or intricate patterns, often requires comprehensive verbal description. Another hurdle is understanding the relationship between multiple image inputs. Language-based descriptions of relationships and interactions within and across images can become challenging, necessitating explanations of both individual elements and their spatial and contextual ties. This complexity often results in ambiguous or overly extensive explanations, highlighting the limitations of current models in emulating human-like perception. To address these two issues, researchers have developed various multimodal prompting strategies [10, 11] in an attempt to guide LMMs in extracting essential information from the visual content for effective multi-image understanding. Although these methods have shown proficiency in comprehending single-image contexts, they encounter obstacles when it comes to discerning relationships between multiple images. This difficulty primarily stems from an insufficient focus on key information, which requires joint consideration of all images involved."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.509,
139
+ 0.657,
140
+ 0.915,
141
+ 0.914
142
+ ],
143
+ "angle": 0,
144
+ "content": "In our study, we introduce Contrastive Chain-of-Thought (CoCoT), a novel multimodal prompting strategy designed to overcome the limitations of existing prompting strategies and enhance LMMs' performance in multi-image tasks. CoCoT prompts LMMs to discern and articulate the similarities and differences among various inputs, laying the groundwork for answering detailed, multi-image-based questions (Fig. 1). This method sharpens the models' focus, particularly on the distinctions between inputs, ensuring comprehensive capture of nuanced, question-relevant information during summarization. We rigorously evaluate CoCoT in two multi-image tasks. The first task, image-to-image matching, assesses the LMMs' competence in logically associating and pairing relevant images. The second task, image-to-text matching, tests the LMMs' ability to accurately capture and summarize detailed image data. Our evaluations span both widely-used open-source models and leading-edge closed-source models."
145
+ }
146
+ ],
147
+ [
148
+ {
149
+ "type": "image",
150
+ "bbox": [
151
+ 0.104,
152
+ 0.088,
153
+ 0.891,
154
+ 0.426
155
+ ],
156
+ "angle": 0,
157
+ "content": null
158
+ },
159
+ {
160
+ "type": "image_caption",
161
+ "bbox": [
162
+ 0.083,
163
+ 0.43,
164
+ 0.916,
165
+ 0.462
166
+ ],
167
+ "angle": 0,
168
+ "content": "Fig. 1. Comparison between different multimodal prompting strategies. The unique components in each prompting strategy's corresponding response are highlighted in varied colors. Note that GPT-4V is used in this example."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.085,
174
+ 0.476,
175
+ 0.369,
176
+ 0.49
177
+ ],
178
+ "angle": 0,
179
+ "content": "To summarize, our main contributions are:"
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.111,
185
+ 0.5,
186
+ 0.487,
187
+ 0.53
188
+ ],
189
+ "angle": 0,
190
+ "content": "- We find that most current models do not perform well in fine-grained multi-image tasks."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.111,
196
+ 0.54,
197
+ 0.488,
198
+ 0.601
199
+ ],
200
+ "angle": 0,
201
+ "content": "- To address the issues with existing methods, we propose a novel Contrastive Chain-of-Thought (CoCoT) prompting strategy to enhance models' understanding of the relationships between multiple image inputs."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.111,
207
+ 0.61,
208
+ 0.487,
209
+ 0.641
210
+ ],
211
+ "angle": 0,
212
+ "content": "- Our proposed method produces significant improvement for both open-source and closed-source models."
213
+ },
214
+ {
215
+ "type": "list",
216
+ "bbox": [
217
+ 0.111,
218
+ 0.5,
219
+ 0.488,
220
+ 0.641
221
+ ],
222
+ "angle": 0,
223
+ "content": null
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.206,
229
+ 0.659,
230
+ 0.367,
231
+ 0.673
232
+ ],
233
+ "angle": 0,
234
+ "content": "2. RELATED WORK"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.083,
240
+ 0.687,
241
+ 0.49,
242
+ 0.915
243
+ ],
244
+ "angle": 0,
245
+ "content": "Large Multimodal Models. Inspired by the advancements of LLMs (e.g., LLAMA [1]), LMMs offer a promising way towards AGI with multimodal information. These models blend the textual reasoning prowess of LLMs with the image and video comprehension of Vision-and-Language models. This fusion enables LMMs to handle complex tasks requiring both a profound understanding and expressive generation across various modalities. Several open-source LMMs like LLAVA [4] have emerged, demonstrating competence in tasks such as image captioning and visual question-answering. However, their architectural limitations restrict their understanding and reasoning to a single image. Conversely, models like OPENFLAMINGO [12], and MMICL [13] employ specialized architectures enabling the processing of multiple image features, which better mirrors"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.509,
251
+ 0.476,
252
+ 0.916,
253
+ 0.598
254
+ ],
255
+ "angle": 0,
256
+ "content": "real-world scenarios. Closed-source LMMs such as GPT-4V [14] and GEMINI [15] go beyond basic object descriptions to capture the scene's context [11], emotions [13], and relationships [16]. A common technique to enhance performance is fine-tuning, but applying similar methods to LMMs presents computation challenges [17]. To overcome this, we propose a novel approach to directly enable detailed analysis and reasoning on images without additional training data."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.509,
262
+ 0.657,
263
+ 0.916,
264
+ 0.915
265
+ ],
266
+ "angle": 0,
267
+ "content": "Multimodal Prompting Methods. Within the domain of LLMs, several language prompt methods have been established to enhance inference capabilities and ensure accurate results during prediction. These include zero-shot [18], few-shot [19], and Chain-of-Thought (CoT) [10, 11] approaches. Recently, research has begun exploring the application of prompting techniques in the multimodal domain to improve the comprehension and reasoning abilities of LMMs for image data. Current multimodal prompts employed in LMMs often exhibit limitations in capturing the intricate interrelationships between visual and language information, particularly when faced with multi-image inputs. As shown in the example in Fig. 1, they are not able to identify the critical action of the boy throwing the ball. To overcome this challenge, we propose a novel prompting method that directs LMMs to extract and analyze essential information, requiring a holistic consideration of all the input images."
268
+ }
269
+ ],
270
+ [
271
+ {
272
+ "type": "image",
273
+ "bbox": [
274
+ 0.104,
275
+ 0.098,
276
+ 0.891,
277
+ 0.426
278
+ ],
279
+ "angle": 0,
280
+ "content": null
281
+ },
282
+ {
283
+ "type": "image_caption",
284
+ "bbox": [
285
+ 0.083,
286
+ 0.43,
287
+ 0.916,
288
+ 0.476
289
+ ],
290
+ "angle": 0,
291
+ "content": "Fig. 2. Different CoT-based methods and their performance in extracting information from images under various conditions, with GPT-4V being used in the experiments. Left: Utilizing CCoT to generate image information; Middle: CoCoT prompting between images with a big domain gap; Right: CoCoT prompting between images with a small domain gap."
292
+ },
293
+ {
294
+ "type": "title",
295
+ "bbox": [
296
+ 0.125,
297
+ 0.49,
298
+ 0.449,
299
+ 0.505
300
+ ],
301
+ "angle": 0,
302
+ "content": "3. CONTRASTIVE CHAIN-OF-THOUGHT"
303
+ },
304
+ {
305
+ "type": "title",
306
+ "bbox": [
307
+ 0.084,
308
+ 0.546,
309
+ 0.265,
310
+ 0.561
311
+ ],
312
+ "angle": 0,
313
+ "content": "3.1. Motivation Analysis"
314
+ },
315
+ {
316
+ "type": "text",
317
+ "bbox": [
318
+ 0.083,
319
+ 0.597,
320
+ 0.49,
321
+ 0.914
322
+ ],
323
+ "angle": 0,
324
+ "content": "Traditional CoT-based prompting methods for LMMs can be categorized into two types. The first type is based on text understanding, such as DDCoT (i.e., Duty-Distinct Chain-of-Thought) [10], which decomposes a question into subquestions for a step-by-step response. The second type is based on image understanding, like CCoT (i.e., Compositional Chain-of-Thought) [11], which generates a scene graph of the image to provide answers. However, while processing images, the text-based CoT does not enable LMMs to directly acquire and comprehend the detailed information in images. As shown in Fig. 1, DDCoT does not enable the LMM to recognize that the kid in the second image is not throwing a basketball. The image-based CCoT merely extracts basic information about the main objects in the image, also overlooking significant details. As shown in Fig. 1, CCoT generates a series of scene graphs unrelated to the question. Existing CoT-based prompting methods struggle to notice the details when answering questions about images rich in detail. Therefore, an effective prompting method should enable LMMs to discern and understand the details in images, and subsequently answer questions based on this understanding."
325
+ },
326
+ {
327
+ "type": "title",
328
+ "bbox": [
329
+ 0.511,
330
+ 0.49,
331
+ 0.642,
332
+ 0.507
333
+ ],
334
+ "angle": 0,
335
+ "content": "3.2. Methodology"
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.509,
341
+ 0.521,
342
+ 0.917,
343
+ 0.914
344
+ ],
345
+ "angle": 0,
346
+ "content": "We focus on how to enable LMMs to extract more detailed information from images, especially when the images are very similar. Initially, we examine the extent to which LMMs based on CCoT can extract information from images, as illustrated in Fig. 2. GPT-4V, utilizing CCoT, is limited to identifying entities, their characteristics, and straightforward details like events and relationships between entities. Drawing inspiration from contrastive learning, our approach encourages LMMs to discern similarities and differences within images. We discover that these models are capable of engaging with more complex information, such as reasoning, even when there is a considerable difference in the domain between the images being compared and the original. For instance, they might deduce that an image's scene likely follows a storm and recognize a negative emotional tone in it. When comparing similar images, focusing on the similarities and differences of images effectively highlights the contrasts, such as recognizing more severe damage in one image compared to another, or differentiating the causes of car damage between two images, thereby effectively facilitating causal reasoning. Consequently, we develop the Contrastive Chain-of-Thought prompting. As shown in Fig. 1, this approach, similarly starting from an image perspective, initially compares the similarities and differences between various image inputs. It then directs LMMs to answer questions based on the insights gathered from such comparisons."
347
+ }
348
+ ],
349
+ [
350
+ {
351
+ "type": "title",
352
+ "bbox": [
353
+ 0.155,
354
+ 0.091,
355
+ 0.419,
356
+ 0.106
357
+ ],
358
+ "angle": 0,
359
+ "content": "4. EXPERIMENTS AND RESULTS"
360
+ },
361
+ {
362
+ "type": "title",
363
+ "bbox": [
364
+ 0.084,
365
+ 0.121,
366
+ 0.251,
367
+ 0.137
368
+ ],
369
+ "angle": 0,
370
+ "content": "4.1. Experiment Setup"
371
+ },
372
+ {
373
+ "type": "text",
374
+ "bbox": [
375
+ 0.082,
376
+ 0.145,
377
+ 0.489,
378
+ 0.373
379
+ ],
380
+ "angle": 0,
381
+ "content": "Datasets. We evaluate the effectiveness of CoCoT on two fine-grained multi-image tasks: (1) image-to-image matching and (2) multi-image-to-text matching. Both tasks are well-suited for assessing whether the CoT-based method enables LMMs to acquire more fine-grained information from multiple image inputs. The image-to-image matching task employs the Raven-50 [20, 21] and Factify2 [22] datasets. This task tests the models' ability to identify and interpret visual details, requiring them to determine the degree of match between different images. For the multi-image-to-text matching task, we use Winoground [16]. This task requires LMMs to effectively pair similar images with their corresponding textual descriptions, or alternatively, to align similar texts with the corresponding images. Details including dataset statistics and preprocessing specifics are discussed in Appendix A."
382
+ },
383
+ {
384
+ "type": "text",
385
+ "bbox": [
386
+ 0.082,
387
+ 0.373,
388
+ 0.489,
389
+ 0.479
390
+ ],
391
+ "angle": 0,
392
+ "content": "Baselines. We compare CoCoT prompting to two state-of-the-art methods in CoT-based multimodal prompting. This includes DDCoT [10] and CCoT [11]. Additionally, we benchmark CoCoT against the standard prompting baseline, which does not incorporate any CoT instructions. Note that all the experiments are conducted under the zero-shot setting. Example prompts and answers can be found in Fig. 1."
393
+ },
394
+ {
395
+ "type": "text",
396
+ "bbox": [
397
+ 0.082,
398
+ 0.48,
399
+ 0.49,
400
+ 0.676
401
+ ],
402
+ "angle": 0,
403
+ "content": "Language Models. We evaluate different prompting strategies on two open-source LMMs: OPENFLAMINGO [12] and MMICL [13], as well as two proprietary models including GPT-4V [14] and GEMINI [15]. Due to API restrictions of GPT4-V, we only evaluate the standard and CoCoT prompting for it. For the setting of generation, we use the default configuration for each model. We use beam search with beam width of 3 for OPENFLAMINGO. In the case of MMICL, the beam width is set to 8. For GEMINI, we opt for the API of Gemini Pro Vision under the default settings which include a temperature of 0.4, TopK set to 32, TopP at 1, and a maximum length of 4,096. For GPT-4V, we use the default settings of the web version as of December 30, 2023."
404
+ },
405
+ {
406
+ "type": "title",
407
+ "bbox": [
408
+ 0.084,
409
+ 0.697,
410
+ 0.218,
411
+ 0.71
412
+ ],
413
+ "angle": 0,
414
+ "content": "4.2. Main Results"
415
+ },
416
+ {
417
+ "type": "title",
418
+ "bbox": [
419
+ 0.084,
420
+ 0.723,
421
+ 0.307,
422
+ 0.737
423
+ ],
424
+ "angle": 0,
425
+ "content": "4.2.1. Image-to-image Matching"
426
+ },
427
+ {
428
+ "type": "text",
429
+ "bbox": [
430
+ 0.082,
431
+ 0.747,
432
+ 0.489,
433
+ 0.868
434
+ ],
435
+ "angle": 0,
436
+ "content": "The task of image-to-image matching requires the model to extract information from two images simultaneously and then determine under a prompt whether the information from both images matches, as exemplified in Fig. 3. LLMs are expected to select the correct answer from the given choices. In addition to the aforementioned methods, we include another random choice baseline for comparative reference. Accuracy of LMMs with different prompting methods is shown in Table 1."
437
+ },
438
+ {
439
+ "type": "text",
440
+ "bbox": [
441
+ 0.082,
442
+ 0.869,
443
+ 0.489,
444
+ 0.915
445
+ ],
446
+ "angle": 0,
447
+ "content": "CoCoT significantly improves LMMs' performance in the image-to-image matching task. Most models show improved performance when DDCoT and CCoT are employed,"
448
+ },
449
+ {
450
+ "type": "image",
451
+ "bbox": [
452
+ 0.514,
453
+ 0.092,
454
+ 0.91,
455
+ 0.177
456
+ ],
457
+ "angle": 0,
458
+ "content": null
459
+ },
460
+ {
461
+ "type": "image_caption",
462
+ "bbox": [
463
+ 0.509,
464
+ 0.184,
465
+ 0.915,
466
+ 0.215
467
+ ],
468
+ "angle": 0,
469
+ "content": "Fig. 3. An example question from the image-to-image matching task, sourced from the Raven-50 [20, 21] dataset."
470
+ },
471
+ {
472
+ "type": "table_caption",
473
+ "bbox": [
474
+ 0.509,
475
+ 0.24,
476
+ 0.915,
477
+ 0.285
478
+ ],
479
+ "angle": 0,
480
+ "content": "Table 1. Accuracy of LMMs employing different prompting strategies in the image-to-image matching task. The best performance within each LMM is highlighted in bold."
481
+ },
482
+ {
483
+ "type": "table",
484
+ "bbox": [
485
+ 0.536,
486
+ 0.286,
487
+ 0.892,
488
+ 0.507
489
+ ],
490
+ "angle": 0,
491
+ "content": "<table><tr><td></td><td>Raven-50</td><td>Factify-V</td></tr><tr><td>Random Choice</td><td>17.00</td><td>50.00</td></tr><tr><td>OPENFLAMINGO</td><td>24.00</td><td>54.00</td></tr><tr><td>OPENFLAMINGO + DDCoT</td><td>24.00</td><td>58.40</td></tr><tr><td>OPENFLAMINGO + CCoT</td><td>24.00</td><td>63.20</td></tr><tr><td>OPENFLAMINGO + CoCoT</td><td>26.00</td><td>65.00</td></tr><tr><td>MMICL</td><td>22.00</td><td>64.60</td></tr><tr><td>MMICL + DDCoT</td><td>10.00</td><td>68.40</td></tr><tr><td>MMICL + CCoT</td><td>26.00</td><td>73.20</td></tr><tr><td>MMICL + CoCoT</td><td>26.00</td><td>77.00</td></tr><tr><td>GEMINI</td><td>18.00</td><td>58.00</td></tr><tr><td>GEMINI + DDCoT</td><td>12.00</td><td>65.40</td></tr><tr><td>GEMINI + CCoT</td><td>20.00</td><td>80.20</td></tr><tr><td>GEMINI + CoCoT</td><td>22.00</td><td>77.80</td></tr><tr><td>GPT-4V</td><td>30.00</td><td>74.00</td></tr><tr><td>GPT-4V + CoCoT</td><td>45.00</td><td>80.60</td></tr></table>"
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.508,
497
+ 0.519,
498
+ 0.914,
499
+ 0.624
500
+ ],
501
+ "angle": 0,
502
+ "content": "but the extent of improvement is not as significant as with CoCoT in most cases. Furthermore, regarding the Raven-50 dataset, which comprises non-natural images made up of various shapes, surprisingly, GEMINI emerges as the model with the poorest performance in our evaluations when GPT-4V performs the best which surpasses all models, including the open-source ones like OPENFLAMINGO and MMICL."
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.508,
508
+ 0.626,
509
+ 0.914,
510
+ 0.762
511
+ ],
512
+ "angle": 0,
513
+ "content": "For the Factify-V dataset featuring natural images, GEMINI without CoT outperforms OPENFLAMINGO in similar conditions. However, when CoT is incorporated, GEMINI's performance is almost on par with that of GPT4-V under similar conditions. This outcome differs from the results on the Raven-50 dataset, suggesting that GEMINI inherently possesses the capability to extract detailed information from natural images. Its full potential in this aspect is not fully demonstrated without the use of prompts."
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.508,
519
+ 0.763,
520
+ 0.915,
521
+ 0.914
522
+ ],
523
+ "angle": 0,
524
+ "content": "In summary, our analysis of the image-to-image matching task reveals a consistent enhancement in performance across most models upon integrating various types of CoT-based prompting. This improvement underscores the ability of the visual components within LMMs to concentrate on details in terms of the task at hand. These details are subsequently processed by the LMMs for in-depth analysis, following the CoT-based prompting approach. Notably, in a majority of cases, CoCoT prompting elicits LMMs to achieve state-of-the-art performance on both natural and artificial datasets, surpass"
525
+ }
526
+ ],
527
+ [
528
+ {
529
+ "type": "text",
530
+ "bbox": [
531
+ 0.084,
532
+ 0.092,
533
+ 0.488,
534
+ 0.152
535
+ ],
536
+ "angle": 0,
537
+ "content": "ing other CoT-based strategies. This showcases the efficacy of CoCoT in guiding LMMs to accurately extract and analyze task-relevant information from images, facilitating enhanced comparative and analytical reasoning within these models."
538
+ },
539
+ {
540
+ "type": "title",
541
+ "bbox": [
542
+ 0.084,
543
+ 0.184,
544
+ 0.331,
545
+ 0.2
546
+ ],
547
+ "angle": 0,
548
+ "content": "4.2.2. Multi-image-to-text Matching"
549
+ },
550
+ {
551
+ "type": "text",
552
+ "bbox": [
553
+ 0.082,
554
+ 0.212,
555
+ 0.49,
556
+ 0.453
557
+ ],
558
+ "angle": 0,
559
+ "content": "Compared to the image-to-image matching task, the multi-image-to-text matching task requires models to precisely extract information from images and match it with text. An exemplar question can be found in Fig. 1. In particular, Winoground dataset is used for this task. Performance on Winoground (shown in Table 2) is assessed using three distinct metrics, each examining a different facet of the models' abilities to reason with both vision and language. The first metric, known as the text score, evaluates the model's capability to accurately choose the right caption when provided with an image. The second metric is the image score, assessing a model's ability to correctly identify the appropriate image when presented with a caption. The last metric is a composite score that integrates the first two metrics. In this group score, a case is considered correct if the model successfully achieves both the accurate text score and image score."
560
+ },
561
+ {
562
+ "type": "text",
563
+ "bbox": [
564
+ 0.082,
565
+ 0.456,
566
+ 0.49,
567
+ 0.592
568
+ ],
569
+ "angle": 0,
570
+ "content": "CoCoT boosts LMMs' performance in the multi-image-to-text matching task, achieving substantial gains. It outperforms other CoT-based methods in the majority of scenarios. This indicates that when comparing the similarities and differences of images, LMMs can better match with the text by identifying subtle differences in the input image pairs. The example in Fig. 1 also shows that methods like DDCoT and CCoT may miss key information, possibly as a result of misdirected focus."
571
+ },
572
+ {
573
+ "type": "text",
574
+ "bbox": [
575
+ 0.082,
576
+ 0.595,
577
+ 0.49,
578
+ 0.716
579
+ ],
580
+ "angle": 0,
581
+ "content": "GEMINI's performance is still the worst, indicating that although GEMINI's visual encoder can extract detailed information from the image, the model is not able to effectively summarize the information in the image, resulting in a poor match with the text. GPT-4V's performance on this task is also inferior to MMICL, indicating that GPT-4V also struggles to effectively summarize detailed information within images, particularly when the input image pairs are very similar."
582
+ },
583
+ {
584
+ "type": "text",
585
+ "bbox": [
586
+ 0.082,
587
+ 0.718,
588
+ 0.49,
589
+ 0.914
590
+ ],
591
+ "angle": 0,
592
+ "content": "All current models exhibit a significant gap compared to human performance. The reasons for this situation could include several possibilities: 1) The visual encoder's capabilities are insufficient; 2) Large models are unable to extract detailed information from the visual encoder, thereby failing in reasoning; and 3) Large models themselves are inadequate, leading to incorrect reasoning. Based on current experimental results, the visual encoder of LMMs is actually capable of recognizing some detailed information. However, due to the differences in the latent spaces between the visual encoder and the large language models, as well as the generalization issues of LLMs, LMMs are unable to fully understand images, resulting in mediocre performance across various tasks."
593
+ },
594
+ {
595
+ "type": "table_caption",
596
+ "bbox": [
597
+ 0.509,
598
+ 0.1,
599
+ 0.916,
600
+ 0.145
601
+ ],
602
+ "angle": 0,
603
+ "content": "Table 2. Accuracy of LMMs employing different prompting strategies in the multi-image-to-text matching task. The best performance within each LMM is highlighted in bold."
604
+ },
605
+ {
606
+ "type": "table",
607
+ "bbox": [
608
+ 0.536,
609
+ 0.145,
610
+ 0.895,
611
+ 0.386
612
+ ],
613
+ "angle": 0,
614
+ "content": "<table><tr><td></td><td>Text</td><td>Image</td><td>Group</td></tr><tr><td>MTurk Human</td><td>89.50</td><td>88.50</td><td>85.50</td></tr><tr><td>Random Choice</td><td>25.00</td><td>25.00</td><td>16.67</td></tr><tr><td>OPENFLAMINGO</td><td>39.00</td><td>41.25</td><td>33.25</td></tr><tr><td>OPENFLAMINGO + DDCoT</td><td>47.50</td><td>47.25</td><td>39.00</td></tr><tr><td>OPENFLAMINGO + CCoT</td><td>42.50</td><td>27.50</td><td>20.00</td></tr><tr><td>OPENFLAMINGO + CoCoT</td><td>58.25</td><td>55.25</td><td>41.50</td></tr><tr><td>MMICL</td><td>46.50</td><td>40.75</td><td>37.75</td></tr><tr><td>MMICL + DDCoT</td><td>46.75</td><td>45.00</td><td>36.75</td></tr><tr><td>MMICL + CCoT</td><td>51.00</td><td>48.00</td><td>47.50</td></tr><tr><td>MMICL + CoCoT</td><td>64.25</td><td>52.50</td><td>50.75</td></tr><tr><td>GEMINI</td><td>30.75</td><td>26.00</td><td>25.00</td></tr><tr><td>GEMINI + DDCoT</td><td>45.00</td><td>25.00</td><td>23.75</td></tr><tr><td>GEMINI + CCoT</td><td>22.50</td><td>33.00</td><td>20.75</td></tr><tr><td>GEMINI + CoCoT</td><td>40.00</td><td>32.50</td><td>27.75</td></tr><tr><td>GPT-4V</td><td>54.50</td><td>42.50</td><td>37.75</td></tr><tr><td>GPT-4V + CoCoT</td><td>58.50</td><td>49.50</td><td>44.50</td></tr></table>"
615
+ },
616
+ {
617
+ "type": "table_caption",
618
+ "bbox": [
619
+ 0.509,
620
+ 0.405,
621
+ 0.915,
622
+ 0.434
623
+ ],
624
+ "angle": 0,
625
+ "content": "Table 3. Ablation study of the similarities and differences varaints of CoCoT on the Factify-V dataset."
626
+ },
627
+ {
628
+ "type": "table",
629
+ "bbox": [
630
+ 0.58,
631
+ 0.435,
632
+ 0.848,
633
+ 0.514
634
+ ],
635
+ "angle": 0,
636
+ "content": "<table><tr><td></td><td>MMICL</td><td>GEMINI</td></tr><tr><td>No prompt</td><td>64.60</td><td>58.00</td></tr><tr><td>+ Similarities</td><td>75.60</td><td>60.80</td></tr><tr><td>+ Differences</td><td>63.40</td><td>65.40</td></tr><tr><td>+ CoCoT</td><td>77.00</td><td>77.80</td></tr></table>"
637
+ },
638
+ {
639
+ "type": "title",
640
+ "bbox": [
641
+ 0.51,
642
+ 0.535,
643
+ 0.657,
644
+ 0.551
645
+ ],
646
+ "angle": 0,
647
+ "content": "4.3. Ablation Study"
648
+ },
649
+ {
650
+ "type": "text",
651
+ "bbox": [
652
+ 0.508,
653
+ 0.559,
654
+ 0.916,
655
+ 0.742
656
+ ],
657
+ "angle": 0,
658
+ "content": "CoCoT instructs LMMs to identify the similarities and differences across multiple image inputs first before providing an answer. In our ablation study, we break down the prompts into two distinct components: 1) a prompt that only requests the identification of similarities, and 2) a prompt that solely focuses on extracting the differences. As shown in Table. 3, we can observe that for GEMINI, the performance improves to some extent with the addition of either similarities or differences prompts alone, but not as much as when all prompts are included. For MMICL, adding only the differences prompts leads to a minimal decrease in performance, but the best results are achieved when both prompts are incorporated."
659
+ },
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.563,
664
+ 0.763,
665
+ 0.864,
666
+ 0.779
667
+ ],
668
+ "angle": 0,
669
+ "content": "5. DISCUSSIONS AND CONCLUSIONS"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.508,
675
+ 0.793,
676
+ 0.916,
677
+ 0.914
678
+ ],
679
+ "angle": 0,
680
+ "content": "In this study, we address the challenges faced by large multimodal models in processing detailed visual information from multiple images. We have developed the Contrastive Chain-of-Thought (CoCoT) approach, a novel prompting strategy that significantly enhances LMMs' ability to discern fine-grained details in multi-image tasks. Our experiments with various models, including GPT-4V, GEMINI, OPENFLAMINGO, and MMICL, demonstrate that CoCoT im"
681
+ }
682
+ ],
683
+ [
684
+ {
685
+ "type": "text",
686
+ "bbox": [
687
+ 0.087,
688
+ 0.092,
689
+ 0.487,
690
+ 0.226
691
+ ],
692
+ "angle": 0,
693
+ "content": "proves performance in image-to-image matching and multi-image-to-text tasks. This study contributes to the field of Artificial General Intelligence (AGI), offering new possibilities in areas requiring precise image interpretation. However, CoCoT, due to its requirement for other images for comparison, might introduce unnecessary noise. Future research should focus on refining CoCoT for more complex scenarios and integrating it with other AI technologies to further advance multimodal understanding and AGI development."
694
+ },
695
+ {
696
+ "type": "title",
697
+ "bbox": [
698
+ 0.221,
699
+ 0.254,
700
+ 0.354,
701
+ 0.266
702
+ ],
703
+ "angle": 0,
704
+ "content": "6. REFERENCES"
705
+ },
706
+ {
707
+ "type": "ref_text",
708
+ "bbox": [
709
+ 0.096,
710
+ 0.282,
711
+ 0.487,
712
+ 0.357
713
+ ],
714
+ "angle": 0,
715
+ "content": "[1] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al., \"Llama: Open and efficient foundation language models,\" arXiv preprint arXiv:2302.13971, 2023."
716
+ },
717
+ {
718
+ "type": "ref_text",
719
+ "bbox": [
720
+ 0.096,
721
+ 0.36,
722
+ 0.487,
723
+ 0.449
724
+ ],
725
+ "angle": 0,
726
+ "content": "[2] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al., \"Training language models to follow instructions with human feedback,\" Advances in Neural Information Processing Systems, vol. 35, pp. 27730-27744, 2022."
727
+ },
728
+ {
729
+ "type": "ref_text",
730
+ "bbox": [
731
+ 0.096,
732
+ 0.452,
733
+ 0.487,
734
+ 0.511
735
+ ],
736
+ "angle": 0,
737
+ "content": "[3] Daoan Zhang, Weitong Zhang, Bing He, Jianguo Zhang, Chenchen Qin, and Jianhua Yao, \"Dnagpt: A generalized pretrained tool for multiple dna sequence analysis tasks,\" bioRxiv, pp. 2023-07, 2023."
738
+ },
739
+ {
740
+ "type": "ref_text",
741
+ "bbox": [
742
+ 0.096,
743
+ 0.513,
744
+ 0.487,
745
+ 0.557
746
+ ],
747
+ "angle": 0,
748
+ "content": "[4] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee, “Visual instruction tuning,” arXiv preprint arXiv:2304.08485, 2023."
749
+ },
750
+ {
751
+ "type": "ref_text",
752
+ "bbox": [
753
+ 0.096,
754
+ 0.56,
755
+ 0.487,
756
+ 0.62
757
+ ],
758
+ "angle": 0,
759
+ "content": "[5] Yunlong Tang, Jinrui Zhang, Xiangchen Wang, Teng Wang, and Feng Zheng, “Llmva-gebc: Large language model with video adapter for generic event boundary captioning,” arXiv preprint arXiv:2306.10354, 2023."
760
+ },
761
+ {
762
+ "type": "ref_text",
763
+ "bbox": [
764
+ 0.096,
765
+ 0.622,
766
+ 0.487,
767
+ 0.681
768
+ ],
769
+ "angle": 0,
770
+ "content": "[6] Yushi Hu, Hang Hua, Zhengyuan Yang, Weijia Shi, Noah A Smith, and Jiebo Luo, “Promptcap: Prompt-guided task-aware image captioning,” arXiv preprint arXiv:2211.09699, 2022."
771
+ },
772
+ {
773
+ "type": "ref_text",
774
+ "bbox": [
775
+ 0.096,
776
+ 0.684,
777
+ 0.487,
778
+ 0.743
779
+ ],
780
+ "angle": 0,
781
+ "content": "[7] Hang Hua, Xingjian Li, Dejing Dou, Cheng-Zhong Xu, and Jiebo Luo, “Fine-tuning pre-trained language models with noise stability regularization,” arXiv preprint arXiv:2206.05658, 2022."
782
+ },
783
+ {
784
+ "type": "ref_text",
785
+ "bbox": [
786
+ 0.096,
787
+ 0.746,
788
+ 0.487,
789
+ 0.819
790
+ ],
791
+ "angle": 0,
792
+ "content": "[8] Hanjia Lyu, Jina Huang, Daoan Zhang, Yongsheng Yu, Xinyi Mou, Jinsheng Pan, Zhengyuan Yang, Zhongyu Wei, and Jiebo Luo, “Gpt-4v (ision) as a social media analysis engine,” arXiv preprint arXiv:2311.07547, 2023."
793
+ },
794
+ {
795
+ "type": "ref_text",
796
+ "bbox": [
797
+ 0.096,
798
+ 0.822,
799
+ 0.487,
800
+ 0.883
801
+ ],
802
+ "angle": 0,
803
+ "content": "[9] Yongsheng Yu, Dawei Du, Libo Zhang, and Tiejian Luo, \"Unbiased multi-modality guidance for image inpainting,\" in European Conference on Computer Vision. Springer, 2022, pp. 668-684."
804
+ },
805
+ {
806
+ "type": "ref_text",
807
+ "bbox": [
808
+ 0.089,
809
+ 0.884,
810
+ 0.487,
811
+ 0.914
812
+ ],
813
+ "angle": 0,
814
+ "content": "[10] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang, \"Ddcot: Duty-distinct chain-of-thought"
815
+ },
816
+ {
817
+ "type": "list",
818
+ "bbox": [
819
+ 0.089,
820
+ 0.282,
821
+ 0.487,
822
+ 0.914
823
+ ],
824
+ "angle": 0,
825
+ "content": null
826
+ },
827
+ {
828
+ "type": "ref_text",
829
+ "bbox": [
830
+ 0.545,
831
+ 0.093,
832
+ 0.913,
833
+ 0.121
834
+ ],
835
+ "angle": 0,
836
+ "content": "prompting for multimodal reasoning in language models,\" arXiv preprint arXiv:2310.16436, 2023."
837
+ },
838
+ {
839
+ "type": "ref_text",
840
+ "bbox": [
841
+ 0.514,
842
+ 0.124,
843
+ 0.914,
844
+ 0.181
845
+ ],
846
+ "angle": 0,
847
+ "content": "[11] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig, \"Compositional chain-of-thought prompting for large multimodal models,\" arXiv preprint arXiv:2311.17076, 2023."
848
+ },
849
+ {
850
+ "type": "ref_text",
851
+ "bbox": [
852
+ 0.513,
853
+ 0.184,
854
+ 0.914,
855
+ 0.272
856
+ ],
857
+ "angle": 0,
858
+ "content": "[12] Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, et al., “Open-flamingo: An open-source framework for training large autoregressive vision-language models,” arXiv preprint arXiv:2308.01390, 2023."
859
+ },
860
+ {
861
+ "type": "ref_text",
862
+ "bbox": [
863
+ 0.514,
864
+ 0.276,
865
+ 0.913,
866
+ 0.349
867
+ ],
868
+ "angle": 0,
869
+ "content": "[13] Haozhe Zhao, Zefan Cai, Shuzheng Si, Xiaojian Ma, Kaikai An, Liang Chen, Zixuan Liu, Sheng Wang, Wenjuan Han, and Baobao Chang, \"Mmicl: Empowering vision-language model with multi-modal in-context learning,\" arXiv preprint arXiv:2309.07915, 2023."
870
+ },
871
+ {
872
+ "type": "ref_text",
873
+ "bbox": [
874
+ 0.514,
875
+ 0.352,
876
+ 0.913,
877
+ 0.379
878
+ ],
879
+ "angle": 0,
880
+ "content": "[14] OpenAI, “GPT-4 technical report,” CoRR, vol. abs/2303.08774, 2023."
881
+ },
882
+ {
883
+ "type": "ref_text",
884
+ "bbox": [
885
+ 0.514,
886
+ 0.383,
887
+ 0.913,
888
+ 0.456
889
+ ],
890
+ "angle": 0,
891
+ "content": "[15] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al., \"Gemini: A family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023."
892
+ },
893
+ {
894
+ "type": "ref_text",
895
+ "bbox": [
896
+ 0.514,
897
+ 0.459,
898
+ 0.913,
899
+ 0.547
900
+ ],
901
+ "angle": 0,
902
+ "content": "[16] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross, “Winoground: Probing vision and language models for visio-linguistic compositionality,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 5238–5248."
903
+ },
904
+ {
905
+ "type": "ref_text",
906
+ "bbox": [
907
+ 0.514,
908
+ 0.55,
909
+ 0.913,
910
+ 0.623
911
+ ],
912
+ "angle": 0,
913
+ "content": "[17] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al., “Mmbench: Is your multi-modal model an all-around player?” arXiv preprint arXiv:2307.06281, 2023."
914
+ },
915
+ {
916
+ "type": "ref_text",
917
+ "bbox": [
918
+ 0.514,
919
+ 0.626,
920
+ 0.913,
921
+ 0.699
922
+ ],
923
+ "angle": 0,
924
+ "content": "[18] Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa, “Large language models are zero-shot reasoners,” Advances in neural information processing systems, vol. 35, pp. 22199-22213, 2022."
925
+ },
926
+ {
927
+ "type": "ref_text",
928
+ "bbox": [
929
+ 0.514,
930
+ 0.703,
931
+ 0.913,
932
+ 0.791
933
+ ],
934
+ "angle": 0,
935
+ "content": "[19] Renrui Zhang, Xiangfei Hu, Bohao Li, Siyuan Huang, Hanqiu Deng, Yu Qiao, Peng Gao, and Hongsheng Li, \"Prompt, generate, then cache: Cascade of foundation models makes strong few-shot learners,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 15211-15222."
936
+ },
937
+ {
938
+ "type": "ref_text",
939
+ "bbox": [
940
+ 0.514,
941
+ 0.793,
942
+ 0.913,
943
+ 0.867
944
+ ],
945
+ "angle": 0,
946
+ "content": "[20] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu, “Raven: A dataset for relational and analogical visual reasoning,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 5317-5327."
947
+ },
948
+ {
949
+ "type": "ref_text",
950
+ "bbox": [
951
+ 0.514,
952
+ 0.87,
953
+ 0.913,
954
+ 0.914
955
+ ],
956
+ "angle": 0,
957
+ "content": "[21] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al., \"Language"
958
+ },
959
+ {
960
+ "type": "list",
961
+ "bbox": [
962
+ 0.513,
963
+ 0.093,
964
+ 0.914,
965
+ 0.914
966
+ ],
967
+ "angle": 0,
968
+ "content": null
969
+ }
970
+ ],
971
+ [
972
+ {
973
+ "type": "ref_text",
974
+ "bbox": [
975
+ 0.12,
976
+ 0.092,
977
+ 0.488,
978
+ 0.121
979
+ ],
980
+ "angle": 0,
981
+ "content": "is not all you need: Aligning perception with language models,\" arXiv preprint arXiv:2302.14045, 2023."
982
+ },
983
+ {
984
+ "type": "ref_text",
985
+ "bbox": [
986
+ 0.088,
987
+ 0.123,
988
+ 0.49,
989
+ 0.197
990
+ ],
991
+ "angle": 0,
992
+ "content": "[22] S Suryavardan, Shreyash Mishra, Parth Patwa, Megha Chakraborty, Anku Rani, Aishwarya Reganti, Aman Chadha, Amitava Das, Amit Sheth, Manoj Chinnakotla, et al., \"Factify 2: A multimodal fake news and satire news dataset,\" arXiv preprint arXiv:2304.03897, 2023."
993
+ },
994
+ {
995
+ "type": "list",
996
+ "bbox": [
997
+ 0.088,
998
+ 0.092,
999
+ 0.49,
1000
+ 0.197
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": null
1004
+ }
1005
+ ],
1006
+ [
1007
+ {
1008
+ "type": "title",
1009
+ "bbox": [
1010
+ 0.177,
1011
+ 0.091,
1012
+ 0.398,
1013
+ 0.106
1014
+ ],
1015
+ "angle": 0,
1016
+ "content": "A. DATASET DESCRIPTION"
1017
+ },
1018
+ {
1019
+ "type": "title",
1020
+ "bbox": [
1021
+ 0.085,
1022
+ 0.12,
1023
+ 0.218,
1024
+ 0.135
1025
+ ],
1026
+ "angle": 0,
1027
+ "content": "A.1. Winoground"
1028
+ },
1029
+ {
1030
+ "type": "text",
1031
+ "bbox": [
1032
+ 0.087,
1033
+ 0.144,
1034
+ 0.49,
1035
+ 0.34
1036
+ ],
1037
+ "angle": 0,
1038
+ "content": "The Winoground [16] task involves matching images and captions which contains 400 groups of image-caption pairs. Each group contains two similar image-caption pairs. This task is challenging because the captions have the same words but in different sequences. LMMs must analyze both images and texts to identify subtle differences and understand the implied references. The Winoground is chosen to test if LMMs can comprehend fine-grained image information to text. Example questions are shown in Fig. 4. There are two tasks in the Winoground dataset: 1) given two images, the model is required to find out which image can match the given caption; 2) given two pieces of text, the model is required to find out which text can match the given image."
1039
+ },
1040
+ {
1041
+ "type": "title",
1042
+ "bbox": [
1043
+ 0.085,
1044
+ 0.36,
1045
+ 0.194,
1046
+ 0.373
1047
+ ],
1048
+ "angle": 0,
1049
+ "content": "A.2. Raven-50"
1050
+ },
1051
+ {
1052
+ "type": "text",
1053
+ "bbox": [
1054
+ 0.087,
1055
+ 0.384,
1056
+ 0.49,
1057
+ 0.565
1058
+ ],
1059
+ "angle": 0,
1060
+ "content": "The Raven-50 [20, 21] test is a common tool for assessing the nonverbal reasoning capabilities of LMMs. This test demands both visual acuity and logical reasoning to decipher the connections between images. In each scenario, participants are presented with either 3 or 8 images as inputs, alongside 6 potential answer images, each with a distinct solution. The goal is to correctly identify the appropriate image. Example questions are shown in Fig. 4. Note that the evaluation metric for OPENFLAMINGO and MMICL on Raven-50 dataset is to calculate the logits of the output for each image pair; while for GPT-4V and GEMINI, we directly let the model choose the correct result and calculate the accuracy."
1061
+ },
1062
+ {
1063
+ "type": "title",
1064
+ "bbox": [
1065
+ 0.085,
1066
+ 0.584,
1067
+ 0.194,
1068
+ 0.599
1069
+ ],
1070
+ "angle": 0,
1071
+ "content": "A.3. Facty-V"
1072
+ },
1073
+ {
1074
+ "type": "text",
1075
+ "bbox": [
1076
+ 0.087,
1077
+ 0.608,
1078
+ 0.49,
1079
+ 0.82
1080
+ ],
1081
+ "angle": 0,
1082
+ "content": "The Factify2 [22] dataset features 35,000 data pairs for training, and 7,500 pairs each for validation and testing. Every data pair includes a claim and a corresponding document, both of which are made up of an image, text, and OCR-generated text from the image. These pairs are categorized into one of five labels: \"support multimodal\", \"support text\", \"refute\", \"insufficient multimodal\", or \"insufficient text\". Specifically, we randomly sample 500 cases in the test set, 100 for each of the 5 categories. We only use the images in the dataset in our experiments where the labels are reorganized into \"support image\" and \"refute\". The generated subset is called Factify-V. Example questions are shown in Fig. 4. The task involves prompting the model to determine whether the pair of input images are contextually entailed."
1083
+ },
1084
+ {
1085
+ "type": "title",
1086
+ "bbox": [
1087
+ 0.094,
1088
+ 0.84,
1089
+ 0.48,
1090
+ 0.854
1091
+ ],
1092
+ "angle": 0,
1093
+ "content": "B. ADDITIONAL DISCUSSIONS OF MOTIVATION"
1094
+ },
1095
+ {
1096
+ "type": "text",
1097
+ "bbox": [
1098
+ 0.084,
1099
+ 0.869,
1100
+ 0.49,
1101
+ 0.915
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "As shown in Fig. 1, DDCoT, leaning towards a language perspective in handling images, first decouples the original question and image information into sub-questions. It then"
1105
+ },
1106
+ {
1107
+ "type": "text",
1108
+ "bbox": [
1109
+ 0.51,
1110
+ 0.092,
1111
+ 0.917,
1112
+ 0.302
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": "prompts LMMs to answer these sub-questions, generating sub-answers, and finally, LMMs use these sub-questions and sub-answers to respond to the original question. CCoT, more image-oriented, initially directs LMMs to generate a Scene Graph (SG) based on image information. LMMs then use the SG's image information in conjunction with the user's question to find an answer. Given that the above methods are not effective in catching detailed information, we focus on how to enable LMMs to extract more detailed information from images, especially when the images are very similar. To address this, CoCoT is designed to guide LMMs in identifying both the similarities and the nuanced differences between images, facilitating a more in-depth and accurate interpretation of visual content."
1116
+ },
1117
+ {
1118
+ "type": "title",
1119
+ "bbox": [
1120
+ 0.625,
1121
+ 0.322,
1122
+ 0.802,
1123
+ 0.337
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "C. MORE EXAMPLES"
1127
+ },
1128
+ {
1129
+ "type": "text",
1130
+ "bbox": [
1131
+ 0.51,
1132
+ 0.351,
1133
+ 0.915,
1134
+ 0.382
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "We further provide example responses of CoCoT based on GPT-4V on different datasets, as shown in Fig. 5 and Fig. 6."
1138
+ }
1139
+ ],
1140
+ [
1141
+ {
1142
+ "type": "image",
1143
+ "bbox": [
1144
+ 0.133,
1145
+ 0.352,
1146
+ 0.245,
1147
+ 0.482
1148
+ ],
1149
+ "angle": 0,
1150
+ "content": null
1151
+ },
1152
+ {
1153
+ "type": "image",
1154
+ "bbox": [
1155
+ 0.253,
1156
+ 0.352,
1157
+ 0.412,
1158
+ 0.44
1159
+ ],
1160
+ "angle": 0,
1161
+ "content": null
1162
+ },
1163
+ {
1164
+ "type": "image_footnote",
1165
+ "bbox": [
1166
+ 0.25,
1167
+ 0.448,
1168
+ 0.421,
1169
+ 0.477
1170
+ ],
1171
+ "angle": 0,
1172
+ "content": "Q: Which image match the caption: \"some plants surrounding a lightbulb\"? A: The first one."
1173
+ },
1174
+ {
1175
+ "type": "image_caption",
1176
+ "bbox": [
1177
+ 0.261,
1178
+ 0.486,
1179
+ 0.404,
1180
+ 0.499
1181
+ ],
1182
+ "angle": 0,
1183
+ "content": "Winoground Dataset"
1184
+ },
1185
+ {
1186
+ "type": "title",
1187
+ "bbox": [
1188
+ 0.565,
1189
+ 0.353,
1190
+ 0.687,
1191
+ 0.366
1192
+ ],
1193
+ "angle": 0,
1194
+ "content": "Raven-50 Dataset"
1195
+ },
1196
+ {
1197
+ "type": "image",
1198
+ "bbox": [
1199
+ 0.432,
1200
+ 0.374,
1201
+ 0.461,
1202
+ 0.397
1203
+ ],
1204
+ "angle": 0,
1205
+ "content": null
1206
+ },
1207
+ {
1208
+ "type": "image",
1209
+ "bbox": [
1210
+ 0.471,
1211
+ 0.371,
1212
+ 0.508,
1213
+ 0.4
1214
+ ],
1215
+ "angle": 0,
1216
+ "content": null
1217
+ },
1218
+ {
1219
+ "type": "image",
1220
+ "bbox": [
1221
+ 0.434,
1222
+ 0.409,
1223
+ 0.458,
1224
+ 0.429
1225
+ ],
1226
+ "angle": 0,
1227
+ "content": null
1228
+ },
1229
+ {
1230
+ "type": "image",
1231
+ "bbox": [
1232
+ 0.47,
1233
+ 0.402,
1234
+ 0.508,
1235
+ 0.432
1236
+ ],
1237
+ "angle": 0,
1238
+ "content": null
1239
+ },
1240
+ {
1241
+ "type": "image",
1242
+ "bbox": [
1243
+ 0.437,
1244
+ 0.441,
1245
+ 0.456,
1246
+ 0.459
1247
+ ],
1248
+ "angle": 0,
1249
+ "content": null
1250
+ },
1251
+ {
1252
+ "type": "image",
1253
+ "bbox": [
1254
+ 0.47,
1255
+ 0.435,
1256
+ 0.509,
1257
+ 0.465
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": null
1261
+ },
1262
+ {
1263
+ "type": "image",
1264
+ "bbox": [
1265
+ 0.52,
1266
+ 0.372,
1267
+ 0.558,
1268
+ 0.402
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": null
1272
+ },
1273
+ {
1274
+ "type": "image",
1275
+ "bbox": [
1276
+ 0.519,
1277
+ 0.404,
1278
+ 0.557,
1279
+ 0.432
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": null
1283
+ },
1284
+ {
1285
+ "type": "image",
1286
+ "bbox": [
1287
+ 0.527,
1288
+ 0.438,
1289
+ 0.549,
1290
+ 0.462
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": null
1294
+ },
1295
+ {
1296
+ "type": "image_footnote",
1297
+ "bbox": [
1298
+ 0.633,
1299
+ 0.374,
1300
+ 0.807,
1301
+ 0.386
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "Do you agree the following image is:"
1305
+ },
1306
+ {
1307
+ "type": "image",
1308
+ "bbox": [
1309
+ 0.591,
1310
+ 0.401,
1311
+ 0.615,
1312
+ 0.422
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": null
1316
+ },
1317
+ {
1318
+ "type": "image_caption",
1319
+ "bbox": [
1320
+ 0.593,
1321
+ 0.429,
1322
+ 0.61,
1323
+ 0.437
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "A"
1327
+ },
1328
+ {
1329
+ "type": "image",
1330
+ "bbox": [
1331
+ 0.632,
1332
+ 0.393,
1333
+ 0.671,
1334
+ 0.424
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": null
1338
+ },
1339
+ {
1340
+ "type": "image_caption",
1341
+ "bbox": [
1342
+ 0.633,
1343
+ 0.429,
1344
+ 0.658,
1345
+ 0.437
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "B"
1349
+ },
1350
+ {
1351
+ "type": "image",
1352
+ "bbox": [
1353
+ 0.683,
1354
+ 0.393,
1355
+ 0.721,
1356
+ 0.424
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": null
1360
+ },
1361
+ {
1362
+ "type": "image",
1363
+ "bbox": [
1364
+ 0.724,
1365
+ 0.393,
1366
+ 0.772,
1367
+ 0.424
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": null
1371
+ },
1372
+ {
1373
+ "type": "image_caption",
1374
+ "bbox": [
1375
+ 0.751,
1376
+ 0.429,
1377
+ 0.76,
1378
+ 0.437
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "D"
1382
+ },
1383
+ {
1384
+ "type": "image",
1385
+ "bbox": [
1386
+ 0.784,
1387
+ 0.393,
1388
+ 0.807,
1389
+ 0.424
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": null
1393
+ },
1394
+ {
1395
+ "type": "image_caption",
1396
+ "bbox": [
1397
+ 0.796,
1398
+ 0.429,
1399
+ 0.812,
1400
+ 0.437
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "E"
1404
+ },
1405
+ {
1406
+ "type": "image",
1407
+ "bbox": [
1408
+ 0.824,
1409
+ 0.392,
1410
+ 0.863,
1411
+ 0.424
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": null
1415
+ },
1416
+ {
1417
+ "type": "image_caption",
1418
+ "bbox": [
1419
+ 0.821,
1420
+ 0.429,
1421
+ 0.848,
1422
+ 0.437
1423
+ ],
1424
+ "angle": 0,
1425
+ "content": "F"
1426
+ },
1427
+ {
1428
+ "type": "image_footnote",
1429
+ "bbox": [
1430
+ 0.58,
1431
+ 0.445,
1432
+ 0.619,
1433
+ 0.453
1434
+ ],
1435
+ "angle": 0,
1436
+ "content": "Correct?"
1437
+ },
1438
+ {
1439
+ "type": "image",
1440
+ "bbox": [
1441
+ 0.655,
1442
+ 0.445,
1443
+ 0.667,
1444
+ 0.453
1445
+ ],
1446
+ "angle": 0,
1447
+ "content": null
1448
+ },
1449
+ {
1450
+ "type": "image_footnote",
1451
+ "bbox": [
1452
+ 0.67,
1453
+ 0.445,
1454
+ 0.719,
1455
+ 0.453
1456
+ ],
1457
+ "angle": 0,
1458
+ "content": "Correct?"
1459
+ },
1460
+ {
1461
+ "type": "image",
1462
+ "bbox": [
1463
+ 0.731,
1464
+ 0.445,
1465
+ 0.747,
1466
+ 0.453
1467
+ ],
1468
+ "angle": 0,
1469
+ "content": null
1470
+ },
1471
+ {
1472
+ "type": "image_footnote",
1473
+ "bbox": [
1474
+ 0.75,
1475
+ 0.445,
1476
+ 0.809,
1477
+ 0.453
1478
+ ],
1479
+ "angle": 0,
1480
+ "content": "ect? Correct"
1481
+ },
1482
+ {
1483
+ "type": "image_footnote",
1484
+ "bbox": [
1485
+ 0.819,
1486
+ 0.445,
1487
+ 0.859,
1488
+ 0.453
1489
+ ],
1490
+ "angle": 0,
1491
+ "content": "Correct?"
1492
+ },
1493
+ {
1494
+ "type": "title",
1495
+ "bbox": [
1496
+ 0.585,
1497
+ 0.488,
1498
+ 0.703,
1499
+ 0.501
1500
+ ],
1501
+ "angle": 0,
1502
+ "content": "Factify-V Dataset"
1503
+ },
1504
+ {
1505
+ "type": "text",
1506
+ "bbox": [
1507
+ 0.481,
1508
+ 0.503,
1509
+ 0.816,
1510
+ 0.517
1511
+ ],
1512
+ "angle": 0,
1513
+ "content": "Does the second image entail with the first image?"
1514
+ },
1515
+ {
1516
+ "type": "image",
1517
+ "bbox": [
1518
+ 0.149,
1519
+ 0.522,
1520
+ 0.498,
1521
+ 0.625
1522
+ ],
1523
+ "angle": 0,
1524
+ "content": null
1525
+ },
1526
+ {
1527
+ "type": "image",
1528
+ "bbox": [
1529
+ 0.503,
1530
+ 0.522,
1531
+ 0.849,
1532
+ 0.625
1533
+ ],
1534
+ "angle": 0,
1535
+ "content": null
1536
+ },
1537
+ {
1538
+ "type": "image_caption",
1539
+ "bbox": [
1540
+ 0.212,
1541
+ 0.659,
1542
+ 0.786,
1543
+ 0.674
1544
+ ],
1545
+ "angle": 0,
1546
+ "content": "Fig. 4. Sampled questions from the Raven-50, Factify-V, and Winoground datasets."
1547
+ }
1548
+ ],
1549
+ [
1550
+ {
1551
+ "type": "image",
1552
+ "bbox": [
1553
+ 0.248,
1554
+ 0.237,
1555
+ 0.344,
1556
+ 0.312
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": null
1560
+ },
1561
+ {
1562
+ "type": "image",
1563
+ "bbox": [
1564
+ 0.361,
1565
+ 0.253,
1566
+ 0.758,
1567
+ 0.301
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": null
1571
+ },
1572
+ {
1573
+ "type": "text",
1574
+ "bbox": [
1575
+ 0.204,
1576
+ 0.32,
1577
+ 0.782,
1578
+ 0.366
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "User: Please tell me the similarities and differences of these two images, base on the information in the first image, please select the missing shape from the second image and tell me which shape is your choice, counting from left to right, DO NOT use code."
1582
+ },
1583
+ {
1584
+ "type": "text",
1585
+ "bbox": [
1586
+ 0.204,
1587
+ 0.384,
1588
+ 0.784,
1589
+ 0.446
1590
+ ],
1591
+ "angle": 0,
1592
+ "content": "GPT4-V: The two images you've provided seem to be part of a visual pattern recognition or IQ test, where the goal is to determine the rule that applies to the transformations of the shapes in the first image, and then apply that rule to select the missing shape from the options in the second image."
1593
+ },
1594
+ {
1595
+ "type": "text",
1596
+ "bbox": [
1597
+ 0.204,
1598
+ 0.461,
1599
+ 0.787,
1600
+ 0.567
1601
+ ],
1602
+ "angle": 0,
1603
+ "content": "In the first image, there are three shapes: the first is a diamond with a cross inside, the second is a diamond that is rotated 45 degrees without any cross, and the third is a square with a smaller square inside. In the second image, we have a sequence of four shapes with one missing (the third position is blank). The first shape is a square with both a cross and an X inside, the second is a diamond with a cross inside, the fourth is a square with a smaller square inside, and the fifth is a diamond with both a cross and an X inside."
1604
+ },
1605
+ {
1606
+ "type": "text",
1607
+ "bbox": [
1608
+ 0.204,
1609
+ 0.583,
1610
+ 0.784,
1611
+ 0.613
1612
+ ],
1613
+ "angle": 0,
1614
+ "content": "To find the similarities and differences, we'll look at patterns in the transformations of the shapes:"
1615
+ },
1616
+ {
1617
+ "type": "text",
1618
+ "bbox": [
1619
+ 0.205,
1620
+ 0.614,
1621
+ 0.586,
1622
+ 0.628
1623
+ ],
1624
+ "angle": 0,
1625
+ "content": "The shapes alternate between a square and a diamond."
1626
+ },
1627
+ {
1628
+ "type": "text",
1629
+ "bbox": [
1630
+ 0.205,
1631
+ 0.629,
1632
+ 0.714,
1633
+ 0.644
1634
+ ],
1635
+ "angle": 0,
1636
+ "content": "- The internal lines (cross and X) seem to alternate in presence and absence."
1637
+ },
1638
+ {
1639
+ "type": "list",
1640
+ "bbox": [
1641
+ 0.205,
1642
+ 0.614,
1643
+ 0.714,
1644
+ 0.644
1645
+ ],
1646
+ "angle": 0,
1647
+ "content": null
1648
+ },
1649
+ {
1650
+ "type": "text",
1651
+ "bbox": [
1652
+ 0.203,
1653
+ 0.66,
1654
+ 0.79,
1655
+ 0.766
1656
+ ],
1657
+ "angle": 0,
1658
+ "content": "Based on these observations from the first image, we can infer that the missing shape in the second image should be a shape that is a square since it follows a diamond and does not have any internal lines, as the pattern suggests an alternation in the presence of internal lines. Considering the sequence from the second image, the missing shape would then be the third shape and should be a square without any internal lines to maintain the pattern. Counting from left to right, the missing shape is the third one in the sequence."
1659
+ },
1660
+ {
1661
+ "type": "text",
1662
+ "bbox": [
1663
+ 0.203,
1664
+ 0.812,
1665
+ 0.794,
1666
+ 0.829
1667
+ ],
1668
+ "angle": 0,
1669
+ "content": "Fig. 5. An example response generated by GPT-4V via CoCoT on the Raven-50 dataset."
1670
+ }
1671
+ ],
1672
+ [
1673
+ {
1674
+ "type": "image",
1675
+ "bbox": [
1676
+ 0.283,
1677
+ 0.226,
1678
+ 0.472,
1679
+ 0.311
1680
+ ],
1681
+ "angle": 0,
1682
+ "content": null
1683
+ },
1684
+ {
1685
+ "type": "image",
1686
+ "bbox": [
1687
+ 0.509,
1688
+ 0.228,
1689
+ 0.701,
1690
+ 0.315
1691
+ ],
1692
+ "angle": 0,
1693
+ "content": null
1694
+ },
1695
+ {
1696
+ "type": "text",
1697
+ "bbox": [
1698
+ 0.192,
1699
+ 0.315,
1700
+ 0.766,
1701
+ 0.345
1702
+ ],
1703
+ "angle": 0,
1704
+ "content": "User: Please tell me the similarities and differences of these two images, and tell me whether these two images are entailed?"
1705
+ },
1706
+ {
1707
+ "type": "text",
1708
+ "bbox": [
1709
+ 0.192,
1710
+ 0.356,
1711
+ 0.787,
1712
+ 0.416
1713
+ ],
1714
+ "angle": 0,
1715
+ "content": "GPT4-V: The two images you've provided appear to be election-related graphics from major news networks, specifically ABC News and CNN, declaring Joe Biden as the projected winner of Colorado and New York, respectively, during the 2020 United States presidential election. Here are the similarities and differences:"
1716
+ },
1717
+ {
1718
+ "type": "title",
1719
+ "bbox": [
1720
+ 0.194,
1721
+ 0.433,
1722
+ 0.279,
1723
+ 0.445
1724
+ ],
1725
+ "angle": 0,
1726
+ "content": "Similarities:"
1727
+ },
1728
+ {
1729
+ "type": "text",
1730
+ "bbox": [
1731
+ 0.194,
1732
+ 0.448,
1733
+ 0.417,
1734
+ 0.46
1735
+ ],
1736
+ "angle": 0,
1737
+ "content": "1.Both images feature Joe Biden."
1738
+ },
1739
+ {
1740
+ "type": "text",
1741
+ "bbox": [
1742
+ 0.194,
1743
+ 0.463,
1744
+ 0.57,
1745
+ 0.476
1746
+ ],
1747
+ "angle": 0,
1748
+ "content": "2.Both declare Biden as the projected winner of a state."
1749
+ },
1750
+ {
1751
+ "type": "text",
1752
+ "bbox": [
1753
+ 0.194,
1754
+ 0.479,
1755
+ 0.784,
1756
+ 0.492
1757
+ ],
1758
+ "angle": 0,
1759
+ "content": "3.The graphics use a blue and white color scheme associated with the Democratic Party."
1760
+ },
1761
+ {
1762
+ "type": "text",
1763
+ "bbox": [
1764
+ 0.194,
1765
+ 0.494,
1766
+ 0.705,
1767
+ 0.507
1768
+ ],
1769
+ "angle": 0,
1770
+ "content": "4.Both images include the news network's logo and the year of the election."
1771
+ },
1772
+ {
1773
+ "type": "text",
1774
+ "bbox": [
1775
+ 0.194,
1776
+ 0.509,
1777
+ 0.663,
1778
+ 0.523
1779
+ ],
1780
+ "angle": 0,
1781
+ "content": "5.Both use a checkmark symbol to indicate the projection of a winner."
1782
+ },
1783
+ {
1784
+ "type": "list",
1785
+ "bbox": [
1786
+ 0.194,
1787
+ 0.448,
1788
+ 0.784,
1789
+ 0.523
1790
+ ],
1791
+ "angle": 0,
1792
+ "content": null
1793
+ },
1794
+ {
1795
+ "type": "title",
1796
+ "bbox": [
1797
+ 0.194,
1798
+ 0.539,
1799
+ 0.28,
1800
+ 0.551
1801
+ ],
1802
+ "angle": 0,
1803
+ "content": "Differences:"
1804
+ },
1805
+ {
1806
+ "type": "text",
1807
+ "bbox": [
1808
+ 0.194,
1809
+ 0.554,
1810
+ 0.78,
1811
+ 0.584
1812
+ ],
1813
+ "angle": 0,
1814
+ "content": "1. The images are from different news outlets, ABC News and CNN, as indicated by their logos."
1815
+ },
1816
+ {
1817
+ "type": "text",
1818
+ "bbox": [
1819
+ 0.194,
1820
+ 0.585,
1821
+ 0.79,
1822
+ 0.613
1823
+ ],
1824
+ "angle": 0,
1825
+ "content": "2. They declare Biden as the winner in different states, Colorado in the first and New York in the second."
1826
+ },
1827
+ {
1828
+ "type": "text",
1829
+ "bbox": [
1830
+ 0.194,
1831
+ 0.616,
1832
+ 0.781,
1833
+ 0.644
1834
+ ],
1835
+ "angle": 0,
1836
+ "content": "3.The images have different designs and layouts due to varying graphic standards of the two networks."
1837
+ },
1838
+ {
1839
+ "type": "text",
1840
+ "bbox": [
1841
+ 0.194,
1842
+ 0.646,
1843
+ 0.776,
1844
+ 0.674
1845
+ ],
1846
+ "angle": 0,
1847
+ "content": "4. The first image has a graphic element that looks like a voting ballot, while the second one does not."
1848
+ },
1849
+ {
1850
+ "type": "text",
1851
+ "bbox": [
1852
+ 0.194,
1853
+ 0.677,
1854
+ 0.572,
1855
+ 0.69
1856
+ ],
1857
+ "angle": 0,
1858
+ "content": "5.The text font and style differ between the two images."
1859
+ },
1860
+ {
1861
+ "type": "list",
1862
+ "bbox": [
1863
+ 0.194,
1864
+ 0.554,
1865
+ 0.79,
1866
+ 0.69
1867
+ ],
1868
+ "angle": 0,
1869
+ "content": null
1870
+ },
1871
+ {
1872
+ "type": "text",
1873
+ "bbox": [
1874
+ 0.192,
1875
+ 0.692,
1876
+ 0.799,
1877
+ 0.768
1878
+ ],
1879
+ "angle": 0,
1880
+ "content": "Regarding whether the second image entails the first one, the answer is no. The concept of entailment in this context would mean that the second image could logically imply the information in the first. Since the images are projections for different states, the second does not imply that Biden is the projected winner of Colorado, only that he is projected to win New York. They are separate statements about the outcomes in different states."
1881
+ },
1882
+ {
1883
+ "type": "image_caption",
1884
+ "bbox": [
1885
+ 0.2,
1886
+ 0.812,
1887
+ 0.799,
1888
+ 0.829
1889
+ ],
1890
+ "angle": 0,
1891
+ "content": "Fig. 6. An example response generated by GPT-4V via CoCoT on the Factify-V dataset."
1892
+ }
1893
+ ]
1894
+ ]
2401.02xxx/2401.02582/ce2dbff1-066b-4d9a-8f19-984a20153e1d_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f0d900cd4223d74576f24361eb14fe170666125baf0e5e2b2eecea2d79810ff
3
+ size 3464626
2401.02xxx/2401.02582/full.md ADDED
@@ -0,0 +1,274 @@
1
+ # COCOT: CONTRASTIVE CHAIN-OF-THOUGHT PROMPTING FOR LARGE MULTIMODAL MODELS WITH MULTIPLE IMAGE INPUTS
2
+
3
+ Daoan Zhang $^{1,\ast}$ , Junming Yang $^{2,\ast}$ , Hanjia Lyu $^{1,\ast}$ , Zijian Jin $^{3}$ , Yuan Yao $^{1}$ , Mingkai Chen $^{4}$ , Jiebo Luo $^{1}$
4
+
5
+ <sup>1</sup> University of Rochester, <sup>2</sup> Nanjing University of Posts and Telecommunications,
6
+
7
+ $^{3}$ New York University, $^{4}$ Stony Brook University,
8
+
9
+ * These authors contributed equally to this work.
10
+
11
+ # ABSTRACT
12
+
13
+ When exploring the development of Artificial General Intelligence (AGI), a critical task for these models involves interpreting and processing information from multiple image inputs. However, Large Multimodal Models (LMMs) encounter two issues in such scenarios: (1) a lack of fine-grained perception, and (2) a tendency to blend information across multiple images. We first extensively investigate the capability of LMMs to perceive fine-grained visual details when dealing with multiple input images. The research focuses on two aspects: first, image-to-image matching (to evaluate whether LMMs can effectively reason and pair relevant images), and second, multi-image-to-text matching (to assess whether LMMs can accurately capture and summarize detailed image information). We conduct evaluations on a range of both open-source and closed-source large models, including GPT-4V, GEMINI, OPENFLAMINGO, and MMICL. To enhance model performance, we further develop a Contrastive Chain-of-Thought (CoCoT) prompting approach based on multi-input multimodal models. This method requires LMMs to compare the similarities and differences among multiple image inputs, and then guide the models to answer detailed questions about multi-image inputs based on the identified similarities and differences. Our experimental results showcase CoCoT's proficiency in enhancing the multi-image comprehension capabilities of large multimodal models.
14
+
15
+ Index Terms—Large Multimodal Model, Multimodal Prompting, Large Language Model
16
+
17
+ # 1. INTRODUCTION
18
+
19
+ Recent advancements in Large Language Models (LLMs) [1, 2, 3] have sparked optimism in the pursuit of Artificial General Intelligence (AGI). Given the pivotal role of vision in human information acquisition, its integration is crucial for AGI's perceptual capabilities. To bridge the gap between textual and visual modalities, researchers are experimenting with aligning language with vision [4, 5] and directly encoding visual inputs into discrete tokens [6, 7]. These efforts have demonstrated the substantial potential of large multimodal models in processing multimodal content.
20
+
21
+ However, they still fall short of human-like perception of the world [8, 9]. One significant challenge is the loss of image detail when using natural language, a medium less precise than visual data. Complex visual information, such as subtle lighting shifts or intricate patterns, often requires comprehensive verbal description. Another hurdle is understanding the relationship between multiple image inputs. Language-based descriptions of relationships and interactions within and across images can become challenging, necessitating explanations of both individual elements and their spatial and contextual ties. This complexity often results in ambiguous or overly extensive explanations, highlighting the limitations of current models in emulating human-like perception. To address these two issues, researchers have developed various multimodal prompting strategies [10, 11] in an attempt to guide LMMs in extracting essential information from the visual content for effective multi-image understanding. Although these methods have shown proficiency in comprehending single-image contexts, they encounter obstacles when it comes to discerning relationships between multiple images. This difficulty primarily stems from an insufficient focus on key information, which requires joint consideration of all images involved.
22
+
23
+ In our study, we introduce Contrastive Chain-of-Thought (CoCoT), a novel multimodal prompting strategy designed to overcome the limitations of existing prompting strategies and enhance LMMs' performance in multi-image tasks. CoCoT prompts LMMs to discern and articulate the similarities and differences among various inputs, laying the groundwork for answering detailed, multi-image-based questions (Fig. 1). This method sharpens the models' focus, particularly on the distinctions between inputs, ensuring comprehensive capture of nuanced, question-relevant information during summarization. We rigorously evaluate CoCoT in two multi-image tasks. The first task, image-to-image matching, assesses the LMMs' competence in logically associating and pairing relevant images. The second task, image-to-text matching, tests the LMMs' ability to accurately capture and summarize detailed image data. Our evaluations span both widely-used open-source models and leading-edge closed-source models.
24
+
25
+ ![](images/41f476dff395b09b957c09628c1bbbe733413d75719c1818c7f7ef1f02813621.jpg)
26
+ Fig. 1. Comparison between different multimodal prompting strategies. The unique components in each prompting strategy's corresponding response are highlighted in varied colors. Note that GPT-4V is used in this example.
27
+
28
+ To summarize, our main contributions are:
29
+
30
+ - We find that most current models do not perform well in fine-grained multi-image tasks.
31
+ - To address the issues with existing methods, we propose a novel Contrastive Chain-of-Thought (CoCoT) prompting strategy to enhance models' understanding of the relationships between multiple image inputs.
32
+ - Our proposed method produces significant improvement for both open-source and closed-source models.
33
+
34
+ # 2. RELATED WORK
35
+
36
+ Large Multimodal Models. Inspired by the advancements of LLMs (e.g., LLAMA [1]), LMMs offer a promising way towards AGI with multimodal information. These models blend the textual reasoning prowess of LLMs with the image and video comprehension of Vision-and-Language models. This fusion enables LMMs to handle complex tasks requiring both a profound understanding and expressive generation across various modalities. Several open-source LMMs like LLAVA [4] have emerged, demonstrating competence in tasks such as image captioning and visual question-answering. However, their architectural limitations restrict their understanding and reasoning to a single image. Conversely, models like OPENFLAMINGO [12] and MMICL [13] employ specialized architectures enabling the processing of multiple image features, which better mirrors real-world scenarios.
37
+
38
+ Closed-source LMMs such as GPT-4V [14] and GEMINI [15] go beyond basic object descriptions to capture the scene's context [11], emotions [13], and relationships [16]. A common technique to enhance performance is fine-tuning, but applying similar methods to LMMs presents computational challenges [17]. To overcome this, we propose a novel approach to directly enable detailed analysis and reasoning on images without additional training data.
39
+
40
+ Multimodal Prompting Methods. Within the domain of LLMs, several language prompt methods have been established to enhance inference capabilities and ensure accurate results during prediction. These include zero-shot [18], few-shot [19], and Chain-of-Thought (CoT) [10, 11] approaches. Recently, research has begun exploring the application of prompting techniques in the multimodal domain to improve the comprehension and reasoning abilities of LMMs for image data. Current multimodal prompts employed in LMMs often exhibit limitations in capturing the intricate interrelationships between visual and language information, particularly when faced with multi-image inputs. As shown in the example in Fig. 1, they are not able to identify the critical action of the boy throwing the ball. To overcome this challenge, we propose a novel prompting method that directs LMMs to extract and analyze essential information, requiring a holistic consideration of all the input images.
41
+
42
+ ![](images/6f65d9f25776d137f57f0cc0861c039b515d4915da044a73807dad7b532ab52c.jpg)
43
+ Fig. 2. Different CoT-based methods and their performance in extracting information from images under various conditions, with GPT-4V being used in the experiments. Left: Utilizing CCoT to generate image information; Middle: CoCoT prompting between images with a big domain gap; Right: CoCoT prompting between images with a small domain gap.
44
+
45
+ # 3. CONTRASTIVE CHAIN-OF-THOUGHT
46
+
47
+ # 3.1. Motivation Analysis
48
+
49
+ Traditional CoT-based prompting methods for LMMs can be categorized into two types. The first type is based on text understanding, such as DDCoT (i.e., Duty-Distinct Chain-of-Thought) [10], which decomposes a question into subquestions for a step-by-step response. The second type is based on image understanding, like CCoT (i.e., Compositional Chain-of-Thought) [11], which generates a scene graph of the image to provide answers. However, while processing images, the text-based CoT does not enable LMMs to directly acquire and comprehend the detailed information in images. As shown in Fig. 1, DDCoT does not enable the LMM to recognize that the kid in the second image is not throwing a basketball. The image-based CCoT merely extracts basic information about the main objects in the image, also overlooking significant details. As shown in Fig. 1, CCoT generates a series of scene graphs unrelated to the question. Existing CoT-based prompting methods struggle to notice the details when answering questions about images rich in detail. Therefore, an effective prompting method should enable LMMs to discern and understand the details in images, and subsequently answer questions based on this understanding.
50
+
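+ To make the two baseline styles concrete, the sketch below shows what such prompts might look like when wrapped around a user question. The wording is illustrative only, paraphrased from the descriptions above and Fig. 1; it is not the verbatim template of either method, and the helper names are ours.
+
+ ```python
+ # Illustrative (paraphrased) templates for the two baseline CoT styles.
+ # These are sketches, not the official DDCoT/CCoT prompts.
+
+ def ddcot_style_prompt(question: str) -> str:
+     # Text-side decomposition: split the question into sub-questions,
+     # answer each from the images, then compose a final answer.
+     return (
+         "Decompose the following question into sub-questions, answer each "
+         "sub-question using the images, and then combine the sub-answers "
+         f"into a final answer. Question: {question}"
+     )
+
+ def ccot_style_prompt(question: str) -> str:
+     # Image-side structuring: build a scene graph first, then answer.
+     return (
+         "First generate a scene graph (objects, attributes, relations) of "
+         f"the input images, then use it to answer: {question}"
+     )
+ ```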
51
+ # 3.2. Methodology
52
+
53
+ We focus on how to enable LMMs to extract more detailed information from images, especially when the images are very similar. Initially, we examine the extent to which LMMs based on CCoT can extract information from images, as illustrated in Fig. 2. GPT-4V, utilizing CCoT, is limited to identifying entities, their characteristics, and straightforward details like events and relationships between entities. Drawing inspiration from contrastive learning, our approach encourages LMMs to discern similarities and differences within images. We discover that these models are capable of engaging with more complex information, such as reasoning, even when there is a considerable difference in the domain between the images being compared and the original. For instance, they might deduce that an image's scene likely follows a storm and recognize a negative emotional tone in it. When comparing similar images, focusing on the similarities and differences of images effectively highlights the contrasts, such as recognizing more severe damage in one image compared to another, or differentiating the causes of car damage between two images, thereby effectively facilitating causal reasoning. Consequently, we develop the Contrastive Chain-of-Thought prompting. As shown in Fig. 1, this approach, similarly starting from an image perspective, initially compares the similarities and differences between various image inputs. It then directs LMMs to answer questions based on the insights gathered from such comparisons.
54
+
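+ As a minimal sketch, the contrastive instruction can simply be prepended to any multi-image question. The wording below mirrors the CoCoT examples shown in Fig. 1, Fig. 5, and Fig. 6; the helper name and the example question are ours.
+
+ ```python
+ def cocot_prompt(question: str) -> str:
+     """Wrap a multi-image question with the contrastive instruction.
+
+     The model is first asked to enumerate the similarities and differences
+     between the input images, and then to answer the original question
+     based on that comparison (wording follows Fig. 1, Fig. 5, and Fig. 6).
+     """
+     return (
+         "Please tell me the similarities and differences of these images, "
+         f"and based on the similarities and differences, {question}"
+     )
+
+ # Example (hypothetical question), e.g. for the Factify-V entailment task:
+ prompt = cocot_prompt("tell me whether these two images are entailed?")
+ ```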
55
+ # 4. EXPERIMENTS AND RESULTS
56
+
57
+ # 4.1. Experiment Setup
58
+
59
+ Datasets. We evaluate the effectiveness of CoCoT on two fine-grained multi-image tasks: (1) image-to-image matching and (2) multi-image-to-text matching. Both tasks are well-suited for assessing whether the CoT-based method enables LMMs to acquire more fine-grained information from multiple image inputs. The image-to-image matching task employs the Raven-50 [20, 21] and Factify2 [22] datasets. This task tests the models' ability to identify and interpret visual details, requiring them to determine the degree of match between different images. For the multi-image-to-text matching task, we use Winoground [16]. This task requires LMMs to effectively pair similar images with their corresponding textual descriptions, or alternatively, to align similar texts with the corresponding images. Details including dataset statistics and preprocessing specifics are discussed in Appendix A.
60
+
61
+ Baselines. We compare CoCoT prompting to two state-of-the-art methods in CoT-based multimodal prompting. This includes DDCoT [10] and CCoT [11]. Additionally, we benchmark CoCoT against the standard prompting baseline, which does not incorporate any CoT instructions. Note that all the experiments are conducted under the zero-shot setting. Example prompts and answers can be found in Fig. 1.
62
+
63
+ Language Models. We evaluate different prompting strategies on two open-source LMMs: OPENFLAMINGO [12] and MMICL [13], as well as two proprietary models, GPT-4V [14] and GEMINI [15]. Due to API restrictions of GPT-4V, we only evaluate the standard and CoCoT prompting for it. For generation settings, we use the default configuration for each model. We use beam search with a beam width of 3 for OPENFLAMINGO. In the case of MMICL, the beam width is set to 8. For GEMINI, we opt for the API of Gemini Pro Vision under the default settings, which include a temperature of 0.4, TopK set to 32, TopP at 1, and a maximum length of 4,096. For GPT-4V, we use the default settings of the web version as of December 30, 2023.
64
+
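+ For reference, the decoding settings reported above can be collected in a single configuration sketch; this only restates the settings listed in this section and is not code shipped with any of the models.
+
+ ```python
+ # Decoding settings reported above, gathered in one place (sketch only).
+ GENERATION_CONFIG = {
+     "OPENFLAMINGO": {"strategy": "beam_search", "num_beams": 3},
+     "MMICL": {"strategy": "beam_search", "num_beams": 8},
+     "GEMINI": {  # Gemini Pro Vision API defaults used in the experiments
+         "temperature": 0.4,
+         "top_k": 32,
+         "top_p": 1.0,
+         "max_output_tokens": 4096,
+     },
+     "GPT-4V": {"interface": "web version defaults (as of 2023-12-30)"},
+ }
+ ```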
65
+ # 4.2. Main Results
66
+
67
+ # 4.2.1. Image-to-image Matching
68
+
69
+ The task of image-to-image matching requires the model to extract information from two images simultaneously and then determine, given a prompt, whether the information from both images matches, as exemplified in Fig. 3. LMMs are expected to select the correct answer from the given choices. In addition to the aforementioned methods, we include a random-choice baseline for comparative reference. Accuracy of LMMs with different prompting methods is shown in Table 1.
70
+
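+ A minimal evaluation loop for this setting is sketched below; the accuracy metric and the random-choice reference follow the setup described here, while the data layout and option indexing are hypothetical placeholders.
+
+ ```python
+ import random
+
+ def accuracy(predictions, labels) -> float:
+     # Percentage of examples whose selected option matches the gold answer.
+     correct = sum(p == y for p, y in zip(predictions, labels))
+     return 100.0 * correct / len(labels)
+
+ def random_choice_accuracy(num_options: int, labels) -> float:
+     # Uniform guessing over the candidate set, e.g. 6 candidates on
+     # Raven-50 (about 17%) and 2 options on Factify-V (about 50%).
+     preds = [random.randrange(num_options) for _ in labels]
+     return accuracy(preds, labels)
+ ```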
71
+ CoCoT significantly improves LMMs' performance in the image-to-image matching task. Most models show improved performance when DDCoT and CCoT are employed,
72
+
73
+ ![](images/e8949dcb1f9ae7f977c139746e6958aef8c7c362b24d2d439501c5a23b644a83.jpg)
74
+ Fig. 3. An example question from the image-to-image matching task, sourced from the Raven-50 [20, 21] dataset.
75
+
76
+ Table 1. Accuracy of LMMs employing different prompting strategies in the image-to-image matching task. The best performance within each LMM is highlighted in bold.
77
+
78
+ <table><tr><td></td><td>Raven-50</td><td>Factify-V</td></tr><tr><td>Random Choice</td><td>17.00</td><td>50.00</td></tr><tr><td>OPENFLAMINGO</td><td>24.00</td><td>54.00</td></tr><tr><td>OPENFLAMINGO + DDCoT</td><td>24.00</td><td>58.40</td></tr><tr><td>OPENFLAMINGO + CCoT</td><td>24.00</td><td>63.20</td></tr><tr><td>OPENFLAMINGO + CoCoT</td><td>26.00</td><td>65.00</td></tr><tr><td>MMICL</td><td>22.00</td><td>64.60</td></tr><tr><td>MMICL + DDCoT</td><td>10.00</td><td>68.40</td></tr><tr><td>MMICL + CCoT</td><td>26.00</td><td>73.20</td></tr><tr><td>MMICL + CoCoT</td><td>26.00</td><td>77.00</td></tr><tr><td>GEMINI</td><td>18.00</td><td>58.00</td></tr><tr><td>GEMINI + DDCoT</td><td>12.00</td><td>65.40</td></tr><tr><td>GEMINI + CCoT</td><td>20.00</td><td>80.20</td></tr><tr><td>GEMINI + CoCoT</td><td>22.00</td><td>77.80</td></tr><tr><td>GPT-4V</td><td>30.00</td><td>74.00</td></tr><tr><td>GPT-4V + CoCoT</td><td>45.00</td><td>80.60</td></tr></table>
79
+
80
+ but the extent of improvement is not as significant as with CoCoT in most cases. Furthermore, on the Raven-50 dataset, which comprises non-natural images made up of various shapes, GEMINI surprisingly emerges as the worst-performing model in our evaluations, while GPT-4V performs the best, surpassing all other models, including open-source ones such as OPENFLAMINGO and MMICL.
81
+
82
+ For the Factify-V dataset featuring natural images, GEMINI without CoT outperforms OPENFLAMINGO under similar conditions. However, when CoT is incorporated, GEMINI's performance is almost on par with that of GPT-4V under similar conditions. This outcome differs from the results on the Raven-50 dataset, suggesting that GEMINI inherently possesses the capability to extract detailed information from natural images, but this capability is not fully demonstrated without the use of prompts.
83
+
84
+ In summary, our analysis of the image-to-image matching task reveals a consistent enhancement in performance across most models upon integrating various types of CoT-based prompting. This improvement underscores the ability of the visual components within LMMs to concentrate on details relevant to the task at hand. These details are subsequently processed by the LMMs for in-depth analysis, following the CoT-based prompting approach. Notably, in a majority of cases, CoCoT prompting elicits LMMs to achieve state-of-the-art performance on both natural and artificial datasets, surpassing other CoT-based strategies.
85
+
86
+ This showcases the efficacy of CoCoT in guiding LMMs to accurately extract and analyze task-relevant information from images, facilitating enhanced comparative and analytical reasoning within these models.
87
+
88
+ # 4.2.2. Multi-image-to-text Matching
89
+
90
+ Compared to the image-to-image matching task, the multi-image-to-text matching task requires models to precisely extract information from images and match it with text. An exemplar question can be found in Fig. 1. In particular, the Winoground dataset is used for this task. Performance on Winoground (shown in Table 2) is assessed using three distinct metrics, each examining a different facet of the models' abilities to reason with both vision and language. The first metric, known as the text score, evaluates the model's capability to accurately choose the right caption when provided with an image. The second metric is the image score, assessing a model's ability to correctly identify the appropriate image when presented with a caption. The last metric is a composite group score that integrates the first two: a case is considered correct only if the model answers both the text and the image sub-tasks correctly.
91
+
92
+ CoCoT boosts LMMs' performance in the multi-image-to-text matching task, achieving substantial gains, and it outperforms other CoT-based methods in the majority of scenarios. This indicates that, by comparing the similarities and differences of the input image pairs and identifying their subtle distinctions, LMMs can match images to text more accurately. The example in Fig. 1 also shows that methods like DDCoT and CCoT may miss key information, possibly as a result of misdirected focus.
93
+
94
+ GEMINI's performance is still the worst, indicating that although its visual encoder can extract detailed information from the images, the model is not able to effectively summarize that information, resulting in a poor match with the text. GPT-4V's performance on this task is also inferior to that of MMICL, indicating that GPT-4V likewise struggles to summarize detailed information within images, particularly when the input image pairs are very similar.
95
+
96
+ All current models exhibit a significant gap compared to human performance. Several factors may explain this gap: 1) the visual encoder's capabilities are insufficient; 2) the large models are unable to extract detailed information from the visual encoder, thereby failing in reasoning; and 3) the large models themselves are inadequate, leading to incorrect reasoning. Based on the current experimental results, the visual encoder of LMMs is actually capable of recognizing some detailed information. However, due to the differences between the latent spaces of the visual encoder and the large language model, as well as the generalization issues of LLMs, LMMs are unable to fully understand images, resulting in mediocre performance across various tasks.
97
+
98
+ Table 2. Accuracy of LMMs employing different prompting strategies in the multi-image-to-text matching task. The best performance within each LMM is highlighted in bold.
99
+
100
+ <table><tr><td></td><td>Text</td><td>Image</td><td>Group</td></tr><tr><td>MTurk Human</td><td>89.50</td><td>88.50</td><td>85.50</td></tr><tr><td>Random Choice</td><td>25.00</td><td>25.00</td><td>16.67</td></tr><tr><td>OPENFLAMINGO</td><td>39.00</td><td>41.25</td><td>33.25</td></tr><tr><td>OPENFLAMINGO + DDCoT</td><td>47.50</td><td>47.25</td><td>39.00</td></tr><tr><td>OPENFLAMINGO + CCoT</td><td>42.50</td><td>27.50</td><td>20.00</td></tr><tr><td>OPENFLAMINGO + CoCoT</td><td>58.25</td><td>55.25</td><td>41.50</td></tr><tr><td>MMICL</td><td>46.50</td><td>40.75</td><td>37.75</td></tr><tr><td>MMICL + DDCoT</td><td>46.75</td><td>45.00</td><td>36.75</td></tr><tr><td>MMICL + CCoT</td><td>51.00</td><td>48.00</td><td>47.50</td></tr><tr><td>MMICL + CoCoT</td><td>64.25</td><td>52.50</td><td>50.75</td></tr><tr><td>GEMINI</td><td>30.75</td><td>26.00</td><td>25.00</td></tr><tr><td>GEMINI + DDCoT</td><td>45.00</td><td>25.00</td><td>23.75</td></tr><tr><td>GEMINI + CCoT</td><td>22.50</td><td>33.00</td><td>20.75</td></tr><tr><td>GEMINI + CoCoT</td><td>40.00</td><td>32.50</td><td>27.75</td></tr><tr><td>GPT-4V</td><td>54.50</td><td>42.50</td><td>37.75</td></tr><tr><td>GPT-4V + CoCoT</td><td>58.50</td><td>49.50</td><td>44.50</td></tr></table>
101
+
102
+ Table 3. Ablation study of the similarities-only and differences-only variants of CoCoT on the Factify-V dataset.
103
+
104
+ <table><tr><td></td><td>MMICL</td><td>GEMINI</td></tr><tr><td>No prompt</td><td>64.60</td><td>58.00</td></tr><tr><td>+ Similarities</td><td>75.60</td><td>60.80</td></tr><tr><td>+ Differences</td><td>63.40</td><td>65.40</td></tr><tr><td>+ CoCoT</td><td>77.00</td><td>77.80</td></tr></table>
105
+
106
+ # 4.3. Ablation Study
107
+
108
+ CoCoT instructs LMMs to identify the similarities and differences across multiple image inputs before providing an answer. In our ablation study, we break the prompt down into two distinct components: 1) a prompt that only requests the identification of similarities, and 2) a prompt that solely focuses on extracting the differences. As shown in Table 3, for GEMINI the performance improves to some extent with either the similarities or the differences prompt alone, but not as much as when both are included. For MMICL, adding only the differences prompt leads to a slight decrease in performance, but the best results are achieved when both prompts are incorporated.
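For reference, the three prompt variants compared in this ablation can be sketched as simple templates. The exact wording below is illustrative only, adapted from the example CoCoT prompts shown in the appendix figures; it is not claimed to be the verbatim prompt set used in the experiments.

```python
def build_prompt(question, variant="cocot"):
    """Prepend a contrastive instruction to the task question.

    'similarities' and 'differences' are the two ablation variants;
    'cocot' asks for both, as in the full method.
    """
    prefixes = {
        "none": "",
        "similarities": "Please tell me the similarities of these two images, ",
        "differences": "Please tell me the differences of these two images, ",
        "cocot": "Please tell me the similarities and differences of these two images, ",
    }
    return prefixes[variant] + question


print(build_prompt("and tell me whether these two images are entailed?", "cocot"))
```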
109
+
110
+ # 5. DISCUSSIONS AND CONCLUSIONS
111
+
112
+ In this study, we address the challenges faced by large multimodal models in processing detailed visual information from multiple images. We have developed the Contrastive Chain-of-Thought (CoCoT) approach, a novel prompting strategy that significantly enhances LMMs' ability to discern fine-grained details in multi-image tasks. Our experiments with various models, including GPT-4V, GEMINI, OPENFLAMINGO, and MMICL, demonstrate that CoCoT improves performance in image-to-image matching and multi-image-to-text tasks. This study contributes to the field of Artificial General Intelligence (AGI), offering new possibilities in areas requiring precise image interpretation. However, because CoCoT requires additional images for comparison, it might introduce unnecessary noise. Future research should focus on refining CoCoT for more complex scenarios and integrating it with other AI technologies to further advance multimodal understanding and AGI development.
115
+
116
+ # 6. REFERENCES
117
+
118
+ [1] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al., "Llama: Open and efficient foundation language models," arXiv preprint arXiv:2302.13971, 2023.
119
+ [2] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al., "Training language models to follow instructions with human feedback," Advances in Neural Information Processing Systems, vol. 35, pp. 27730-27744, 2022.
120
+ [3] Daoan Zhang, Weitong Zhang, Bing He, Jianguo Zhang, Chenchen Qin, and Jianhua Yao, "Dnagpt: A generalized pretrained tool for multiple dna sequence analysis tasks," bioRxiv, pp. 2023-07, 2023.
121
+ [4] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee, “Visual instruction tuning,” arXiv preprint arXiv:2304.08485, 2023.
122
+ [5] Yunlong Tang, Jinrui Zhang, Xiangchen Wang, Teng Wang, and Feng Zheng, “Llmva-gebc: Large language model with video adapter for generic event boundary captioning,” arXiv preprint arXiv:2306.10354, 2023.
123
+ [6] Yushi Hu, Hang Hua, Zhengyuan Yang, Weijia Shi, Noah A Smith, and Jiebo Luo, “Promptcap: Prompt-guided task-aware image captioning,” arXiv preprint arXiv:2211.09699, 2022.
124
+ [7] Hang Hua, Xingjian Li, Dejing Dou, Cheng-Zhong Xu, and Jiebo Luo, “Fine-tuning pre-trained language models with noise stability regularization,” arXiv preprint arXiv:2206.05658, 2022.
125
+ [8] Hanjia Lyu, Jina Huang, Daoan Zhang, Yongsheng Yu, Xinyi Mou, Jinsheng Pan, Zhengyuan Yang, Zhongyu Wei, and Jiebo Luo, “Gpt-4v (ision) as a social media analysis engine,” arXiv preprint arXiv:2311.07547, 2023.
126
+ [9] Yongsheng Yu, Dawei Du, Libo Zhang, and Tiejian Luo, "Unbiased multi-modality guidance for image inpainting," in European Conference on Computer Vision. Springer, 2022, pp. 668-684.
127
+ [10] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang, "Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models," arXiv preprint arXiv:2310.16436, 2023.
130
+ [11] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig, "Compositional chain-of-thought prompting for large multimodal models," arXiv preprint arXiv:2311.17076, 2023.
131
+ [12] Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, et al., “Open-flamingo: An open-source framework for training large autoregressive vision-language models,” arXiv preprint arXiv:2308.01390, 2023.
132
+ [13] Haozhe Zhao, Zefan Cai, Shuzheng Si, Xiaojian Ma, Kaikai An, Liang Chen, Zixuan Liu, Sheng Wang, Wenjuan Han, and Baobao Chang, "Mmicl: Empowering vision-language model with multi-modal in-context learning," arXiv preprint arXiv:2309.07915, 2023.
133
+ [14] OpenAI, “GPT-4 technical report,” CoRR, vol. abs/2303.08774, 2023.
134
+ [15] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al., "Gemini: A family of highly capable multimodal models," arXiv preprint arXiv:2312.11805, 2023.
135
+ [16] Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross, “Winoground: Probing vision and language models for visio-linguistic compositionality,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 5238–5248.
136
+ [17] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al., “Mmbench: Is your multi-modal model an all-around player?” arXiv preprint arXiv:2307.06281, 2023.
137
+ [18] Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa, “Large language models are zero-shot reasoners,” Advances in neural information processing systems, vol. 35, pp. 22199-22213, 2022.
138
+ [19] Renrui Zhang, Xiangfei Hu, Bohao Li, Siyuan Huang, Hanqiu Deng, Yu Qiao, Peng Gao, and Hongsheng Li, "Prompt, generate, then cache: Cascade of foundation models makes strong few-shot learners," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 15211-15222.
139
+ [20] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu, “Raven: A dataset for relational and analogical visual reasoning,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 5317-5327.
140
+ [21] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al., "Language is not all you need: Aligning perception with language models," arXiv preprint arXiv:2302.14045, 2023.
143
+ [22] S Suryavardan, Shreyash Mishra, Parth Patwa, Megha Chakraborty, Anku Rani, Aishwarya Reganti, Aman Chadha, Amitava Das, Amit Sheth, Manoj Chinnakotla, et al., "Factify 2: A multimodal fake news and satire news dataset," arXiv preprint arXiv:2304.03897, 2023.
144
+
145
+ # A. DATASET DESCRIPTION
146
+
147
+ # A.1. Winoground
148
+
149
+ The Winoground [16] task involves matching images with captions; the dataset contains 400 groups of image-caption pairs, and each group contains two similar image-caption pairs. This task is challenging because the captions have the same words but in different sequences. LMMs must analyze both images and texts to identify subtle differences and understand the implied references. Winoground is chosen to test whether LMMs can relate fine-grained image information to text. Example questions are shown in Fig. 4. There are two tasks in the Winoground dataset: 1) given two images, the model is required to find which image matches the given caption; 2) given two pieces of text, the model is required to find which text matches the given image.
150
+
151
+ # A.2. Raven-50
152
+
153
+ The Raven-50 [20, 21] test is a common tool for assessing the nonverbal reasoning capabilities of LMMs. This test demands both visual acuity and logical reasoning to decipher the connections between images. In each scenario, the model is presented with either 3 or 8 images as inputs, alongside 6 candidate answer images, each representing a distinct solution. The goal is to correctly identify the appropriate image. Example questions are shown in Fig. 4. Note that the evaluation protocol for OPENFLAMINGO and MMICL on the Raven-50 dataset is to calculate the logits of the output for each image pair, while for GPT-4V and GEMINI we directly let the model choose the correct result and calculate the accuracy.
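A rough sketch of the two evaluation protocols described above is given below; `score_candidate` and `ask_model_for_choice` are placeholder callables standing in for model-specific scoring and prompting code, not real library APIs.

```python
def accuracy_by_logits(examples, score_candidate):
    """Open-source models (OPENFLAMINGO, MMICL): pick the candidate whose
    output score/logit is highest and compare it with the ground truth."""
    correct = 0
    for ex in examples:
        scores = [score_candidate(ex["context_images"], cand) for cand in ex["candidates"]]
        predicted = scores.index(max(scores))
        correct += int(predicted == ex["answer_index"])
    return correct / len(examples)


def accuracy_by_choice(examples, ask_model_for_choice):
    """API models (GPT-4V, GEMINI): ask the model to name its choice directly."""
    correct = sum(
        int(ask_model_for_choice(ex["context_images"], ex["candidates"]) == ex["answer_index"])
        for ex in examples
    )
    return correct / len(examples)
```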
154
+
155
+ # A.3. Factify-V
156
+
157
+ The Factify2 [22] dataset features 35,000 data pairs for training, and 7,500 pairs each for validation and testing. Every data pair includes a claim and a corresponding document, both of which are made up of an image, text, and OCR-generated text from the image. These pairs are categorized into one of five labels: "support multimodal", "support text", "refute", "insufficient multimodal", or "insufficient text". Specifically, we randomly sample 500 cases from the test set, 100 for each of the 5 categories. In our experiments we only use the images in the dataset, and the labels are reorganized into "support image" and "refute". The resulting subset is called Factify-V. Example questions are shown in Fig. 4. The task involves prompting the model to determine whether the pair of input images is contextually entailed.
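The subset construction described here can be sketched as follows. The field names are assumptions for illustration (the released Factify 2 schema is not reproduced in this paper), and the paper does not spell out how the five original categories collapse onto the two image-level labels, so that mapping is a placeholder.

```python
import random


def build_factify_v(test_set, per_class=100, seed=0):
    """Sample 100 cases per original label and keep only the images,
    relabelled for a binary image-entailment task."""
    labels = ["support multimodal", "support text", "refute",
              "insufficient multimodal", "insufficient text"]
    rng = random.Random(seed)
    subset = []
    for label in labels:
        pool = [ex for ex in test_set if ex["label"] == label]
        subset.extend(rng.sample(pool, per_class))
    for ex in subset:
        # Placeholder mapping: the paper only states that labels are
        # reorganized into "support image" and "refute".
        ex["binary_label"] = "refute" if ex["label"] == "refute" else "support image"
    return subset
```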
158
+
159
+ # B. ADDITIONAL DISCUSSIONS OF MOTIVATION
160
+
161
+ As shown in Fig. 1, DDCoT, leaning towards a language perspective in handling images, first decouples the original question and image information into sub-questions. It then prompts LMMs to answer these sub-questions, generating sub-answers, and finally, LMMs use these sub-questions and sub-answers to respond to the original question. CCoT, which is more image-oriented, initially directs LMMs to generate a Scene Graph (SG) based on the image information. LMMs then use the SG's image information in conjunction with the user's question to find an answer. Given that the above methods are not effective at capturing detailed information, we focus on how to enable LMMs to extract more detailed information from images, especially when the images are very similar. To address this, CoCoT is designed to guide LMMs in identifying both the similarities and the nuanced differences between images, facilitating a more in-depth and accurate interpretation of visual content.
164
+
165
+ # C. MORE EXAMPLES
166
+
167
+ We further provide example responses of CoCoT based on GPT-4V on different datasets, as shown in Fig. 5 and Fig. 6.
168
+
169
+ Winoground Dataset
170
+ ![](images/9819ad3d613e673cea754bdc3a62ec6e694819972d980e360293799f42488bf9.jpg)
171
+ Q: Which image match the caption: "some plants surrounding a lightbulb"? A: The first one.
172
+
173
+ ![](images/2e989e8ffbb08e3fc016ff0696e903fc12abddf1e0893e614b81919e58c54183.jpg)
174
+
175
+ # Raven-50 Dataset
176
+
177
+ ![](images/ff4748cebed9a1ffc52e1d19d97c0009842a729fcf94d52fbea788b32064edf3.jpg)
178
+
179
+ ![](images/7802c79e0891b880b288a4cd347f8bbcccfbdca788e0a35dcd1067dcc9fd8fce.jpg)
180
+
181
+ ![](images/d870364626723eab7f2f855664dd708a3bd1582e5df3bfc3fa549d399bd1f0de.jpg)
182
+
183
+ ![](images/e0a0ddbfc9a6d2b7e20b47ffa0124292ff869d680a4bbdd3e3acc5f7befa47f9.jpg)
184
+ Correct?
185
+
186
+ ![](images/b98561e4fd4ca592fd66b5c5cde62f5fd40f167f0360804482f176f50c86086d.jpg)
187
+
188
+ ![](images/ac865b1ff40bf5c1cd77edf5216e580bc68f5e2def169e7d9709e19e9f45961e.jpg)
189
+
190
+ ![](images/7845c56ab0a99ae46f753a1797b6855d3fbcb20360a5ffd2e55a45ec84bc4624.jpg)
191
+
192
+ ![](images/c30bf4084612694f7d4170775c882d7c6a26db7038c410a68d700ebd1b9fb3bc.jpg)
193
+
194
+ ![](images/56ed5790a1e6b07141999021cbedf8b407856ba3da958540cb81dfe9159eadc0.jpg)
195
+
196
+ A
197
+ ![](images/a9b5cc7754fae19ae6b6e5d40efa24b8fadddcb47a2bc5635e5d63f6b826e2f2.jpg)
198
+ Do you agree the following image is:
199
+
200
+ ![](images/9d4a18fe50cdd0bf3d777e6377be5c5db6f61db2099a2d67e1b573db649c49c1.jpg)
201
+ B
202
+
203
+ ![](images/4550a5969e563eb3f99d300ac0cfd9eb960b216f0d5a2087c38f34b20d4a07ee.jpg)
204
+
205
+ ![](images/c514ca84bfc42059e5e23bfc6758e67fd6a988c1d131eabfe54e38a2bcecaa64.jpg)
206
+ D
207
+
208
+ ![](images/0435442be1fd1f8551bbe0af344b306a3f00fcd7487a8f1d10aa94fc02913a5a.jpg)
209
+ E
210
+
211
+ F
212
+ ![](images/3c718387264e80cedd5b30954a9e5ae994965691622f55eabe9f0dca4b6c69bd.jpg)
213
+ Correct?
214
+
215
+ ![](images/3dfbc2ede994ae594690ca411336b720a1e030e5a21f9a5864720d33b660a7f3.jpg)
216
+ Correct?
217
+
218
+ ![](images/f754fd6e85932b99fc6d252c28f25e1fbd39fc738edf71d8d3ae1eaccd95d004.jpg)
219
+ Correct?
220
+
221
+ # Factify-V Dataset
222
+
223
+ Does the second image entail with the first image?
224
+
225
+ ![](images/73ba1bf357e9a70abac01cb280dcad7e3e37bc6a1e87f53870b67a00ce1130b3.jpg)
226
+ Fig. 4. Sampled questions from the Raven-50, Factify-V, and Winoground datasets.
227
+
228
+ ![](images/4cc3d0345db176004b99dfbc058e795d63597b749c68f7aba7e917a131a62857.jpg)
229
+
230
+ ![](images/5aab48c1442e1122dfa016732a6b6626c0da2df58eff150365507a3ca4185cda.jpg)
231
+
232
+ ![](images/8404064298ed89fc6629b4d0063d343d9e333f4657982b032becd134d06befab.jpg)
233
+
234
+ User: Please tell me the similarities and differences of these two images, base on the information in the first image, please select the missing shape from the second image and tell me which shape is your choice, counting from left to right, DO NOT use code.
235
+
236
+ GPT4-V: The two images you've provided seem to be part of a visual pattern recognition or IQ test, where the goal is to determine the rule that applies to the transformations of the shapes in the first image, and then apply that rule to select the missing shape from the options in the second image.
237
+
238
+ In the first image, there are three shapes: the first is a diamond with a cross inside, the second is a diamond that is rotated 45 degrees without any cross, and the third is a square with a smaller square inside. In the second image, we have a sequence of four shapes with one missing (the third position is blank). The first shape is a square with both a cross and an X inside, the second is a diamond with a cross inside, the fourth is a square with a smaller square inside, and the fifth is a diamond with both a cross and an X inside.
239
+
240
+ To find the similarities and differences, we'll look at patterns in the transformations of the shapes:
241
+
242
+ - The shapes alternate between a square and a diamond.
243
+ - The internal lines (cross and X) seem to alternate in presence and absence.
244
+
245
+ Based on these observations from the first image, we can infer that the missing shape in the second image should be a shape that is a square since it follows a diamond and does not have any internal lines, as the pattern suggests an alternation in the presence of internal lines. Considering the sequence from the second image, the missing shape would then be the third shape and should be a square without any internal lines to maintain the pattern. Counting from left to right, the missing shape is the third one in the sequence.
246
+
247
+ Fig. 5. An example response generated by GPT-4V via CoCoT on the Raven-50 dataset.
248
+
249
+ ![](images/8312ee044b7d341ad834f97736789a0b659aea16078c1ee3aec4a3010490d276.jpg)
250
+ Fig. 6. An example response generated by GPT-4V via CoCoT on the Factify-V dataset.
251
+
252
+ ![](images/4c3edd3018c74499ee368b1203ca10733a7e818267a54873a6f84753fb564e53.jpg)
253
+
254
+ User: Please tell me the similarities and differences of these two images, and tell me whether these two images are entailed?
255
+
256
+ GPT4-V: The two images you've provided appear to be election-related graphics from major news networks, specifically ABC News and CNN, declaring Joe Biden as the projected winner of Colorado and New York, respectively, during the 2020 United States presidential election. Here are the similarities and differences:
257
+
258
+ # Similarities:
259
+
260
+ 1. Both images feature Joe Biden.
261
+ 2. Both declare Biden as the projected winner of a state.
262
+ 3. The graphics use a blue and white color scheme associated with the Democratic Party.
263
+ 4. Both images include the news network's logo and the year of the election.
264
+ 5. Both use a checkmark symbol to indicate the projection of a winner.
265
+
266
+ # Differences:
267
+
268
+ 1. The images are from different news outlets, ABC News and CNN, as indicated by their logos.
269
+ 2. They declare Biden as the winner in different states, Colorado in the first and New York in the second.
270
+ 3. The images have different designs and layouts due to varying graphic standards of the two networks.
271
+ 4. The first image has a graphic element that looks like a voting ballot, while the second one does not.
272
+ 5. The text font and style differ between the two images.
273
+
274
+ Regarding whether the second image entails the first one, the answer is no. The concept of entailment in this context would mean that the second image could logically imply the information in the first. Since the images are projections for different states, the second does not imply that Biden is the projected winner of Colorado, only that he is projected to win New York. They are separate statements about the outcomes in different states.
2401.02xxx/2401.02582/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:044199c16738fba1576fa308aceeead31b14176da429d776c03044254c14867f
3
+ size 672448
2401.02xxx/2401.02582/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02634/182bf922-2484-43f6-8ebc-7c29837a0264_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de58d67aa379c6e436d35d289544ab77a908cf596e10b3a252024940d0765648
3
+ size 21984503
2401.02xxx/2401.02634/full.md ADDED
@@ -0,0 +1,518 @@
1
+ # AG-ReID.v2: Bridging Aerial and Ground Views for Person Re-identification
2
+
3
+ Huy Nguyen, Kien Nguyen, Sridha Sridharan, Clinton Fookes
4
+
5
+ School of Electrical Engineering and Robotics
6
+
7
+ Queensland University of Technology, Brisbane, Australia 4000
8
+
9
+ thanhnathuy.nguyen@hdr.qut.edu.au, {k.nguyenthanh, s.sridharan, c.fookes}@qut.edu.au
10
+
11
+ Abstract—Aerial-ground person re-identification (Re-ID) presents unique challenges in computer vision, stemming from the distinct differences in viewpoints, poses, and resolutions between high-altitude aerial and ground-based cameras. Existing research predominantly focuses on ground-to-ground matching, with aerial matching less explored due to a dearth of comprehensive datasets. To address this, we introduce AG-ReID.v2, a dataset specifically designed for person Re-ID in mixed aerial and ground scenarios. This dataset comprises 100,502 images of 1,615 unique individuals, each annotated with matching IDs and 15 soft attribute labels. Data were collected from diverse perspectives using a UAV, stationary CCTV, and smart glasses-integrated camera, providing a rich variety of intra-identity variations. Additionally, we have developed an explainable attention network tailored for this dataset. This network features a three-stream architecture that efficiently processes pairwise image distances, emphasizes key top-down features, and adapts to variations in appearance due to altitude differences. Comparative evaluations demonstrate the superiority of our approach over existing baselines. We plan to release the dataset and algorithm source code publicly, aiming to advance research in this specialized field of computer vision. For access, please visit https://github.com/huynguyen792/AG-ReID.v2.
12
+
13
+ Index Terms—Person re-identification, aerial-ground imagery, UAV, CCTV, smart glasses, video surveillance, attribute-guided, three-stream network
14
+
15
+ # I. INTRODUCTION
16
+
17
+ Person Re-identification (ReID) is a technique in computer vision that identifies and matches individuals across images or videos captured by multiple, non-overlapping cameras [1]–[5]. This method is advantageous in surveillance systems as it does not depend on high-resolution biometric data, such as facial features, which are often required for more precise identification methods. Person ReID has diverse applications including video surveillance, retail management, search and rescue operations, healthcare services, and public safety initiatives. These applications demonstrate the utility of person ReID in enhancing safety measures and optimizing resource distribution in various environments.
18
+
19
+ The development of advanced airborne platforms and imaging sensors has significantly advanced the field of aerial person ReID [6]. These platforms, in comparison with traditional ground-based systems, offer notable advantages, such as increased scale and mobility, and the ability to perform both covert and overt observations [7], [8]. High-altitude aerial cameras are particularly effective, capable of capturing extensive areas with reduced occlusion and demonstrating versatility
20
+
21
+ ![](images/f7b3b2a6415ae6634e72dc5f5acf248093779e9d8e40dc4d4b76bed80402bf27.jpg)
22
+
23
+ ![](images/398ed9abf8a0f13b8d855024f18ea5c94d18d1b3d9c8feb9cd0602f96e468e12.jpg)
24
+ (a) Aerial camera.
25
+ (b) CCTV camera.
26
+
27
+ ![](images/784fd434d3c5a61c899a35948cdf5b33e651c261ce8c87a393a5a2da53bec0f8.jpg)
28
+ (c) Wearable camera.
29
+ Fig. 1. Aerial (a), CCTV (b), and wearable camera (c) perspectives vary in resolution, occlusion, and lighting in the AG-ReID.v2 dataset.
30
+
31
+ in various operational conditions [9]–[11]. The integration of multi-modal sensors, including visual, thermal, and LiDAR technologies, further enhances the effectiveness of aerial ReID, improving target visibility and robustness [11], [12].
32
+
33
+ Traditionally, research in aerial person ReID has primarily focused on matching within aerial imagery [7], [13], [14]. However, the integration of aerial and ground images for person ReID poses distinct challenges, largely due to differences in viewpoints, poses, and resolutions. This area, while crucial, lacks comprehensive and publicly available datasets suitable for such cross-domain studies. Existing datasets, such as the one by Schumann et al. [15], face limitations in terms of accessibility, scale, and diversity of collection environments.
34
+
35
+ Addressing these challenges, this paper introduces the AG-ReID.v2 dataset, an extension of our previous AG-ReID.v1 dataset [16]. AG-ReID.v2 encompasses a broader range of aerial and ground imagery, providing a diverse, publicly accessible resource for ReID research. This dataset, meticulously compiled, reflects a variety of real-world scenarios, particularly suited for surveillance and monitoring applications.
36
+
37
+ Furthermore, we propose a novel three-stream architecture for aerial-ground person ReID, designed to address the specific challenges in this domain. Each stream within our model focuses on particular aspects such as localized attributes and soft-biometric markers, enabling a more precise and interpretable matching process. This innovative approach represents a significant step forward in addressing the complexities of aerial-ground person ReID. Images from the AG-ReID.v2 dataset, illustrating these aspects, are shown in Figure 1.
40
+
41
+ The dataset comprises various image types and modalities, including:
42
+
43
+ - Aerial images were obtained using DJI M600 Pro drones, equipped with XT2 sensors and $24\mathrm{mm}$ lenses, flown at altitudes from 15 to 45 meters. These images offer an elevated perspective of scenes and individuals, aiding in person identification and tracking from an aerial viewpoint.
44
+ - Ground-based CCTV images taken by a standard low-resolution CCTV camera positioned at key locations on a campus. These provide a traditional ground-level perspective, similar to standard CCTV systems.
45
+ - Images from a wearable camera, attached to smart glasses, providing a first-person viewpoint. These high-resolution images capture dynamic details like head and body posture, as well as contextual information about the environment and surrounding elements.
46
+
47
+ This diverse collection of aerial and ground-based images forms a comprehensive dataset suitable for developing and testing person ReID models in aerial-ground settings. The aerial images, taken from higher altitudes, introduce unique challenges in ReID due to differences in viewpoint, pose, and resolution, which affect the appearance of individuals compared to ground-level views. Ground images are sourced from two types of cameras—CCTV and wearable—offering variability in resolution, perspective, mobility, and lighting conditions. This diversity accurately reflects the complexities encountered in real-world surveillance and person ReID scenarios.
48
+
49
+ The AG-ReID.v2 dataset incorporates 15 soft-biometric attributes per individual, as illustrated in Figure 3. These attributes cover various aspects including age, gender, and clothing style. They serve as supplementary information to facilitate attribute recognition and improve Re-identification (ReID) system efficacy.
50
+
51
+ In this work, we introduce a novel method for person re-identification (ReID) to accompany our dataset, employing a three-stream architecture tailored for the specific challenges posed by integrating aerial and ground images. This architecture emphasizes local features, such as the head region and soft-biometric attributes, to enhance the ReID process's explainability. The model incorporates a simplified localization layer in the elevated-view attention stream, opting out of using complex pose estimators. This layer functions as an adaptive mechanism, dynamically shifting focus between global and head-specific features in response to the input. Such an approach enables the system to discern and prioritize essential features for accurate person re-identification. This method not only bolsters performance in scenarios involving aerial and ground ReID but also provides insights into the pivotal attributes that drive the identification process.
52
+
53
+ In summary, the key contributions of our study are:
54
+
55
+ ![](images/862c30de45206c90b7a7434552ec7b7714b7b7441a7ab2f5e8feb4b536cd4ca0.jpg)
56
+ Fig. 2. Data Collection Areas for the AG-ReID.v2 dataset.
57
+
58
+ - Introduction of the AG-ReID.v2 dataset: The AG-ReID.v2 dataset integrates images from both aerial (UAV) and ground sources (CCTV and wearable cameras), providing a more comprehensive view compared to datasets focused solely on ground or aerial perspectives. This dataset includes a large volume of images representing a significant number of unique identities and is enhanced with 15 soft attributes, contributing to its depth and applicability for diverse analytical purposes.
59
+ - A three-stream person ReID model with an explainable elevated-view attention mechanism: We present a novel three-stream architecture tailored for the challenges specific to aerial-ground person ReID. This model features an elevated-view attention mechanism to address aerial-ground perspective challenges, and an explanation component for visualizing appearance differences, thereby augmenting the model's interpretability.
60
+ - Comprehensive experimental analysis: An extensive evaluation of our model using the AG-ReID.v2 dataset demonstrates its effectiveness, showing improvement over existing ReID models. This analysis underscores the practical utility of our approach in aerial-ground person ReID scenarios.
61
+ - Public dataset and code release: In an effort to support research in this field, we are making the AG-ReID.v2 dataset and the code for our baseline person ReID system available to the public.
62
+
63
+ The structure of this paper is organized into several sections: Section II provides a review of the relevant literature. Section III details the AG-ReID.v2 dataset. In Section IV, we introduce our proposed method, focusing on an explainable approach for aerial-ground re-identification. Section V reports on the experimental setup and results. Finally, Section VI summarizes our findings and contributions.
64
+
65
+ # II. RELATED WORK
66
+
67
+ # A. Datasets for Person ReID
68
+
69
+ In the field of Person Re-identification (ReID), a variety of datasets have been developed, each with unique characteristics suited for different research scenarios:
70
+
71
+ TABLE I COMPARISON OF AG-REID.v2 WITH OTHER REID DATASETS. OUR AG-REID.v2 LEADS IN IDENTITIES, IMAGES AND PLATFORMS.
72
+
73
+ <table><tr><td rowspan="2">Attributes</td><td colspan="2">Ground-Ground</td><td colspan="2">Aerial-Aerial</td><td colspan="2">Aerial-Ground</td></tr><tr><td>Market-1501</td><td>DukeMTMC-reID</td><td>PRAI-1581</td><td>UAV-Human</td><td>AG-ReID.v1</td><td>AG-ReID.v2 (ours)</td></tr><tr><td># IDs</td><td>1,501</td><td>1,404</td><td>1,581</td><td>1,144</td><td>388</td><td>1,615</td></tr><tr><td># Images</td><td>32,668</td><td>36,411</td><td>39,461</td><td>41,290</td><td>21,983</td><td>100,502</td></tr><tr><td># Attributes</td><td>×</td><td>×</td><td>×</td><td>7</td><td>15</td><td>15</td></tr><tr><td>Backgrounds</td><td>×</td><td>×</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Occlusion</td><td>×</td><td>×</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Camera Views</td><td>fixed</td><td>fixed</td><td>mobile</td><td>mobile</td><td>mixed</td><td>mixed</td></tr><tr><td>Platforms</td><td>CCTV</td><td>CCTV</td><td>UAV</td><td>UAV</td><td>Dual</td><td>Triple</td></tr><tr><td>Altitude</td><td>&lt; 10m</td><td>&lt; 10m</td><td>20 ~ 60m</td><td>2 ~ 8m</td><td>15 ~ 45m</td><td>15 ~ 45m</td></tr><tr><td># UAVs</td><td>0</td><td>0</td><td>2</td><td>1</td><td>1</td><td>1</td></tr></table>
74
+
75
+ - Ground-Ground Person ReID Datasets: Ground-ground datasets are commonly used in ReID studies. For example, Market-1501 [17], established in 2015, contains 32,668 images from 1,501 individuals. In comparison, DukeMTMC-reID [18] (retracted in June 2019) comprises 36,411 images but represents a slightly smaller pool of 1,404 individuals. These datasets illustrate the varying scopes and scales prevalent in ground-based ReID research.
76
+ - Aerial-Aerial Person ReID Datasets: The interest in aerial-based ReID datasets is growing. PRAI-1581 [14], released in 2019, includes 39,461 images of 1,581 subjects. Another significant contribution is the UAV-Human dataset [7], introduced in 2021. It encompasses 41,290 images of 1,144 individuals, captured by a drone flying at altitudes between 2 to 8 meters over various locations and times. This dataset's versatility makes it a valuable resource for a range of surveillance applications.
77
+ - Aerial-Ground Person ReID Dataset: To address the evolving needs of surveillance technologies, datasets combining aerial and ground views have been developed. Our earlier work, AG-ReID.v1 [16], focused on integrating UAV and CCTV camera perspectives. Extending this, the current study introduces AG-ReID.v2, an enhanced dataset that surpasses existing datasets in terms of diversity and scale. AG-ReID.v2 features 100,502 images of 1,615 unique individuals, captured using CCTV, UAVs, and wearable devices. The images, recorded from heights of 15 to 45 meters, include 15 distinct attributes per individual. Table I provides a comprehensive comparison of AG-ReID.v2 with other datasets, highlighting its unique contributions to the ReID research landscape.
78
+
79
+ # B. Person ReID Approaches
80
+
81
+ Person Re-identification in computer vision aims to consistently identify individuals across varied camera views. Key models in this domain include BoT [19], which integrates label smoothing, random erasing, and auto-augmentation; MGN [20], characterized by its unique triplet loss function and multi-granularity feature utilization; and SBS [21], which enhances the triplet loss for improved accuracy. These models typically employ backbone architectures such as ResNet [22], OSNet [23], and ViT [24], and are evaluated on datasets like Market-1501, DukeMTMC-reID, and MSMT17. The advent of aerial-aerial person ReID, supported by datasets such as PRAI-1581 [14] and UAV-Human [7], has introduced new approaches, including subspace pooling [14] and the DG-NET framework [25] [7]. Our research extends these foundations, combining HRNet-18 [26] and Swin Transformer models [27] [28], aiming to enhance person re-identification capabilities.
84
+
85
+ Aerial-Aerial Person ReID The PRAI-1581 [14] and UAV-Human [7] datasets have catalyzed new algorithmic developments for aerial-aerial matching. Zhang et al. [14] introduced subspace pooling to generate concise, discriminative features for aerial ReID. Zheng et al.'s DG-NET [25], applied to the UAV-Human dataset [7], focuses on enhancing ReID embeddings through a joint learning framework that combines ReID learning with synthetic data generation.
86
+
87
+ Multi-stream Person ReID Architecture Multi-stream architectures are increasingly utilized in person re-identification, addressing various aspects of the matching challenge. For instance, Chung et al. [29] proposed a two-stream architecture, segregating spatial and temporal information learning. Xie et al. [30] introduced a three-stream architecture that merges features from RGB, low-resolution, and grayscale images to tackle image quality variation. Khatun et al. [31] developed a four-stream architecture, applying dual identification and verification losses on four input images to optimize intraclass and inter-class distances. These multi-stream designs illustrate the effectiveness of integrating different data types or features in enhancing the accuracy and robustness of person re-identification systems.
88
+
89
+ # III. AG-REID.V2 DATASET
90
+
91
+ # A. Dataset Collection
92
+
93
+ Our dataset is collected on a university campus using three cameras: a UAV, a CCTV, and a wearable camera. Each operates in distinct non-overlapping areas, as illustrated in Figure 2. Specifically, the UAV camera captures aerial images in the red-marked area, the CCTV camera records ground images in the blue area, and the wearable camera operates within the yellow area.
94
+
95
+ In the AG-ReID.v2 dataset, data collection was conducted using a variety of recording devices to capture a comprehensive range of real-world pedestrian activities. The DJI M600
96
+
97
+ TABLE II SPECIFICATIONS OF CAMERAS USED FOR DATA COLLECTION
98
+
99
+ <table><tr><td>Device</td><td>Brand</td><td>Model</td><td>Resolution</td><td>FPS</td><td>Altitude</td></tr><tr><td>CCTV</td><td>Bosch</td><td>N/A</td><td>800 × 600</td><td>30</td><td>≈ 3m</td></tr><tr><td>Wearable</td><td>Vuzix</td><td>M4000</td><td>4K</td><td>30</td><td>≈ 1.5m</td></tr><tr><td>UAV</td><td>DJI</td><td>XT2</td><td>3840 × 2160</td><td>30</td><td>15 ~ 45m</td></tr></table>
100
+
101
+ Pro UAV, equipped with a DJI XT2 camera, was designated to record video in the red area, offering a high 4K resolution at 30 frames per second (FPS). In a different setting, the Bosch CCTV camera was responsible for capturing footage in the blue area, with a resolution of $800 \times 600$ pixels at the same frame rate of 30 FPS. Additionally, the person wearing the Vuzix M4000 smart glasses, who was positioned in the yellow area, remained stationary during the recording sessions. This stationary stance was a strategic decision to ensure consistent and stable image quality, particularly critical for the pedestrian subjects being recorded. The stable recording minimizes the potential variables that could arise from the movement of the camera operator, thereby maintaining the clarity and consistency of the image quality. Such high-quality recording is essential for the effectiveness of pedestrian detection and recognition models, which are a central objective of our dataset. By utilizing this diverse array of recording methods and equipment, the AG-ReID.v2 dataset provides a robust and realistic platform for training and enhancing machine learning models for person re-identification. The specific details of the camera equipment used, including brands, models, resolutions, frame rates, and recording zones, are detailed in Table II for further reference.
102
+
103
+ The AG-ReID.v2 dataset was methodically compiled over a period of 5 months, comprising 20 distinct data collection sessions. These sessions were not held consecutively; instead, they were strategically planned on non-consecutive days, allowing for adjustments based on varying weather conditions and area restrictions. To maintain uniform environmental conditions, the data collection was structured into specific time windows: Morning sessions were conducted from $8:45\mathrm{am}$ to $10:00\mathrm{am}$ , and afternoon sessions from $3:30\mathrm{pm}$ to $4:45\mathrm{pm}$ . This schedule was deliberately chosen to capture the unique atmospheric and lighting conditions prevalent in early mornings and late afternoons.
104
+
105
+ During each session, UAV flights were conducted at three different altitudes—15m, 35m, and 45m Above Ground Level (AGL)—with each flight lasting 15 minutes. This systematic approach was instrumental in collecting a wide array of both aerial and ground-based images, capturing pedestrians in diverse lighting scenarios such as sunny versus rainy weather and direct sunlight versus shade. The dataset robustly addresses real-world challenges encountered in pedestrian detection and recognition, including issues related to occlusion, blur, resolution, and viewpoint variations. It also comprises images depicting pedestrians in various states of motion and captured from multiple angles, thereby significantly enhancing the dataset's utility for developing and refining pedestrian detection and recognition models.
106
+
107
+ ![](images/03cf24fd75b504d01691ec6a67047a9be872ac62baa7101c9d38d0d4a316dccf.jpg)
108
+ Fig. 3. 15 soft-biometric labels in the AG-ReID.v2 dataset.
109
+
110
+ These models are specifically designed to navigate the complexities of real-world environments, addressing challenges like motion blur, occlusion, varying resolutions, and diverse viewpoints, as highlighted in the literature [32], [33]. The intermittent and meticulously timed nature of these data collection sessions was crucial in ensuring the dataset's diversity, making it a valuable asset for advancing research in pedestrian detection and recognition technologies.
111
+
112
+ In total, the dataset includes 100,502 images and 1,615 unique identities. The use of multiple cameras and altitudes provides a wide range of variations in viewpoint, lighting, and background, making it an ideal dataset for evaluating the robustness and generalization of person re-identification models.
113
+
114
+ # B. Annotations
115
+
116
+ In the AG-ReID.v2 dataset, we utilize the YOLO detector [34] and the StrongSORT tracker [23] for person detection and tracking in video data, saving an image every 30 frames. Manual correction is applied to address inaccuracies, such as incorrectly identified objects. The dataset's primary focus is on short-term re-identification from various platforms, including ground and aerial perspectives, without tracking individuals over extended periods. Thus, changes in clothing or accessories are not a primary consideration in this dataset. Annotators match and label individuals across cameras, and the dataset includes manual attribute annotations, as indicated in Figure 3. These attributes are based on 15 soft-biometric labels from [13], [35], relevant due to their UAV data collection similarities. The dataset's top 20 attributes are presented in Figure 4, and a comparative analysis with other public datasets is shown in Table I, along with exemplar images in Figure 5.
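As a rough illustration of the detection-and-sampling step (not the authors' actual pipeline), the loop below assumes generic `detect_persons`, `update_tracker`, and `save_crop` wrappers around a YOLO detector, a StrongSORT tracker, and the crop-writing logic; only every 30th frame is saved, as described above.

```python
import cv2  # OpenCV, used here only for video decoding


def extract_crops(video_path, detect_persons, update_tracker, save_crop, every_n=30):
    """Run detection + tracking on a video and save one crop per track every N frames."""
    cap = cv2.VideoCapture(video_path)
    frame_idx = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        boxes = detect_persons(frame)            # e.g. YOLO person detections (placeholder)
        tracks = update_tracker(frame, boxes)    # e.g. StrongSORT track IDs (placeholder)
        if frame_idx % every_n == 0:
            for track_id, (x1, y1, x2, y2) in tracks:
                save_crop(track_id, frame[y1:y2, x1:x2])
        frame_idx += 1
    cap.release()
```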
117
+
118
+ The AG-ReID.v2 dataset's selection of 15 attributes is informed by a thorough comparative analysis with existing datasets in the ground, aerial, and aerial-ground categories. This analysis, detailed in Table III, highlights the unique aspects of AG-ReID.v2 and the rationale behind its attribute selection.
119
+
120
+ The AG-ReID.v2 dataset incorporates a diverse range of attributes, covering physical characteristics, appearance, and accessories, to ensure comprehensive coverage suitable for various applications. The dataset balances ground-level details with aerial-specific requirements, making it ideal for
121
+
122
+ TABLE III COMPARATIVE ANALYSIS OF ATTRIBUTES IN AG-REID.v2 AND OTHER DATASETS
123
+
124
+ <table><tr><td rowspan="2">Attributes</td><td colspan="2">Ground-Ground</td><td colspan="2">Aerial-Aerial</td><td colspan="2">Aerial-Ground</td></tr><tr><td>Market-1501</td><td>DukeMTMC-reID</td><td>P-DESTRE</td><td>UAV-Human</td><td>AG-ReID.v1</td><td>AG-ReID.v2 (ours)</td></tr><tr><td>Gender</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Age</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Height</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Body Volume</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Ethnicity</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Hair Color</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Hairstyle</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Beard</td><td>×</td><td>×</td><td>×</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Moustache</td><td>×</td><td>×</td><td>×</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Glasses</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Head Accessories</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Upper Body Clothing</td><td>✓</td><td>✓</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Lower Body Clothing</td><td>✓</td><td>✓</td><td>×</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Feet</td><td>×</td><td>×</td><td>✓</td><td>×</td><td>✓</td><td>✓</td></tr><tr><td>Accessories</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
125
+
126
+ ![](images/e30b8d24c5f16013ff62cc36b6f3f6b0caf62b7c8f4c2d8b43eb912067cc7266.jpg)
127
+ Fig. 4. Top 20 attribute distribution in our dataset.
128
+
129
+ cross-domain applications. Unique attributes like beard and moustache offer finer granularity in person re-identification, especially in ground-level images. Inclusion of ethnicity and a broad range of age categories enhances the dataset's applicability across diverse demographics. The selection of attributes like footwear type and accessories aligns well with the requirements of aerial-ground integrated surveillance systems, combining detailed close-up information with features discernible from a distance.
130
+
131
+ # C. Dataset Features
132
+
133
+ Diverse Identities. The dataset is characterized by a comprehensive range of identities, represented in images captured using three types of cameras: an aerial camera and two ground-based cameras, specifically CCTV and wearable cameras. This combination results in significant differences between aerial and ground views, mirroring real-world scenarios. The dataset's unique configuration presents a complex challenge in cross-matching individuals, a task that is more straightforward in conventionally captured datasets.
134
+
135
+ Variations in Altitude. The AG-ReID.v2 dataset incorporates images captured at various altitudes, ranging from 15 to 45 meters, using Unmanned Aerial Vehicles (UAVs). This range in altitude leads to a broad spectrum of image
136
+
137
+ ![](images/d6de6da1060a22e93067b32a64a07374eaf989dc33df8a86db930690a480ba90.jpg)
138
+ Fig. 5. Example images from two ground-ground datasets, Market-1501 [17] and DukeMTMC-reID [18], alongside two aerial-aerial datasets, P-DESTRE [13] and UAV-Human [7], in comparison with our aerial-ground dataset, AG-ReID.v2. The images from AG-ReID.v2 highlight distinct challenges associated with reconciling perspective variances between ground-based (bottom row) and aerial-based (top row) images of individuals. This contrast is not as prevalent in the other datasets, which are confined to a single domain, either aerial or ground.
139
+
140
+ ![](images/2befb20cfeebeb224f121dd72e5344a152643cb7098e7000d88b071bd4f1f720.jpg)
141
+
142
+ ![](images/22dd00bbc88b6b2584b03132c34a2b5ec71af25005a4cd8fc96c4a987e63d4ae.jpg)
143
+
144
+ ![](images/021e7614ebbf2d1b6bd365e816fe9beb942794ed77127d2625c65f4e1d45457a.jpg)
145
+ Fig. 6. The distribution of imagery data across UAV flying altitudes.
146
+
147
+ scales and offers diverse perspectives of subjects from elevated positions. The impact of this altitude diversity on the imagery is illustrated in Figure 6.
148
+
149
+ Resolution diversity. The variations in camera resolutions and distances between the camera and subjects lead to distinct size differences in the cropped images of individuals. For instance, UAV-captured images range between 31x59 and 371x678 pixels, while CCTV-derived crops span from 22x23 to 172x413 pixels. Notably, the wearable camera produces images with dimensions comparable to both the UAV and CCTV. The dataset mostly features smaller-sized crops, making person ReID particularly challenging. This variability is illustrated in Figure 7, displaying body sizes recorded by the three cameras. The UAV captures sizes from 43 to 739 pixels, wearable cameras document from 25 to 1080 pixels, and CCTV images range from 23 to 622 pixels.
152
+
153
+ ![](images/55ddf82a53bc1ee8af35fd8bda2a198baabf4e53a6b91b2a26ab4f73cb547a83.jpg)
154
+ Fig. 7. Distributions of the body heights (in pixels) across three cameras (aerial, wearable, CCTV) in the AG-ReID.v2 dataset.
155
+
156
+ Challenges. Illustrated in Figure 8, our dataset introduces key challenges to person re-identification using UAVs. These include diverse scale variations due to the elevated UAV view, resolution variability where even 4K images may display low resolution at higher altitudes, and significant occlusions caused by trees and poles, or the presence of other individuals. Variable lighting conditions are another challenge, resulting from recordings at different times of the day, especially morning and afternoon. Additionally, the dataset captures noticeable motion blur from rapid subject movements and showcases an array of subject poses, from walking to biking, indicating the diversity of real-world scenarios. Our classification approach is visual-based: an image is categorized as "low resolution" if key features are indistinct, "occlusion" if a subject is significantly obscured, and "motion blur" if there is prominent blur from movement. This methodology aims to accurately represent the complexities of real-world UAV surveillance.
157
+
158
+ # D. Ethical Approval
159
+
160
+ Our research team has secured ethics committee approval related to the "Multi-modal surveillance and video analytics" project, under the Human category. This authorization, valid
161
+
162
+ ![](images/d8fca6890dfa493bab0a59122c523cfddbdfd06f0816ad8083bdefade218554d.jpg)
163
+ (a) Low Resolution
164
+
165
+ ![](images/2ee172ebc74597835700a0037e236c3801b5d8979d2bb42870466734d5d38685.jpg)
166
+ (b) Blur
167
+
168
+ ![](images/ceb0bd7eef46036628866a9269278e63028f8986cabc3b6768a0198a376037bc.jpg)
169
+ (c) Partially Occluded
170
+
171
+ ![](images/12bcb2760cf732d9f21a020fa0fc24e51894817c67a087ffa16df14627a96c57.jpg)
172
+ (d) Pose
173
+
174
+ ![](images/b5001e8a7f63ee76e95b54f7149bb2d360bad335db7dcd4bab769c27dade400a.jpg)
175
+ (e) Illumination
176
+
177
+ ![](images/f8eb1bfad03db1cb9f180eb238e4e454337948da0d12a617ed63ee574982e846.jpg)
178
+ (f) Elevated Viewpoint
179
+ Fig. 8. Examples of the key challenges in the AG-ReID.v2 dataset.
180
+
181
+ until February 13, 2025, allows us to acquire and process video content with human subjects from both stationary and mobile cameras. To prioritize privacy, we have implemented facial pixelation on all captured videos, ensuring individual identities remain protected.
182
+
183
+ # IV. THREE-STREAM AERIAL-GROUND REID
184
+
185
+ To address the complexities inherent in aerial-ground person ReID, our proposed model incorporates a three-stream architecture, as illustrated in Figure 9. This architecture consists of a transformer-based ReID stream for feature extraction, an elevated-view attention stream for detailed head region analysis, and an explainable ReID stream that utilizes attribute attention maps for refined feature representation. Stream 1 efficiently processes feature maps for discriminative analysis. Stream 2 focuses on augmenting head region features, crucial for analysis from aerial perspectives. In Stream 3, attribute feature maps are generated by amalgamating the output of Stream 1 with attribute attention maps, enhancing the model's interpretability. The model computes two distinct types of distances: metric distances derived from the features of Stream 1 and attribute-guided distances from Stream 3, both employing Generalized Mean Pooling (GeM). The model's training leverages cross-entropy and triplet losses, implemented in an integrated end-to-end approach. Further details of each stream are succinctly summarized in Table IV and elaborated in subsequent sections.
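The end-to-end training objective described above (cross-entropy plus triplet losses over the stream outputs) can be sketched as follows; the loss weighting and the exact set of features entering each term are assumptions for illustration, since the text only states that both losses are used in an integrated end-to-end approach.

```python
import torch.nn as nn

id_loss = nn.CrossEntropyLoss()               # identity classification term
tri_loss = nn.TripletMarginLoss(margin=0.3)   # metric-learning term (margin is an assumed value)


def reid_loss(logits, labels, anchor, positive, negative, w_tri=1.0):
    """Combine cross-entropy on ID logits with a triplet loss on pooled features."""
    return id_loss(logits, labels) + w_tri * tri_loss(anchor, positive, negative)
```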
186
+
187
+ # A. Stream 1: Transformer-based Person ReID
188
+
189
+ Our approach in developing a person ReID network, denoted as $\mathcal{F}(\cdot)$ , primarily utilizes the Stronger Baseline (SBS) model [36] as the foundational framework. The flexibility of our methodology allows for the integration of alternative models like MGN [20] or BOT [19], adapting to various application needs.
190
+
191
+ We chose the Vision Transformer (ViT) as our model's backbone due to its effectiveness in person ReID tasks. Introduced by Dosovitskiy et al. [24], ViT processes image patches as sequences, a method particularly apt for handling lower-resolution images common in aerial data.
192
+
193
+ Other architectures like ResNet50 [22] and OSNet [37] were considered. ResNet50 is known for its deep residual learning, and OSNet for its efficient feature learning. However, ViT's ability to extract meaningful information from lower-resolution images made it the most suitable choice for our aerial imagery dataset.
194
+
195
+ Acknowledging existing ViT-based ReID methods like TransReid [38] and FED [32], we noted their respective strengths and limitations. TransReid excels in global context but may lack in local feature extraction, while FED is effective against occlusions but less versatile in diverse datasets. Our model aims to balance global context and local feature extraction, offering robust performance across various scenarios.
196
+
197
+ The model's mechanics involve extracting feature maps $(F_{i},F_{j})$ from each image pair $(x_{i},y_{j})$ . Using Generalized Mean Pooling (GMP), we convert these maps into feature vectors $(f_{i},f_{j})$ , which form the basis for calculating pairwise
198
+
199
+ ![](images/75e6bdf96cc41a95b0cbb9d456e0328dbc937ddb2b333bd13f7b7b5444eced25.jpg)
200
+ Fig. 9. The architecture of our explainable elevated-view person ReID model utilizing a Vision Transformer (ViT) backbone is shown. The architecture comprises three streams: Stream 1 calculates the pairwise distance between input pair images, Stream 2 uses localization layers to crop the head region and extract local representations through attention layers, Stream 3 utilizes person attributes to account for variations in appearance due to varying flying altitudes and view angles between aerial and ground cameras. The final representation for ReID is generated by assembling the features from Stream 1 and the head features from Stream 2 through an adaptive module. Stream 3 is equipped with an Attribute Decomposed Head (ADH) module that produces attribute-guided attention maps for each attribute.
201
+
202
+ This metric is crucial in Stream 3 of our explainable ReID network for computing the metric distillation loss, ensuring consistency across different streams of the network.
203
+
204
+ # B. Stream 2: Elevated-view Attention Stream
205
+
206
+ Our proposed method is designed to enhance person re-identification techniques, specifically tailored for the AG-ReID.v2 dataset. Building upon the work of Xu et al. [39], our model introduces a novel three-stream approach with a focus on an explainable model that accentuates the elevated-view perspective in person re-identification. This structured framework aims to provide clear and interpretable insights for improved identification from aerial viewpoints.
207
+
208
+ In our model, we adopt a different architecture from that of Xu et al., which uses a ResNet50 backbone for a global stream. Instead, our model utilizes the Vision Transformer Network, which is specifically designed to incorporate an elevated-view stream. This stream includes a head feature extraction mechanism, supported by a localization layer inspired by the Spatial Transformer Network [40]. This localization layer is engineered to focus on the head region by performing spatial manipulations such as zooming, shifting, and cropping. These spatial transformations are defined by specific mathematical formulas that adjust the image to focus on the region of interest, as shown in the following equation:
209
+
210
+ $$
211
+ \left( \begin{array}{l} x _ {i} ^ {s} \\ y _ {i} ^ {s} \end{array} \right) = \left[ \begin{array}{c c c} s _ {x} & 0 & t _ {x} \\ 0 & s _ {y} & t _ {y} \end{array} \right] \left( \begin{array}{l} x _ {i} ^ {t} \\ y _ {i} ^ {t} \end{array} \right), \tag {1}
212
+ $$
213
+
214
+ where the location of the $i^{th}$ pixel within an image is determined by its original coordinates $(x_i^s, y_i^s)$ and transformed coordinates $(x_i^t, y_i^t)$ . Through the use of scaling parameters $(s_x, s_y)$ and translation parameters $(t_x, t_y)$ , we calibrate the image's scale and position. The region of interest, particularly the head, is normalized into a consistent shape to extract a feature map with dimensions $C \times H \times W$ .
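As an illustration of the transformation in Equation 1, the minimal PyTorch sketch below crops a head-like region from a feature map by building a scale-and-translation sampling grid; all tensor shapes, parameter values, and function names are illustrative assumptions rather than the configuration used in this work.

```python
import torch
import torch.nn.functional as F

# Illustrative sketch of the spatial transformation in Eq. (1): target
# coordinates are mapped to source coordinates by a 2x3 affine matrix
# [[s_x, 0, t_x], [0, s_y, t_y]], and the input is then resampled.

def crop_region(feature_map, s_x, s_y, t_x, t_y, out_h, out_w):
    """Resample a (N, C, H, W) feature map onto an out_h x out_w grid."""
    n = feature_map.size(0)
    theta = feature_map.new_tensor([[s_x, 0.0, t_x],
                                    [0.0, s_y, t_y]]).expand(n, 2, 3)
    grid = F.affine_grid(theta, size=(n, feature_map.size(1), out_h, out_w),
                         align_corners=False)
    return F.grid_sample(feature_map, grid, align_corners=False)

if __name__ == "__main__":
    x = torch.randn(2, 768, 16, 8)           # assumed ViT feature-map shape
    # Zoom into the upper part of the map (a stand-in for the head region).
    head = crop_region(x, s_x=0.6, s_y=0.3, t_x=0.0, t_y=-0.7,
                       out_h=4, out_w=8)
    print(head.shape)                         # torch.Size([2, 768, 4, 8])
```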
215
+
216
+ Our approach modifies traditional ReID methods by segmenting the feature map into three distinct horizontal sections. Each section is processed by a dedicated attention layer. The mathematical formulation of the attention layer is defined as follows:
217
+
218
+ $$
219
+ d _ {i} = \sigma \left(U _ {i} \operatorname {R e L U} \left(W _ {i} X _ {i}\right)\right), \tag {2}
220
+ $$
221
+
222
+ where $\sigma$ represents the sigmoid activation function. In this context, $U_{i}$ is responsible for increasing dimensionality, ReLU (Rectified Linear Unit) introduces non-linearity, $W_{i}$ serves to reduce dimensions, and $X_{i}$ denotes the respective input slice of the feature map.
223
+
224
+ The output of the attention layer, when applied to the input $X_{i}$ , is described by the equation:
225
+
226
+ $$
227
+ A _ {i} = X _ {i} + X _ {i} \cdot d _ {i}, \tag {3}
228
+ $$
229
+
230
+ which illustrates the element-wise multiplication of $X_{i}$ with its attention scores $d_{i}$ . This computation produces an enhanced feature representation $A_{i}$ , combining the original input with its attention-modified counterpart.
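The sketch below illustrates Equations 2 and 3 for a single horizontal part. Pooling each part into a channel descriptor before the two linear layers, and the reduction ratio, are assumptions of this sketch; the text only specifies the sigmoid(U ReLU(W X)) form and the residual re-weighting.

```python
import torch
import torch.nn as nn

class PartAttention(nn.Module):
    """Channel attention over one horizontal part (Eqs. 2-3), SE-style sketch."""
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.w = nn.Linear(channels, channels // reduction)  # W_i: reduce dims
        self.u = nn.Linear(channels // reduction, channels)  # U_i: restore dims

    def forward(self, x_i):                  # x_i: (N, C, h_i, W)
        desc = x_i.mean(dim=(2, 3))          # channel descriptor (assumption)
        d_i = torch.sigmoid(self.u(torch.relu(self.w(desc))))   # Eq. (2)
        d_i = d_i[:, :, None, None]
        return x_i + x_i * d_i               # Eq. (3)

if __name__ == "__main__":
    feat = torch.randn(2, 256, 12, 8)        # assumed head feature map
    parts = feat.chunk(3, dim=2)             # three horizontal sections
    outputs = [PartAttention(256)(p) for p in parts]  # one layer per section
    print([a.shape for a in outputs])
```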
231
+
232
+ TABLE IV STREAMLINED SUMMARY OF THREE-STREAM ARCHITECTURE IN AG-REID.V2
233
+
234
+ <table><tr><td>Stream</td><td>Description</td><td>Function</td><td>Contribution</td></tr><tr><td>Stream 1</td><td>ViT for aerial image features.</td><td>Feature map extraction.</td><td>Metric consistency and distance.</td></tr><tr><td>Stream 2</td><td>Head region focus with spatial manipulation.</td><td>Head feature enhancement.</td><td>Aerial view identification.</td></tr><tr><td>Stream 3</td><td>ADH for feature refinement.</td><td>Attention maps for attributes.</td><td>Model interpretability and analysis.</td></tr></table>
235
+
236
+ The combined output from these layers, denoted as $A_{i}$ , leads to the formation of the final head feature $f_{h}$ , articulated as:
237
+
238
+ $$
239
+ f _ {h i} = A _ {i} \cdot \xi \left(\sum_ {n = 1} ^ {C} A _ {i} ^ {n}\right), \tag {4}
240
+ $$
241
+
242
+ where each channel $n$ of the feature $A_{i}$ is aggregated and subsequently multiplied by the channel-specific features to generate the head feature representation $f_{h}$ . This feature is further processed through a fully connected layer, resulting in an embedded feature $f_{e}$ with dimensions $N \times 2$ , where $N$ represents the batch size.
243
+
244
+ To assemble the final feature vector $f$ , we integrate the weighted head feature $f_{h}$ with the base feature $f_{t}$ from Stream 1 as follows:
245
+
246
+ $$
247
+ \left\{ \begin{array}{l} f = \left(f _ {t} \cdot w _ {1}\right) \oplus \left(f _ {h} \cdot w _ {2}\right) \\ f _ {w} = \left[ \begin{array}{l l} w _ {1} & w _ {2} \end{array} \right] \end{array} , \right. \tag {5}
248
+ $$
249
+
250
+ with $\oplus$ indicating the concatenation operation. The resultant feature vector $f$ encompasses a comprehensive and refined representation of individuals, optimizing the effectiveness of the re-identification process.
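A minimal sketch of the assembly in Equation 5 is shown below; the feature dimensions and the initialization of the weights $f_w$ are illustrative assumptions.

```python
import torch
import torch.nn as nn

class AdaptiveConcat(nn.Module):
    """Weighted concatenation of Eq. (5): f = (f_t * w1) concat (f_h * w2)."""
    def __init__(self):
        super().__init__()
        # f_w = [w1, w2]; initialization is an assumption of this sketch.
        self.f_w = nn.Parameter(torch.ones(2))

    def forward(self, f_t, f_h):
        return torch.cat([f_t * self.f_w[0], f_h * self.f_w[1]], dim=-1)

if __name__ == "__main__":
    f_t = torch.randn(4, 768)    # assumed Stream 1 (ViT) feature dimension
    f_h = torch.randn(4, 256)    # assumed head feature dimension
    print(AdaptiveConcat()(f_t, f_h).shape)   # torch.Size([4, 1024])
```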
251
+
252
+ # C. Stream 3: Explainable ReID Stream
253
+
254
+ Our explainable ReID network, represented as $\mathcal{G}(\cdot)$ , is structurally similar to the conventional ReID model $\mathcal{F}(\cdot)$ , with both models utilizing initial layers that focus on key visual features such as texture and color, which are vital for recognizing attributes. The distinctiveness of $\mathcal{G}(\cdot)$ is highlighted by integrating the Attribute Decomposition Head (ADH), which is an advancement over the traditional model architecture.
255
+
256
+ The Attribute Decomposition Head (ADH) is positioned after the final layer of the network $\mathcal{G}(\cdot)$ , serving as a crucial component for feature refinement. It begins with a convolutional layer of dimensions $\frac{C}{8} \times 3 \times 3$ , where $C$ is the number of channels from the preceding convolutional layer in $\mathcal{G}(\cdot)$ . This is followed by a convolution of $M \times 1 \times 1$ , where $M$ denotes the number of attributes. The activation function $\delta(\cdot)$ is employed here, which is vital for the generation of attribute-guided attention maps (AAMs). These AAMs are instrumental in providing a deeper understanding of the model's focus on specific attributes, thereby enhancing the explainability of the network's decision-making process.
257
+
258
+ The activation function utilized in our model is defined as follows:
259
+
260
+ $$
261
+ \delta (x) = \left\{ \begin{array}{l l} \mathcal {K} \cdot (x + 1) ^ {\mathcal {T}}, & \text {for } x > 0 \\ \mathcal {K} \cdot e ^ {x}, & \text {for } x \leq 0 \end{array} \right. \tag {6}
262
+ $$
263
+
264
+ where $\mathcal{K}$ and $\mathcal{T}$ are scaling factors between 0 and 1, and $x$ is the input from the preceding convolutional layer in the Attribute Decomposition Head (ADH) module. This function is designed to enhance the model's focus on relevant attribute regions while mitigating potential biases.
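For concreteness, a small sketch of the activation in Equation 6 follows; the values of $\mathcal{K}$ and $\mathcal{T}$ are taken from the implementation details reported later ($\mathcal{K}=1/M$, $\mathcal{T}=0.5$), and the clamping used to avoid invalid fractional powers for negative inputs is an implementation assumption.

```python
import torch

def adh_activation(x, k, t):
    """Piecewise activation of Eq. (6): k*(x+1)^t for x > 0, k*exp(x) otherwise."""
    pos = k * (x.clamp(min=0.0) + 1.0) ** t   # evaluated safely for all x
    neg = k * torch.exp(x)
    return torch.where(x > 0, pos, neg)

if __name__ == "__main__":
    m = 88                              # number of attributes in AG-ReID.v2
    x = torch.linspace(-3.0, 3.0, 7)
    print(adh_activation(x, k=1.0 / m, t=0.5))
```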
265
+
266
+ Attribute Attention Maps (AAMs) are derived from the ADH module. Unlike general spatial attention mechanisms that target larger spatial areas, AAMs provide a detailed attention distribution geared towards individual attributes. These maps are represented as $A_{i}, A_{j} \in \mathbb{R}^{M \times w \times h}$ , where the $k^{th}$ slice $A_{i}^{k}$ or $A_{j}^{k}$ indicates the level of attention allocated to the $k^{th}$ attribute.
267
+
268
+ The attribute-specific feature maps, denoted as $F_{i}^{k}$ and $F_{j}^{k}$ , are formulated as:
269
+
270
+ $$
271
+ \left\{ \begin{array}{l} F _ {i} ^ {k} = F _ {i} \otimes A _ {i} ^ {k} \\ F _ {j} ^ {k} = F _ {j} \otimes A _ {j} ^ {k} \end{array} \right. \tag {7}
272
+ $$
273
+
274
+ where $A_{i}^{k}, A_{j}^{k}$ are the attribute attention maps, and $\otimes$ signifies element-wise multiplication. Each image yields $M$ attribute-guided feature maps, with the $k^{th}$ attribute enhancing relevant pixels while diminishing others. These feature maps are further refined into attribute-guided feature vectors $f_{i}^{k}$ , $f_{j}^{k}$ through Generalized Mean Pooling (GeM), facilitating the measurement of attribute-specific distances.
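The sketch below illustrates Equation 7 followed by GeM pooling for one image; the tensor shapes and the GeM exponent are illustrative assumptions.

```python
import torch

def gem_pool(x, p=3.0, eps=1e-6):
    """Generalized Mean (GeM) pooling over the two spatial dimensions."""
    return x.clamp(min=eps).pow(p).mean(dim=(-2, -1)).pow(1.0 / p)

def attribute_guided_vectors(feat, attn):
    """Eq. (7): weight a (C, h, w) feature map by M attribute attention maps,
    then GeM-pool each weighted map into an attribute-guided vector.

    feat: (N, C, h, w) feature map; attn: (N, M, h, w) attention maps.
    Returns (N, M, C) attribute-guided feature vectors.
    """
    weighted = feat.unsqueeze(1) * attn.unsqueeze(2)   # (N, M, C, h, w)
    return gem_pool(weighted)

if __name__ == "__main__":
    f_i = torch.randn(2, 768, 16, 8)    # assumed backbone feature map
    a_i = torch.rand(2, 88, 16, 8)      # 88 attributes, as in AG-ReID.v2
    print(attribute_guided_vectors(f_i, a_i).shape)  # torch.Size([2, 88, 768])
```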
275
+
276
+ # D. Loss Function for the Explainable Three-Stream ReID Network
277
+
278
+ Our network employs a composite loss function, $L$ , which integrates various loss components tailored to different aspects of the model. This integrated loss is expressed as:
279
+
280
+ $$
281
+ L = L _ {d} + \alpha L _ {p 1} + \beta L _ {p 2} + \alpha L _ {\mathrm {triplet}} + \beta L _ {c e}. \tag {8}
282
+ $$
283
+
284
+ In this formulation, $L_{d}$ is the Metric Distillation Loss, critical for distance metric learning. $L_{p1}$ and $L_{p2}$ collectively constitute the Attribute Prior Loss, addressing attribute-specific features. $L_{triplet}$ represents the Triplet Loss, focusing on the relative distances between different data points. $L_{ce}$ , the Cross-Entropy Loss, aids in classification tasks. The parameters $\alpha$ and $\beta$ are used to balance these different loss components, ensuring an optimal combination for effective training.
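A minimal sketch of the combination in Equation 8 follows, using the $\alpha$ and $\beta$ values reported in the implementation details; the individual loss terms are assumed to be precomputed scalars.

```python
import torch

def total_loss(l_d, l_p1, l_p2, l_triplet, l_ce, alpha=10.0, beta=50.0):
    """Composite objective of Eq. (8); alpha = 10.0 and beta = 50.0 follow
    the hyperparameter settings reported in the implementation details."""
    return l_d + alpha * l_p1 + beta * l_p2 + alpha * l_triplet + beta * l_ce

if __name__ == "__main__":
    parts = [torch.tensor(v) for v in (0.2, 0.1, 0.05, 0.3, 0.8)]  # toy values
    print(total_loss(*parts).item())
```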
285
+
286
+ # 1) ID Losses:
287
+
288
+ a) Triplet Loss: The Triplet Loss function is designed to ensure that, for any given anchor sample, the distance to a positive sample (similar to the anchor) is smaller than the distance to a negative sample (dissimilar to the anchor) by a predefined margin $p$ . It is defined as:
289
+
290
+ $$
291
+ \mathcal {L} _ {\mathrm {triplet}} (i, j, k) = \max \left(p + d _ {i j} - d _ {j k}, 0\right), \tag {9}
292
+ $$
293
+
294
+ where $d_{ij}$ and $d_{jk}$ measure the metric distances between the respective samples.
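The following sketch implements the margin form of Equation 9 over precomputed positive and negative distances; the margin value and embedding dimension are illustrative assumptions.

```python
import torch

def triplet_loss(d_pos, d_neg, margin=0.3):
    """Margin-based triplet loss of Eq. (9), max(p + d_pos - d_neg, 0),
    averaged over a batch of precomputed distances."""
    return torch.clamp(margin + d_pos - d_neg, min=0.0).mean()

if __name__ == "__main__":
    f_a = torch.randn(8, 1024)      # anchor embeddings
    f_p = torch.randn(8, 1024)      # positives (same identity)
    f_n = torch.randn(8, 1024)      # negatives (different identity)
    d_ap = torch.norm(f_a - f_p, dim=1)
    d_an = torch.norm(f_a - f_n, dim=1)
    print(triplet_loss(d_ap, d_an).item())
```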
295
+
296
+ b) Cross-Entropy Loss: The Cross-Entropy Loss is a widely used loss function for classification tasks. It quantifies the discrepancy between the predicted probability distribution of the model and the true distribution, represented as:
297
+
298
+ $$
299
+ \mathcal {L} _ {c e} = - \frac {1}{N} \sum_ {i = 1} ^ {N} p _ {i} \log q _ {i}, \tag {10}
300
+ $$
301
+
302
+ where $N$ is the number of training samples, $q_{i}$ is the predicted probability of the model for the $i^{th}$ sample, and $p_i$ represents the corresponding one-hot encoded true label.
303
+
304
+ 2) Metric Distillation Loss: Influenced by the work of Chen et al. [41], the function $\mathcal{G}(\cdot)$ decomposes the distance $d_{i,j}$ , calculated by the target model $\mathcal{F}(\cdot)$ , into components influenced by individual attributes. This decomposition is mathematically formulated as:
305
+
306
+ $$
307
+ d _ {i, j} \approx \hat {d} _ {i, j} = \sum_ {k = 1} ^ {M} d _ {i, j} ^ {k}, \tag {11}
308
+ $$
309
+
310
+ where $M$ denotes the total number of attributes, and $d_{i,j}^{k}$ represents the distance between $x_{i}$ and $x_{j}$ influenced by the $k^{th}$ attribute. The reconstructed distance $\hat{d}_{i,j}$ is an approximation of $d_{i,j}$ as calculated by our model.
311
+
312
+ We define the metric distillation loss, diverging from standard distillation techniques typically used for classification, as:
313
+
314
+ $$
315
+ L _ {d} = \left| d _ {i, j} - \sum_ {k = 1} ^ {M} d _ {i, j} ^ {k} \right|, \tag {12}
316
+ $$
317
+
318
+ aiming to ensure consistency between the overall distance metrics generated by the target model and the attribute-influenced distances produced by our explainable model $\mathcal{G}(\cdot)$ .
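A minimal sketch of the metric distillation loss of Equations 11 and 12 follows, assuming the per-attribute distance components have already been computed by $\mathcal{G}(\cdot)$.

```python
import torch

def metric_distillation_loss(d_target, d_attr):
    """Eq. (12): |d_ij - sum_k d_ij^k|, averaged over a batch of pairs.

    d_target: (B,) pairwise distances from the target ReID model F(.)
    d_attr:   (B, M) per-attribute distance components from G(.)
    """
    return (d_target - d_attr.sum(dim=1)).abs().mean()

if __name__ == "__main__":
    d_ij = torch.rand(16)                 # toy distances from Stream 1
    d_ij_k = torch.rand(16, 88) / 88.0    # toy attribute-wise decomposition
    print(metric_distillation_loss(d_ij, d_ij_k).item())
```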
319
+
320
+ 3) Attribute Prior Loss: The Attribute Prior Loss, inspired by Chen et al. [41], is designed to emphasize the unique attributes of individuals. This loss function is particularly effective in cases where attributes are only weakly labeled. It focuses on distinct features that set individuals apart, such as unique accessories, rather than common characteristics like similar clothing. For a pair of input images with attributes $(x_{i},y_{i},a_{i})$ and $(x_{j},y_{j},a_{j})$ , we calculate the pairwise attribute vector $a_{i,j}$ as follows:
321
+
322
+ $$
323
+ a _ {i, j} = a _ {i} \oplus a _ {j}, \tag {13}
324
+ $$
325
+
326
+ where $\oplus$ indicates the Exclusive OR operation. This vector $a_{i,j}$ helps in identifying both the shared and unique attributes between $x_{i}$ and $x_{j}$ .
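Equation 13 reduces to an element-wise XOR over binary attribute vectors, as in the small sketch below (the toy attribute vectors are illustrative):

```python
import torch

def pairwise_attribute_vector(a_i, a_j):
    """Eq. (13): element-wise XOR of two binary attribute vectors.

    A 1 marks an attribute exclusive to one image of the pair; a 0 marks a
    shared attribute (either both present or both absent)."""
    return torch.logical_xor(a_i.bool(), a_j.bool()).long()

if __name__ == "__main__":
    a_i = torch.tensor([1, 0, 1, 1, 0])   # toy 5-attribute labels
    a_j = torch.tensor([1, 1, 0, 1, 0])
    a_ij = pairwise_attribute_vector(a_i, a_j)
    print(a_ij, "exclusive attributes:", int(a_ij.sum()))
```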
327
+
328
+ The Attribute Prior Loss incorporates constraints related to the influence of exclusive and shared attributes, expressed as:
329
+
330
+ $$
331
+ \left\{ \begin{array}{l} \sum_ {e = 1} ^ {M _ {E}} \frac {d _ {i , j} ^ {e}}{\hat {d} _ {i , j}} \geq \left(\frac {M _ {E}}{M}\right) ^ {\vartheta} \\ \sum_ {c = 1} ^ {M - M _ {E}} \frac {d _ {i , j} ^ {c}}{\hat {d} _ {i , j}} \leq 1 - \left(\frac {M _ {E}}{M}\right) ^ {\vartheta} \end{array} \right. \tag {14}
332
+ $$
333
+
334
+ Here, $\vartheta$ is a balancing parameter, $d_{i,j}^{e}$ represents the distance for exclusive attributes, $d_{i,j}^{c}$ for shared attributes, $M_E$ is the number of exclusive attributes, and $M$ is the total number of attributes.
335
+
336
+ TABLE V STATISTICS OF THE TESTING SET FOR THE AG-REID.V2 DATASET.
337
+
338
+ <table><tr><td>Cam</td><td>Subset</td><td>IDs</td><td>Images</td></tr><tr><td>Aerial</td><td>Query</td><td>534</td><td>2,356</td></tr><tr><td>CCTV</td><td>Gallery</td><td>534</td><td>6,347</td></tr><tr><td>Aerial</td><td>Query</td><td>519</td><td>2,209</td></tr><tr><td>Wearable</td><td>Gallery</td><td>519</td><td>12,912</td></tr><tr><td>CCTV</td><td>Query</td><td>534</td><td>1,811</td></tr><tr><td>Aerial</td><td>Gallery</td><td>534</td><td>14,362</td></tr><tr><td>Wearable</td><td>Query</td><td>519</td><td>2,340</td></tr><tr><td>Aerial</td><td>Gallery</td><td>519</td><td>12,568</td></tr></table>
339
+
340
+ The Attribute Prior Loss is divided into two components: $L_{p1}$ and $L_{p2}$ . This division allows for a detailed approach to accounting for the impact of both exclusive and shared attributes in the ReID process.
341
+
342
+ a) First Part $L_{p1}$ : The expression for $L_{p1}$ is given by:
343
+
344
+ $$
345
+ L _ {p 1} = \max \left(0, \left(\frac {M _ {E}}{M}\right) ^ {\vartheta} - \sum_ {e = 1} ^ {M _ {E}} \frac {d _ {i , j} ^ {e}}{\hat {d} _ {i , j}}\right) + \max \left(0, \sum_ {c = 1} ^ {M - M _ {E}} \frac {d _ {i , j} ^ {c}}{\hat {d} _ {i , j}} - 1 + \left(\frac {M _ {E}}{M}\right) ^ {\vartheta}\right), \tag {15}
346
+ $$
347
+
348
+ where $M_E$ represents the number of exclusive attributes and $M$ is the total number of attributes. This part of the loss function focuses on the proportional influence of exclusive attributes, ensuring that they have a more significant impact on the overall distance compared to common attributes.
349
+
350
+ b) Second Part $L_{p2}$ : The equation for $L_{p2}$ is:
351
+
352
+ $$
353
+ L _ {p 2} = \sum_ {e = 1} ^ {M _ {E}} \max \left(0, e ^ {- \lambda} \frac {\left(\frac {M _ {E}}{M}\right) ^ {\vartheta}}{M _ {E}} - \frac {d _ {i , j} ^ {e}}{\hat {d} _ {i , j}}\right) + \sum_ {c = 1} ^ {M - M _ {E}} \max \left(0, \frac {d _ {i , j} ^ {c}}{\hat {d} _ {i , j}} - e ^ {\lambda} \frac {1 - \left(\frac {M _ {E}}{M}\right) ^ {\vartheta}}{M - M _ {E}}\right), \tag {16}
354
+ $$
355
+
356
+ with the value of $\lambda$ determined as:
357
+
358
+ $$
359
+ \lambda = \frac {1}{2} \ln \frac {M - M _ {E} \left(\frac {M _ {E}}{M}\right) ^ {\vartheta}}{M _ {E} \left(1 - \left(\frac {M _ {E}}{M}\right) ^ {\vartheta}\right)}. \tag {17}
360
+ $$
361
+
362
+ This component sets specific thresholds for the contributions of each type of attribute, emphasizing the lower bound for exclusive attributes and the upper limit for common ones.
363
+
364
+ By using these two loss functions, $L_{p1}$ and $L_{p2}$ , our model finely tunes the balance between the collective and individual influences of attributes, enhancing both accuracy and interpretability in person ReID tasks.
365
+
366
+ # V. EXPERIMENTS
367
+
368
+ In Section V-A, we outline the process of splitting our dataset for training and evaluation. We then present the implementation details and evaluation metrics in Section V-B. In Section V-C, we evaluate popular baseline methods and state-of-the-art ReID models on datasets spanning various modalities. Finally, in Section V-D, we report the performance of our proposed explainable elevated-view attention ReID method on the AG-ReID.v2 dataset.
369
+
370
+ TABLE VI PERFORMANCE COMPARISON OF BASELINE AND STATE-OF-THE-ART PERSON REID METHODS ON DIFFERENT DATASETS: MARKET-1501 (GROUND-GROUND), UAV-HUMAN (AERIAL-AERIAL), AND AG-REID.v2 (AERIAL-GROUND). IN THIS CONTEXT, G REPRESENTS GROUND, A FOR AERIAL, C FOR CCTV, AND W FOR WEARABLE. THE AG-REID.v2 DATASET TESTS INCLUDE CROSS-DOMAIN ADAPTATIONS: $\mathbf{A}\rightarrow \mathbf{C}$ (AERIAL TO CCTV), $\mathbf{A}\rightarrow \mathbf{W}$ (AERIAL TO WEARABLE), $\mathbf{C}\rightarrow \mathbf{A}$ (CCTV TO AERIAL), AND $\mathbf{W}\rightarrow \mathbf{A}$ (WEARABLE TO AERIAL).
371
+
372
+ <table><tr><td rowspan="2">Model</td><td colspan="2">G → G</td><td colspan="2">A → A</td><td colspan="2">A → C</td><td colspan="2">A → W</td><td colspan="2">C → A</td><td colspan="2">W → A</td></tr><tr><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td></tr><tr><td>Swin [27]</td><td>79.70</td><td>92.75</td><td>67.37</td><td>68.23</td><td>57.66</td><td>68.76</td><td>56.15</td><td>68.49</td><td>57.70</td><td>68.80</td><td>53.90</td><td>64.40</td></tr><tr><td>HRNet-18 [26]</td><td>76.65</td><td>90.83</td><td>64.52</td><td>65.48</td><td>65.07</td><td>75.21</td><td>66.17</td><td>76.26</td><td>66.16</td><td>76.25</td><td>66.17</td><td>76.25</td></tr><tr><td>SwinV2 [28]</td><td>82.99</td><td>92.93</td><td>69.15</td><td>70.12</td><td>66.09</td><td>76.44</td><td>69.09</td><td>80.08</td><td>62.14</td><td>77.11</td><td>65.61</td><td>74.53</td></tr><tr><td>MGN (R50) [20]</td><td>86.90</td><td>95.70</td><td>70.40</td><td>70.38</td><td>70.17</td><td>82.09</td><td>78.66</td><td>88.14</td><td>72.41</td><td>84.21</td><td>73.73</td><td>84.06</td></tr><tr><td>BoT (R50) [19]</td><td>83.95</td><td>94.77</td><td>63.41</td><td>62.48</td><td>71.49</td><td>80.73</td><td>75.98</td><td>86.06</td><td>69.67</td><td>79.46</td><td>72.41</td><td>82.69</td></tr><tr><td>BoT (R50) + Attributes</td><td>84.92</td><td>95.37</td><td>64.11</td><td>63.28</td><td>72.19</td><td>81.43</td><td>76.68</td><td>86.66</td><td>70.37</td><td>80.15</td><td>73.11</td><td>83.29</td></tr><tr><td>SBS (R50) [36]</td><td>88.20</td><td>95.40</td><td>65.93</td><td>66.38</td><td>72.04</td><td>81.96</td><td>78.94</td><td>88.14</td><td>73.89</td><td>84.10</td><td>75.01</td><td>84.66</td></tr><tr><td>SBS (R50) + Attributes</td><td>88.90</td><td>96.00</td><td>66.63</td><td>67.38</td><td>72.74</td><td>82.56</td><td>79.64</td><td>88.74</td><td>74.59</td><td>84.80</td><td>75.71</td><td>85.26</td></tr><tr><td>V2E (ViT) - Ours</td><td>90.25</td><td>96.35</td><td>71.47</td><td>72.75</td><td>80.72</td><td>88.77</td><td>84.85</td><td>93.62</td><td>78.51</td><td>87.86</td><td>80.11</td><td>88.61</td></tr></table>
373
+
374
+ ![](images/451bbedada0c6a7abbbbfee8e7853c64f87d6ec9d88f3d314c453e8b79cbaf4b.jpg)
375
+ (a) Successful ReID Matches
376
+ Fig. 10. Examples highlighting our model's performance on AG-ReID.v2.
377
+
378
+ ![](images/875268933cdfd02fee0a3b66f5f130c06e473f84e328e9ae0647a9cf7dcaf03b.jpg)
379
+ (b) ReID Mismatches
380
+
381
+ # A. Dataset Partition
382
+
383
+ The AG-ReID.v2 dataset is split evenly between training and testing sets at a 1:1 ratio. The training set has 807 unique identities with 51,530 images. In contrast, the testing set features the remaining 808 identities, totaling 48,972 images. For testing, the data is split into two main categories: aerial-ground and ground-aerial, highlighting the focus on aerial-ground matching. Within these categories, each identity can have between one and six images from one camera as queries. The gallery set uses images from the other camera. This division method is inspired by the Market-1501 dataset but with slight changes in the number of images per identity. Detailed information about the testing subsets is presented in Table V.
384
+
385
+ # B. Implementation and Evaluation Metrics
386
+
387
+ In our study, we employed established person re-identification (ReID) techniques, including the BoT baseline [19], Stronger Baseline (SBS) [36], MGN [20], HRNet-18 [26], Swin [27], and SwinV2 [28]. These ReID models were built on various backbones such as ResNet-50, Vision Transformer (ViT), OSNet, HRNet-18, Swin, and SwinV2, each pre-trained on the ImageNet dataset [42]. The models using ResNet-50 and OSNet backbones were optimized with the Adam algorithm [43] at a learning rate of $10^{-4}$ . In contrast, models with HRNet-18, Swin, and SwinV2 backbones utilized the SGD optimizer at a learning rate of $5\times 10^{-2}$ . The ViT backbone model was trained with the SGD optimizer at a learning rate of $10^{-3}$ .
390
+
391
+ Additionally, the component $\mathcal{G}(\cdot)$ , functioning as a crucial part of our V2E network, was rigorously trained on the Market-1501, UAV-Human, and AG-ReID.v2 datasets. For these datasets, the number of attributes, denoted as $M$ , was set to 28, 38, and 88, respectively. The hyperparameters specified in Equation 8 were set as $\alpha = 10.0$ and $\beta = 50.0$ . Additionally, the parameter $\vartheta$ in Equation 14 was set to 0.5, while $\mathcal{K}$ and $\mathcal{T}$ in Equation 6 were assigned values of $1 / M$ and 0.5, respectively. The chosen mini-batch size for training was configured as $6\times 4$ , implying the inclusion of 6 IDs, each with 4 samples. In line with the Stronger Baseline (SBS) method [36], our V2E model integrates shared convolutional layers with the main ReID model. This integration facilitates effective pattern recognition, particularly for elevated camera angles, and enables comprehensive performance analysis under varying camera perspectives. By sharing convolutional layers between the ReID and V2E models, we achieved a reduction in computational demands during inference, thereby enhancing the overall efficiency and interpretability of our ReID model.
392
+
393
+ For evaluation, we employed the mean Average Precision (mAP) [17] and Cumulative Matching Characteristics (CMC-k) [44] metrics. The mAP metric averages the precision across different recall levels, considering multiple ground truth labels, while the CMC-k metric gauges the likelihood of correct identification within the top-k results, with our report focusing on rank-1 performance.
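For reference, a simplified sketch of the CMC rank-k computation is given below; it omits the camera-ID filtering and the mAP computation of the full evaluation protocol, and all inputs are illustrative.

```python
import numpy as np

def cmc_rank_k(dist, query_ids, gallery_ids, k=1):
    """CMC rank-k accuracy: fraction of queries whose top-k gallery matches
    contain at least one image with the same identity.

    dist: (num_query, num_gallery) distance matrix (smaller = more similar).
    """
    order = np.argsort(dist, axis=1)[:, :k]                 # top-k gallery indices
    hits = (gallery_ids[order] == query_ids[:, None]).any(axis=1)
    return hits.mean()

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    d = rng.random((5, 20))                  # toy distance matrix
    q_ids = np.array([0, 1, 2, 3, 4])
    g_ids = rng.integers(0, 5, size=20)
    print("Rank-1:", cmc_rank_k(d, q_ids, g_ids, k=1))
```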
394
+
395
+ # C. Comparison with State-of-the-art Models
396
+
397
+ In this section, a comparative analysis is conducted to assess the efficacy of various re-identification (ReID) models on three datasets, summarized in Table VI. We begin with the strong baseline (BoT) model, which is built upon the ResNet50 architecture, as described in [19].
400
+
401
+ In the Aerial to CCTV dataset, the BoT model achieved an mAP of $71.49\%$ and a Rank-1 accuracy of $80.73\%$ . When evaluated on the Aerial to Wearable dataset, there was an improvement with an mAP of $75.98\%$ and Rank-1 accuracy of $86.06\%$ . In the Ground to Aerial scenario, which considers queries from both CCTV and wearable platforms, the BoT model reported an mAP of $69.67\%$ and a Rank-1 accuracy of $79.46\%$ for the CCTV-Aerial pairing, and an mAP of $72.41\%$ with a Rank-1 accuracy of $82.69\%$ for the Wearable-Aerial configuration. Notably, on the Market-1501 dataset, which is solely ground-based, the BoT achieved an mAP of $83.95\%$ and a Rank-1 accuracy of $94.77\%$ . This disparity accentuates the inherent complexities of ReID involving aerial data.
402
+
403
+ Further analyses were conducted on models such as HRNet-18 and SwinV2. HRNet-18 [26] produced satisfactory results but remained behind BoT in the Aerial to CCTV and Aerial to Wearable datasets, with mAP values of $65.07\%$ and $66.17\%$ , and Rank-1 accuracies of $75.21\%$ and $76.26\%$ , respectively. SwinV2 [28] was more competitive, achieving an mAP of $66.09\%$ and a Rank-1 accuracy of $76.44\%$ in the Aerial to CCTV dataset, and an mAP of $69.09\%$ with a Rank-1 accuracy of $80.08\%$ in the Aerial to Wearable dataset. A detailed comparison with Swin [27] can offer further insights into the differential performance metrics of SwinV2.
404
+
405
+ Our proposed V2E model presented noteworthy advancements. In the Aerial to CCTV dataset, V2E recorded an mAP of $80.72\%$ and a Rank-1 accuracy of $88.77\%$ . This trend persisted in the Aerial to Wearable dataset where V2E achieved an mAP of $84.85\%$ and a Rank-1 accuracy of $93.62\%$ .
406
+
407
+ # D. Ablation Studies
408
+
409
+ To offer a more comprehensive understanding of the contribution of each component, Table VII presents the outcomes of our Explainable Elevated-View Attention (EP+EVA) technique on the AG-ReID.v2 dataset.
410
+
411
+ Combining the Vision Transformer (ViT) backbone with the Explainable (EP) component yielded a marked improvement in the aerial-to-CCTV setting: the mAP increased from $77.03\%$ to $79.00\%$ (a gain of 1.97 points), and the Rank-1 accuracy rose from $85.4\%$ to $87.7\%$ (a gain of 2.3 points).
412
+
413
+ Expanding upon the role of each component:
414
+
415
+ 1) Vision Transformer (ViT) Backbone: The ViT backbone is a key component of our architecture. Our ablation study demonstrates its effectiveness, particularly in its compatibility with EP and EVA components. This integration contributes to enhanced re-identification performance in diverse scenarios.
416
+
417
+ 2) Elevated-View Attention (EVA) Mechanism: Incorporating the EVA mechanism with the ViT backbone resulted in a measurable improvement in performance metrics: an increase in mAP by $1.67\%$ , reaching $78.70\%$ , and a rise in Rank-1 accuracy by $1.33\%$ , achieving $86.73\%$ . The EVA mechanism effectively addresses challenges specific to aerial imagery and refines ReID methods through optimized partition and attention layers.
420
+
421
+ 3) Explainable Stream's Attention Map: The attention map within the Explainable Stream is designed to locate distinct attributes or features. This targeted focus enhances the model's capability to provide insights and improves overall performance, distinguishing it from conventional spatial attention mechanisms.
422
+
423
+ Our results are visually represented in Figures 10 and 11, which complement our analytical findings. Figure 10 displays query images alongside their top-5 gallery matches, with correct matches bordered in red and mismatches in blue. This figure highlights both accurate and erroneous matches in the aerial-to-CCTV context (top two rows), and in the CCTV-to-aerial context (bottom rows). Common causes of misidentification, including similar clothing, postures, and camera perspectives, are evident in these examples. In contrast, Figure 11 illustrates the impact of different attributes on the accuracy of our model in both aerial-to-ground and ground-to-aerial scenarios.
424
+
425
+ # E. Discussion
426
+
427
+ Our study extends the foundational work of Chen et al. [41] by introducing a transformer-based architecture tailored for aerial-ground person re-identification. As detailed in Section IV-A (Stream 1), our approach surpasses the limitations of Chen et al.'s methodology [41] in processing low-resolution images, a common challenge in aerial imagery. This capability significantly enhances the model's effectiveness in aerial-ground ReID scenarios.
428
+
429
+ In terms of performance, our model, featuring the V2E network, demonstrates substantial improvements in both mAP and Rank-1 accuracy, particularly in aerial to CCTV and aerial to wearable datasets. This indicates a marked advancement in the model's ability to handle complex aerial-ground ReID tasks.
430
+
431
+ Distinct from Chen et al. [41], our model incorporates several innovations: the use of 2D Adaptive Average Pooling for improved feature extraction, Automatic Mixed Precision (AMP) Training with the GradScale method for optimized computational efficiency and precision, and the training on 88 Binary Vector Attributes derived from 15 soft attribute labels for a more detailed identity analysis. These features collectively enhance the model's robustness and accuracy, setting it apart from previous methodologies.
432
+
433
+ Finally, our comprehensive evaluations of various backbones, including ResNet, ViT, and OSNet, led to the selection of ViT as the most suitable for our dataset and application. This choice highlights the model's adaptability and effectiveness in aerial-ground ReID applications.
434
+
435
+ # VI. CONCLUSION
436
+
437
+ In this paper, we present a novel contribution to Aerial-Ground ReID research through the introduction of an expanded dataset, building upon our prior work. This dataset is unique in its integration of diverse data sources: aerial, CCTV, and, notably, wearable technologies.
438
+
439
+ TABLE VII PERFORMANCE OF THE PROPOSED EXPLAINABLE ELEVATED-VIEW ATTENTION METHOD ON AG-REID.V2 DATASET. VIT DENOTES VISION TRANSFORMER BACKBONE, EP DENOTES EXPLAINABLE PROCESSING, EVA DENOTES ELEVATED-VIEW ATTENTION. PERFORMANCE IMPROVEMENTS OVER VIT ARE INDICATED WITH BOLD TEXT, WITH THE HIGHEST IMPROVEMENT HIGHLIGHTED IN RED.
440
+
441
+ <table><tr><td rowspan="2">Model</td><td colspan="2">Aerial → CCTV</td><td colspan="2">Aerial → Wearable</td><td colspan="2">CCTV → Aerial</td><td colspan="2">Wearable → Aerial</td></tr><tr><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td></tr><tr><td>ViT</td><td>77.03</td><td>85.40</td><td>80.48</td><td>89.77</td><td>75.90</td><td>84.65</td><td>76.59</td><td>84.27</td></tr><tr><td>ViT+EP</td><td>79.00(+1.97)</td><td>87.70(+2.30)</td><td>83.14(+2.66)</td><td>93.67(+3.90)</td><td>78.24(+2.34)</td><td>87.35(+2.70)</td><td>79.08(+2.49)</td><td>87.73(+3.46)</td></tr><tr><td>ViT+EVA</td><td>78.70(+1.67)</td><td>86.73(+1.33)</td><td>81.97(+1.49)</td><td>90.50(+0.73)</td><td>76.23(+0.33)</td><td>85.10(+0.45)</td><td>77.80(+1.21)</td><td>85.83(+1.56)</td></tr><tr><td>ViT+EVA+EP</td><td>80.72(+3.69)</td><td>88.77(+3.37)</td><td>84.85(+4.37)</td><td>93.62(+3.85)</td><td>78.51(+2.61)</td><td>87.86(+3.21)</td><td>80.11(+3.52)</td><td>88.61(+4.34)</td></tr></table>
442
+
443
+ ![](images/999eacf6dbba18a0b42afaf1606ce16007940ddac5f710578fa91e64b95e223a.jpg)
444
+ Fig. 11. Model's improved Rank-1 accuracy: aerial-to-ground (left) vs. ground-to-aerial (right), influenced by attributes.
445
+
446
+ It comprises 1,615 distinct person identities represented by a total of 100,502 images. To effectively tackle the specific challenges introduced by wearable data, our approach incorporates a three-stream attribute-based ReID methodology. This methodology is distinguished by an explainable elevated-view attention mechanism, primarily focused on the head region and attribute attention maps. This strategic emphasis enables our approach to overcome traditional limitations in ReID tasks. Our methodology demonstrates marked improvements in rank accuracy and overall performance. In line with our commitment to supporting ongoing research in this field, we will make the enriched dataset and the baseline code freely available to the research community.
447
+
448
+ # ACKNOWLEDGMENTS
449
+
450
+ This study, funded by the Australian Research Council (ARC) Discovery (Project No. DP200101942) and a QUT Postgraduate Research Award, acknowledges the support of the Research Engineering Facility (REF) team at QUT for their expertise and research infrastructure, which have been instrumental in enabling this project. Ethics approval for this study has been secured, and to ensure privacy, participant facial regions were pixelated.
451
+
452
+ # REFERENCES
453
+
454
+ [1] M. Ye, J. Shen, G. Lin, T. Xiang, L. Shao, and S. C. H. Hoi, "Deep learning for person re-identification: A survey and outlook," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. PP, 2021.
455
+
456
+ [2] Z. Wang, Z. Wang, Y. Wu, J. Wang, and S. Satoh, "Beyond intramodality discrepancy: A comprehensive survey of heterogeneous person re-identification," in International Joint Conference on Artificial Intelligence, 2020.
457
+ [3] L. Zheng, Y. Yang, and A. Hauptmann, "Person re-identification: Past, present and future," ArXiv, vol. abs/1610.02984, 2016.
458
+ [4] W. Liu, X. Chang, L. Chen, D. Q. Phung, X. Zhang, Y. Yang, and A. G. Hauptmann, “Pair-based uncertainty and diversity promoting early active learning for person re-identification,” ACM Transactions on Intelligent Systems and Technology (TIST), vol. 11, pp. 1–15, 2020.
459
+ [5] Z. Li, W. Liu, X. Chang, L. Yao, M. Prakash, and H. Zhang, "Domain-aware unsupervised cross-dataset person re-identification," in Advanced Data Mining and Applications, 2019.
460
+ [6] K. Nguyen, C. Fookes, S. Sridharan, Y. Tian, F. Liu, X. Liu, and A. Ross, "The state of aerial surveillance: A survey," CoRR, vol. abs/2201.03080, 2022. [Online]. Available: https://arxiv.org/abs/2201.03080
461
+ [7] T. Li, J. Liu, W. Zhang, Y. Ni, W. Wang, and Z. Li, "Uav-human: A large benchmark for human behavior understanding with unmanned aerial vehicles," IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 16261-16270, 2021.
462
+ [8] X. Yu, Y. Gong, N. Jiang, Q. Ye, and Z. Han, "Scale match for tiny person detection," IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1246-1254, 2020.
463
+ [9] F. Granelli, C. Sacchi, R. Bassoli, R. Cohen, and I. Ashkenazi, "A dynamic and flexible architecture based on uavs for border security and safety," in Advanced Technologies for Security Applications. Dordrecht: Springer Netherlands, 2020, pp. 295-306.
464
+ [10] A. Singh, D. Patil, and S. N. Omkar, "Eye in the sky: Real-time drone surveillance system (dss) for violent individuals identification using scatternet hybrid deep learning network," 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1710-17108, 2018.
465
+ [11] G. Soldi, D. Gaglione, N. Forti, A. D. Simone, F. C. Daffina, G. Bottini, D. Quattrociocchi, L. M. Millefiori, P. Braca, S. Carniel, P. K. Willett, A. Iodice, D. Riccio, and A. Farina, "Space-based global maritime surveillance. part i: Satellite technologies," IEEE Aerospace and Electronic Systems Magazine, vol. 36, pp. 8-28, 2021.
466
+ [12] F. Greenwood, “Can a police drone recognize your face?” https://slate.com/technology/2020/07/police-drone-facial-recognition.html, 2021. [Online]. Available: https://slate.com/technology/2020/07/police-drone-facial-recognition.html
467
+ [13] S. V. A. Kumar, E. Yaghoubi, A. Das, B. S. Harish, and H. Proença, "The p-destre: A fully annotated dataset for pedestrian detection, tracking, and short/long-term re-identification from aerial devices," IEEE Transactions on Information Forensics and Security, vol. 16, pp. 1696–1708, 2021.
468
+ [14] S. Zhang, Q. Zhang, X. Wei, P. Wang, B. Jiao, and Y. Zhang, "Person re-identification in aerial imagery," IEEE Transactions on Multimedia, vol. 23, pp. 281-291, 2019.
469
+ [15] A. Schumann and J. Metzler, "Person re-identification across aerial and ground-based cameras by deep feature fusion," in Defense + Security, 2017.
470
+ [16] H. Nguyen, K. Nguyen, S. Sridharan, and C. Fookes, “Aerial-ground person re-id,” 2023.
471
+ [17] L. Zheng, L. Shen, L. Tian, S. Wang, J. Wang, and Q. Tian, "Scalable person re-identification: A benchmark," IEEE International Conference on Computer Vision (ICCV), pp. 1116–1124, 2015.
472
+ [18] M. Gou, S. Karanam, W. Liu, O. I. Camps, and R. J. Radke, "DukeMTMC4ReID: A large-scale multi-camera person re-identification dataset," IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1425-1434, 2017.
473
+
474
+ [19] H. Luo, Y. Gu, X. Liao, S. Lai, and W. Jiang, "Bag of tricks and a strong baseline for deep person re-identification," IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1487-1495, 2019.
475
+ [20] G. Wang, Y. Yuan, X. Chen, J. Li, and X. Zhou, "Learning discriminative features with multiple granularities for person re-identification," Proceedings of the 26th ACM international conference on Multimedia, 2018.
476
+ [21] F. Qi, B. Yan, L. Cao, and H. Wang, "Stronger baseline for person re-identification," ArXiv, vol. abs/2112.01059, 2021.
477
+ [22] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2016.
478
+ [23] M. Broström, “Real-time multi-camera multi-object tracker using yolov5 and strongsort with osnet,” https://github.com/mikel-brostrom/Yolov5_StrongSORT_OSNet, 2022.
479
+ [24] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, J. Uszkoreit, and N. Houlsby, "An image is worth 16x16 words: Transformers for image recognition at scale," in International Conference on Learning Representations (ICLR), vol. abs/2010.11929, 2021.
480
+ [25] Z. Zheng, X. Yang, Z. Yu, L. Zheng, Y. Yang, and J. Kautz, "Joint discriminative and generative learning for person re-identification," IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2133-2142, 2019.
481
+ [26] J. Wang, K. Sun, T. Cheng, B. Jiang, C. Deng, Y. Zhao, D. Liu, Y. Mu, M. Tan, X. Wang, W. Liu, and B. Xiao, "Deep high-resolution representation learning for visual recognition," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 43, pp. 3349-3364, 2019.
482
+ [27] Z. Liu, Y. Lin, Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, and B. Guo, "Swin transformer: Hierarchical vision transformer using shifted windows," ArXiv, vol. abs/2103.14030, 2021.
483
+ [28] Z. Liu, H. Hu, Y. Lin, Z. Yao, Z. Xie, Y. Wei, J. Ning, Y. Cao, Z. Zhang, L. Dong, F. Wei, and B. Guo, "Swin transformer v2: Scaling up capacity and resolution," in International Conference on Computer Vision and Pattern Recognition (CVPR), 2022.
484
+ [29] D. Chung, K. Tahrboub, and E. J. Delp, "A two stream siamese convolutional neural network for person re-identification," IEEE International Conference on Computer Vision (ICCV), pp. 1992–2000, 2017.
485
+ [30] J. Xie, Y. Ge, J. Zhang, S. Huang, C. Feiyu, and H. Wang, “Low-resolution assisted three-stream network for person re-identification,” The Visual Computer, vol. 38, pp. 2515–2525, 2021.
486
+ [31] A. Khatun, S. Denman, S. Sridharan, and C. Fookes, “Joint identification-verification for person re-identification: A four stream deep learning approach with improved quartet loss function,” Comput. Vis. Image Underst., vol. 197-198, p. 102989, 2020.
487
+ [32] Z. Wang, F. Zhu, S. Tang, R. Zhao, L. He, and J. Song, “Feature erasing and diffusion network for occluded person re-identification,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 4754–4763.
488
+ [33] A. Khatun, S. Denman, S. Sridharan, and C. Fookes, “Pose-driven attention-guided image generation for person re-identification,” Pattern Recognition, vol. 137, p. 109246, 2023.
489
+ [34] C. Li, L. Li, H. Jiang, K. Weng, Y. Geng, L. Li, Z. Ke, Q. Li, M. Cheng, W. Nie, Y. Li, B. Zhang, Y. Liang, L. Zhou, X. Xu, X. Chu, X. Wei, and X. Wei, "Yolov6: A single-stage object detection framework for industrial applications," 2022.
490
+ [35] Y. Lin, L. Zheng, Z. Zheng, Y. Wu, and Y. Yang, “Improving person re-identification by attribute and identity learning,” ArXiv, vol. abs/1703.07220, 2019.
491
+ [36] L. He, X. Liao, W. Liu, X. Liu, P. Cheng, and T. Mei, "Fastreid: A pytorch toolbox for general instance re-identification," ArXiv, vol. abs/2006.02631, 2020.
492
+ [37] K. Zhou, Y. Yang, A. Cavallaro, and T. Xiang, "Omni-scale feature learning for person re-identification," IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3701-3711, 2019.
493
+ [38] S. He, H. Luo, P. Wang, F. Wang, H. Li, and W. Jiang, "Transreid: Transformer-based object re-identification," IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14993-15002, 2021.
494
+ [39] B. Xu, L. He, X. Liao, W. Liu, Z. Sun, and T. Mei, "Black re-id: A head-shoulder descriptor for the challenging problem of person re-identification," Proceedings of the 28th ACM International Conference on Multimedia, 2020.
495
+ [40] M. Jaderberg, K. Simonyan, A. Zisserman, and K. Kavukcuoglu, "Spatial transformer networks," in NIPS, 2015.
496
+
497
+ [41] X. Chen, X. Liu, W. Liu, X. Zhang, Y. Zhang, and T. Mei, "Explainable person re-identification with attribute-guided metric distillation," 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 11793-11802, 2021.
498
+ [42] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei, "Imagenet: A large-scale hierarchical image database," 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255, 2009.
499
+ [43] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” CoRR, vol. abs/1412.6980, 2015.
500
+ [44] X. Wang, G. Doretto, T. B. Sebastian, J. Rittscher, and P. H. Tu, "Shape and Appearance Context Modeling," 2007 IEEE 11th International Conference on Computer Vision, pp. 1-8, 2007.
501
+
502
+ ![](images/dfe9a0a5e346716ce261f8cd452443e9b58ea7063024b4973413ebb4aaa8d8ad.jpg)
503
+
504
+ Huy Nguyen received the B.Eng. (Hons) degree in Computer and Software Systems from Queensland University of Technology (QUT). Currently, he is pursuing a Ph.D. in computer vision at QUT. His research interests include computer vision, aerial surveillance and its application using deep learning models.
505
+
506
+ ![](images/e0cfee6af683b901d993aa7cc1fd1301d6d91a769250aa98126386ded491f7b7.jpg)
507
+
508
+ Kien Nguyen is a Senior Research Fellow at Queensland University of Technology. He has been conducting research in surveillance and biometrics for the last 10 years, and has published his research in high quality journals and conferences in the area. His research interests are in application of computer vision and deep learning techniques to the areas of biometrics, surveillance and scene understanding. He has been serving as an Associate Editor for IEEE Access, and Image and Vision Computing.
509
+
510
+ ![](images/bc43a8065482ef8dfda5ebd3c76e43c1674e9e7f47e3cc973f4ad004113a3743.jpg)
511
+
512
+ Sridha Sridharan obtained his MSc degree from the University of Manchester, UK and his PhD degree from University of New South Wales, Australia. He is currently a Professor at Queensland University of Technology (QUT) where he leads the research program in Signal Processing, Artificial Intelligence and Vision Technologies (SAIVT).
513
+
514
+ ![](images/d9a61d2f93169ece3fe6ec22669dce70b62ebb43c8809394999dfffbe8beca23.jpg)
515
+
516
+ Clinton Fookes is a Professor in Vision and Signal Processing at the Queensland University of Technology. He holds a BEng (Aero/Av), an MBA, and a PhD in computer vision. He actively researches across computer vision, machine learning, signal processing and pattern recognition areas. He serves on the editorial boards for the IEEE Transactions on Image Processing, Pattern Recognition, and the IEEE Transactions on Information Forensics and Security. He is a Senior Member of the IEEE, an Australian Institute of Policy and Science Young Tall
517
+
518
+ Poppy, an Australian Museum Eureka Prize winner, and a Senior Fulbright Scholar.
2401.02xxx/2401.02634/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be456a34a36733be9e1de81e802ff6581194e5fcc002ebff0f9eba423492c573
3
+ size 979164
2401.02xxx/2401.02634/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02644/584a905e-d292-4ca6-b5dd-1de3d5f6db06_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e45be0c6f3705610451fb0304540130376d9fa5bfbed16aae7c025581fc4b276
3
+ size 2293443
2401.02xxx/2401.02644/full.md ADDED
@@ -0,0 +1,569 @@
 
 
 
 
1
+ # SIMPLE HIERARCHICAL PLANNING WITH DIFFUSION
2
+
3
+ Chang Chen $^{1}$ , Fei Deng $^{1}$ , Kenji Kawaguchi $^{2}$ , Caglar Gulcehre $^{3,4*}$ , Sungjin Ahn $^{5\dagger}$
4
+
5
+ $^{1}$ Rutgers University, $^{2}$ National University of Singapore, $^{3}$ EPFL
6
+ $^{4}$ Google DeepMind, $^{5}$ KAIST
7
+
8
+ # ABSTRACT
9
+
10
+ Diffusion-based generative methods have proven effective in modeling trajectories with offline datasets. However, they often face computational challenges and can falter in generalization, especially in capturing temporal abstractions for long-horizon tasks. To overcome this, we introduce the Hierarchical Diffuser, a simple, fast, yet surprisingly effective planning method combining the advantages of hierarchical and diffusion-based planning. Our model adopts a "jumpy" planning strategy at the higher level, which allows it to have a larger receptive field but at a lower computational cost—a crucial factor for diffusion-based planning methods, as we have empirically verified. Additionally, the jumpy sub-goals guide our low-level planner, facilitating a fine-tuning stage and further improving our approach's effectiveness. We conducted empirical evaluations on standard offline reinforcement learning benchmarks, demonstrating our method's superior performance and efficiency in terms of training and planning speed compared to the non-hierarchical Diffuser as well as other hierarchical planning methods. Moreover, we explore our model's generalization capability, particularly on how our method improves generalization capabilities on compositional out-of-distribution tasks.
11
+
12
+ # 1 INTRODUCTION
13
+
14
+ Planning has been successful in control tasks where the dynamics of the environment are known (Sutton & Barto, 2018; Silver et al., 2016). Through planning, the agent can simulate numerous action sequences and assess potential outcomes without interacting with the environment, which can be costly and risky. When the environment dynamics are unknown, a world model (Ha & Schmidhuber, 2018; Hafner et al., 2018; 2019) can be learned to approximate the true dynamics. Planning then takes place within the world model by generating future predictions based on actions. This type of model-based planning is considered more data-efficient than model-free methods and tends to transfer well to other tasks in the same environment (Moerland et al., 2023; Hamrick et al., 2020).
15
+
16
+ For temporally extended tasks with sparse rewards, the planning horizon should be increased accordingly (Nachum et al., 2019; Vezhnevets et al., 2017b; Hafner et al., 2022). However, this may not be practical as it requires an exponentially larger number of samples of action sequences to cover all possible plans adequately. Gradient-based trajectory optimization addresses this issue but can encounter credit assignment problems. A promising solution is to use hierarchical planning (Singh, 1992; Pertsch et al., 2020; Sacerdoti, 1974; Knoblock, 1990), where a high-level plan selects subgoals that are several steps apart, and low-level plans determine actions to move from one subgoal to the next. Both the high-level plan and each of the low-level plans are shorter than the original flat plan, leading to more efficient sampling and gradient propagation.
17
+
18
+ Conventional model-based planning typically involves separate world models and planners. However, the learned reward model can be prone to hallucinations, making it easy for the planner to exploit it (Talvitie, 2014). Recently, Janner et al. (2022b) proposed Diffuser, a framework where a single diffusion probabilistic model (Sohl-Dickstein et al., 2015; Ho et al., 2020; Song et al., 2021) is learned to serve as both the world model and the planner. It generates the states and actions in the full plan in parallel through iterative refinement, thereby achieving better global coherence. Furthermore, it also allows leveraging the guided sampling strategy of Dhariwal & Nichol (2021) to adapt flexibly to the objective of the downstream task at test time.
21
+
22
+ Despite these advantages of Diffuser, how to enable hierarchical planning in a diffusion-based approach, so as to benefit from both diffusion-based and hierarchical planning simultaneously, remains elusive. Lacking this ability, Diffuser is computationally expensive and sample-inefficient due to its dense and flat planning scheme. Moreover, we empirically found that the planned trajectories produced by Diffuser have inadequate coverage of the dataset distribution. This deficiency is particularly detrimental to diffusion-based planning.
23
+
24
+ In this paper, we propose the Hierarchical Diffuser, a simple framework that enables hierarchical planning using diffusion models. The proposed model consists of two diffusers: one for high-level subgoal generation and another for low-level subgoal achievement. To implement this framework, we first split each training trajectory into segments of equal length and consider the segment's split points as subgoals. We then train the two diffusers simultaneously. The high-level diffuser is trained on the trajectories consisting of only subgoals, which allows for a "jumpy" subgoal planning strategy and a larger receptive field at a lower computational cost. This sparseness reduces the diffusion model's burden of learning and sampling from high-dimensional distributions of dense trajectories, making learning and sampling more efficient. The low-level diffuser is trained to model only the segments, making it the subgoal achiever and facilitating a fine-tuning stage that further improves our approach's effectiveness. At test time, the high-level diffuser plans the jumpy subgoals first, and then the low-level diffuser achieves each subgoal by planning actions.
25
+
26
+ The contributions of this work are as follows. First, we introduce a diffusion-based hierarchical planning framework for decision-making problems. Second, we demonstrate the effectiveness of our approach through superior performance compared to previous methods on standard offline-RL benchmarks, as well as efficient training and planning speed. For example, our proposed method outperforms the baseline by $12.0\%$ on Maze2D tasks and $9.2\%$ on MuJoCo locomotion tasks. Furthermore, we empirically identify a key factor influencing the performance of diffusion-based planning methods, and showcase our method's enhanced generalization capabilities on compositional out-of-distribution tasks. Lastly, we provide a theoretical analysis of the generalization performance.
27
+
28
+ # 2 PRELIMINARIES
29
+
30
+ # 2.1 DIFFUSION PROBABILISTIC MODELS
31
+
32
+ Diffusion probabilistic models (Sohl-Dickstein et al., 2015; Ho et al., 2020; Song et al., 2021) have achieved state-of-the-art generation quality on various image generation tasks (Dhariwal & Nichol, 2021; Rombach et al., 2022; Ramesh et al., 2022; Saharia et al., 2022). They model the data generative process as $M$ steps of iterative denoising, starting from Gaussian noise $\mathbf{x}_M\sim \mathcal{N}(\mathbf{0},\mathbf{I})$ :
33
+
34
+ $$
35
+ p _ {\theta} \left(\mathbf {x} _ {0}\right) = \int p \left(\mathbf {x} _ {M}\right) \prod_ {m = 0} ^ {M - 1} p _ {\theta} \left(\mathbf {x} _ {m} \mid \mathbf {x} _ {m + 1}\right) d \mathbf {x} _ {1: M}. \tag {1}
36
+ $$
37
+
38
+ Here, $\mathbf{x}_{1:M}$ are latent variables of the same dimensionality as the data $\mathbf{x}_0$ , and
39
+
40
+ $$
41
+ p _ {\theta} \left(\mathbf {x} _ {m} \mid \mathbf {x} _ {m + 1}\right) = \mathcal {N} \left(\mathbf {x} _ {m}; \boldsymbol {\mu} _ {\theta} \left(\mathbf {x} _ {m + 1}\right), \sigma_ {m} ^ {2} \mathbf {I}\right) \tag {2}
42
+ $$
43
+
44
+ is commonly a Gaussian distribution with learnable mean and fixed covariance. The posterior of the latents is given by a predefined diffusion process that gradually adds Gaussian noise to the data:
45
+
46
+ $$
47
+ q \left(\mathbf {x} _ {m} \mid \mathbf {x} _ {0}\right) = \mathcal {N} \left(\mathbf {x} _ {m}; \sqrt {\bar {\alpha} _ {m}} \mathbf {x} _ {0}, (1 - \bar {\alpha} _ {m}) \mathbf {I}\right), \tag {3}
48
+ $$
49
+
50
+ where the predefined $\bar{\alpha}_m\to 0$ as $m\to \infty$ , making $q(\mathbf{x}_M\mid \mathbf{x}_0)\approx \mathcal{N}(\mathbf{0},\mathbf{I})$ for a sufficiently large $M$ .
51
+
52
+ In practice, the learnable mean $\mu_{\theta}(\mathbf{x}_m)$ is often parameterized as a linear combination of the latent $\mathbf{x}_m$ and the output of a noise-prediction U-Net $\epsilon_{\theta}(\mathbf{x}_m)$ (Ronneberger et al., 2015). The training objective is simply to make $\epsilon_{\theta}(\mathbf{x}_m)$ predict the noise $\epsilon$ that was used to corrupt $\mathbf{x}_0$ into $\mathbf{x}_m$ :
53
+
54
+ $$
55
+ \mathcal {L} (\theta) = \mathbb {E} _ {\mathbf {x} _ {0}, m, \epsilon} \left[ \| \epsilon - \epsilon_ {\theta} (\mathbf {x} _ {m}) \| ^ {2} \right], \tag {4}
56
+ $$
57
+
58
+ where $\mathbf{x}_m = \sqrt{\bar{\alpha}_m}\mathbf{x}_0 + \sqrt{1 - \bar{\alpha}_m}\pmb {\epsilon},\pmb {\epsilon}\sim \mathcal{N}(\mathbf{0},\mathbf{I})$
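A minimal sketch of the training objective in Equation 4 is shown below; the toy noise-prediction model, trajectory shapes, and noise schedule are illustrative assumptions.

```python
import torch

def diffusion_training_loss(eps_model, x0, alpha_bar):
    """Noise-prediction objective of Eq. (4) for one batch.

    eps_model: callable mapping a noisy sample x_m (and its diffusion step m)
    to a noise prediction; here a toy closure stands in for the U-Net.
    alpha_bar: (M,) tensor of predefined cumulative noise-schedule values.
    """
    m = torch.randint(0, alpha_bar.numel(), (x0.size(0),))      # random steps
    a = alpha_bar[m].view(-1, *([1] * (x0.dim() - 1)))
    eps = torch.randn_like(x0)
    x_m = a.sqrt() * x0 + (1.0 - a).sqrt() * eps                 # corrupt x0
    return ((eps - eps_model(x_m, m)) ** 2).mean()               # Eq. (4)

if __name__ == "__main__":
    x0 = torch.randn(4, 2, 32)                  # toy (state/action, horizon) arrays
    alpha_bar = torch.linspace(0.999, 0.01, 100)
    toy_model = lambda x, m: torch.zeros_like(x)   # stand-in noise predictor
    print(diffusion_training_loss(toy_model, x0, alpha_bar).item())
```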
59
+
60
+ # 2.2 DIFFUSER: PLANNING WITH DIFFUSION
61
+
62
+ Diffuser (Janner et al., 2022b) is a pioneering model for learning a diffusion-based planner from offline trajectory data. It has shown superior long-horizon planning capability and test-time flexibility. The key idea is to format the trajectories of states and actions into a two-dimensional array, where each column consists of the state-action pair at a single timestep:
63
+
64
+ $$
65
+ \mathbf {x} = \left[ \begin{array}{l l l l} \mathbf {s} _ {0} & \mathbf {s} _ {1} & \dots & \mathbf {s} _ {T} \\ \mathbf {a} _ {0} & \mathbf {a} _ {1} & \dots & \mathbf {a} _ {T} \end{array} \right]. \tag {5}
66
+ $$
67
+
68
+ Diffuser then trains a diffusion probabilistic model $p_{\theta}(\mathbf{x})$ from an offline dataset. After training, $p_{\theta}(\mathbf{x})$ is able to jointly generate plausible state and action trajectories through iterative denoising. Importantly, $p_{\theta}(\mathbf{x})$ does not model the reward, and therefore is task-agnostic. To employ $p_{\theta}(\mathbf{x})$ for planning on a specific task, Diffuser trains a separate guidance function $\mathcal{J}_{\phi}(\mathbf{x})$ and samples the planned trajectories from a perturbed distribution:
69
+
70
+ $$
71
+ \tilde {p} _ {\theta} (\mathbf {x}) \propto p _ {\theta} (\mathbf {x}) \exp \left(\mathcal {J} _ {\phi} (\mathbf {x})\right). \tag {6}
72
+ $$
73
+
74
+ Typically, $\mathcal{J}_{\phi}(\mathbf{x})$ estimates the expected return of the trajectory, so that the planned trajectories will be biased toward those that are plausible and also have high returns. In practice, $\mathcal{J}_{\phi}(\mathbf{x})$ is implemented as a regression network trained to predict the return $R(\mathbf{x})$ of the original trajectory $\mathbf{x}$ given a noise-corrupted trajectory $\mathbf{x}_m$ as input:
75
+
76
+ $$
77
+ \mathcal {L} (\phi) = \mathbb {E} _ {\mathbf {x}, m, \epsilon} \left[ \| R (\mathbf {x}) - \mathcal {J} _ {\phi} (\mathbf {x} _ {m}) \| ^ {2} \right], \tag {7}
78
+ $$
79
+
80
+ where $R(\mathbf{x})$ can be obtained from the offline dataset, $\mathbf{x}_m = \sqrt{\bar{\alpha}_m}\mathbf{x} + \sqrt{1 - \bar{\alpha}_m}\pmb {\epsilon}$, and $\pmb {\epsilon}\sim \mathcal{N}(\mathbf{0},\mathbf{I})$.
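+
+ The guidance-function objective of Equation 7 uses the same corruption scheme; a schematic (with a placeholder regressor `J_phi`) might look like:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def J_phi(x_m, m):
+     """Placeholder return regressor over a noise-corrupted trajectory array."""
+     return 0.0
+
+ def guidance_loss(x, R, alpha_bars):
+     m = rng.integers(len(alpha_bars))
+     eps = rng.standard_normal(x.shape)
+     x_m = np.sqrt(alpha_bars[m]) * x + np.sqrt(1.0 - alpha_bars[m]) * eps
+     return (R - J_phi(x_m, m)) ** 2                   # Equation 7
+
+ alpha_bars = np.cumprod(1.0 - np.linspace(1e-4, 2e-2, 1000))
+ x = rng.standard_normal((5, 17))
+ print(guidance_loss(x, R=1.0, alpha_bars=alpha_bars))
+ ```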
81
+
82
+ Sampling from $\tilde{p}_{\theta}(\mathbf{x})$ is achieved similarly to classifier guidance (Dhariwal & Nichol, 2021; Sohl-Dickstein et al., 2015), where the gradient $\nabla_{\mathbf{x}_m}\mathcal{J}_{\phi}$ is used to guide the denoising process (Equation 2) by modifying the mean from $\mu_{m}$ to $\tilde{\mu}_{m}$:
83
+
84
+ $$
85
+ \boldsymbol {\mu} _ {m} \leftarrow \boldsymbol {\mu} _ {\theta} (\mathbf {x} _ {m + 1}), \quad \tilde {\boldsymbol {\mu}} _ {m} \leftarrow \boldsymbol {\mu} _ {m} + \omega \sigma_ {m} ^ {2} \nabla_ {\mathbf {x} _ {m}} \mathcal {J} _ {\phi} (\mathbf {x} _ {m}) | _ {\mathbf {x} _ {m} = \boldsymbol {\mu} _ {m}}. \tag {8}
86
+ $$
87
+
88
+ Here, $\omega$ is a hyperparameter that controls the scaling of the gradient. To ensure that the planning trajectory starts from the current state $\mathbf{s}$ , Diffuser sets $\mathbf{s}_0 = \mathbf{s}$ in each $\mathbf{x}_m$ during the denoising process. After sampling a full trajectory, Diffuser executes the first action in the environment, and replans at the next state $\mathbf{s}'$ . In simple environments where replanning is unnecessary, the planned action sequence can be directly executed.
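+
+ Putting Equations 2 and 8 together, a single guided denoising step with start-state conditioning can be sketched as follows (schematic only; `mu_theta`, `grad_J`, and the noise scales are placeholders supplied by the caller):
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def guided_denoise_step(x_next, m, mu_theta, grad_J, sigma, omega, s, state_dim):
+     """One reverse step (Equation 2) with the guided mean of Equation 8.
+
+     x_next is the noisy trajectory array x_{m+1} of shape
+     (state_dim + action_dim, T + 1); columns are timesteps.
+     """
+     mu = mu_theta(x_next, m)                                 # mu_m
+     mu_tilde = mu + omega * sigma[m] ** 2 * grad_J(mu, m)    # Equation 8, gradient taken at x_m = mu_m
+     x_m = mu_tilde + sigma[m] * rng.standard_normal(x_next.shape)
+     x_m[:state_dim, 0] = s       # pin the first planned state to the current state s
+     return x_m
+ ```
+
+ In a full sampler this step is applied for $m = M-1, \dots, 0$, with the conditioning re-imposed after every step so that the constraint survives the added noise.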
89
+
90
+ # 3 HIERARCHICAL DIFFUSER
91
+
92
+ While Diffuser has demonstrated competence in long-horizon planning and test-time flexibility, we have empirically observed that its planned trajectories inadequately cover the dataset distribution, potentially missing high-return trajectories. Besides, the dense and flat planning scheme of the standard Diffuser is computationally expensive, especially when the planning horizon is long. Our key observation is that hierarchical planning could be an effective way to address these issues. To achieve this, we propose Hierarchical Diffuser, a simple yet effective framework that enables hierarchical planning while maintaining the benefits of diffusion-based planning. As shown in Figure 1, it consists of two Diffusers: one for high-level subgoal generation (Section 3.1) and the other for low-level subgoal achievement (Section 3.2).
93
+
94
+ # 3.1 SPARSE DIFFUSER FOR SUBGOAL GENERATION
95
+
96
+ To perform hierarchical planning, the high-level planner needs to generate a sequence of intermediate states $(\mathbf{g}_1,\dots ,\mathbf{g}_H)$ that serve as subgoals for the low-level planner to achieve. Here, $H$ denotes the planning horizon. Instead of involving complicated procedures for finding high-quality subgoals (Li et al., 2023) or skills (Rajeswar et al., 2023; Laskin et al., 2021), we opt for a simple approach that repurposes Diffuser for subgoal generation with minimal modification. In essence, we define the subgoals to be every $K$-th state and model the distribution of subsampled trajectories:
97
+
98
+ $$
99
+ \mathbf {x} ^ {\mathrm {S D}} = \left[ \begin{array}{l l l l} \mathbf {s} _ {0} & \mathbf {s} _ {K} & \dots & \mathbf {s} _ {H K} \\ \mathbf {a} _ {0} & \mathbf {a} _ {K} & \dots & \mathbf {a} _ {H K} \end{array} \right] =: \left[ \begin{array}{l l l l} \mathbf {g} _ {0} & \mathbf {g} _ {1} & \dots & \mathbf {g} _ {H} \\ \mathbf {a} _ {0} & \mathbf {a} _ {K} & \dots & \mathbf {a} _ {H K} \end{array} \right]. \tag {9}
100
+ $$
101
+
102
+ ![](images/e84f81a0b580f8cc578a2717b883e9adaa86e28e80b046f9600d70d5681a64f1.jpg)
103
+ Figure 1: Test- and train-time differences between Diffuser models. Hierarchical Diffuser (HD) is a general hierarchical diffusion-based planning framework. Unlike Diffuser's training process (A, left), HD's training phase reorganizes the training trajectory into two components: a subgoal trajectory and dense segments. These components are then used to train the high-level and low-level denoising networks in parallel (B, left). During the testing phase, in contrast to Diffuser (A, right), HD first generates a high-level plan consisting of subgoals, which is subsequently refined by the low-level planner (B, right).
104
+
105
+ We name the resulting model Sparse Diffuser (SD). While using every $K$-th state as a subgoal is a simplifying assumption, it is widely adopted in hierarchical RL due to its practical effectiveness (Zhang et al., 2023; Hafner et al., 2022; Li et al., 2022; Mandlekar et al., 2020; Vezhnevets et al., 2017a). We will empirically show that, despite this simplicity, our approach is effective and efficient in practice, substantially outperforming HDMI (Li et al., 2023), a state-of-the-art method that adaptively selects subgoals.
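+
+ Concretely, if a training trajectory is stored as the array of Equation 5, the subsampled input of Equation 9 is simply a strided slice (a toy sketch; the shapes and variable names are ours):
+
+ ```python
+ import numpy as np
+
+ def subsample_sparse(x, K):
+     """Keep every K-th column of a dense trajectory array (Equation 9)."""
+     return x[:, ::K]
+
+ # toy example: 3-dim states + 2-dim actions over HK + 1 = 13 timesteps, with K = 4
+ x = np.arange(5 * 13, dtype=float).reshape(5, 13)
+ x_sd = subsample_sparse(x, K=4)
+ print(x_sd.shape)   # (5, 4): columns 0, K, 2K, 3K, i.e. subgoals g_0 ... g_H with H = 3
+ ```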
106
+
107
+ The training procedure of Sparse Diffuser is almost the same as that of Diffuser. The only difference is that we provide the subsampled data $\mathbf{x}^{\mathrm{SD}}$ to the diffusion probabilistic model $p_{\theta_{\mathrm{SD}}}(\mathbf{x}^{\mathrm{SD}})$ and the guidance function $\mathcal{J}_{\phi_{\mathrm{SD}}}(\mathbf{x}^{\mathrm{SD}})$. It is important to note that, although the guidance function uses the subsampled data as input, it is still trained to predict the return of the full trajectory. Therefore, its gradient $\nabla_{\mathbf{x}^{\mathrm{SD}}} \mathcal{J}_{\phi_{\mathrm{SD}}}$ will point toward subgoal sequences that are part of high-return trajectories. However, due to the missing states and actions, the return prediction may become less accurate than in Diffuser. In all of our experiments, we found that even when this is the case, it does not adversely affect task performance compared to Diffuser. Moreover, our investigation suggests that including dense actions in $\mathbf{x}^{\mathrm{SD}}$ can improve return prediction and, in some environments, further improve task performance. We provide a detailed description in Section 3.3 and an ablation study in Section 4.3.
108
+
109
+ It is worth noting that Sparse Diffuser can itself serve as a standalone planner, without the need to involve any low-level planner. This is because Sparse Diffuser can generate the first action $\mathbf{a}_0$ of the plan, which is sufficient if we replan at each step. Interestingly, Sparse Diffuser already greatly outperforms Diffuser, mainly due to its increased receptive field (Section 4.3). While the receptive field of Diffuser can also be increased, this comes at the cost of generalization performance and efficiency due to the increased model size (Appendices E and F).
110
+
111
+ # 3.2 FROM SPARSE DIFFUSER TO HIERARCHICAL DIFFUSER
112
+
113
+ While Sparse Diffuser can be used as a standalone planner, it only models the environment dynamics at a coarse level. This is beneficial for generating a high-level plan of subgoals, but it is likely that some low-level details are not taken into consideration. Therefore, we use a low-level planner to further refine the high-level plan, carving out the optimal dense trajectories that go from one subgoal to the next. This also allows us to avoid per-step replanning when it is not necessary. We call this two-level model Hierarchical Diffuser (HD).
114
+
115
+ Low-level Planner. The low-level planner is simply implemented as a Diffuser $p_{\theta}(\mathbf{x}^{(i)})$ trained on trajectory segments $\mathbf{x}^{(i)}$ between each pair of adjacent subgoals $\mathbf{g}_i = \mathbf{s}_{iK}$ and $\mathbf{g}_{i + 1} = \mathbf{s}_{(i + 1)K}$ :
116
+
117
+ $$
118
+ \mathbf {x} ^ {(i)} = \left[ \begin{array}{l l l l} \mathbf {s} _ {i K} & \mathbf {s} _ {i K + 1} & \dots & \mathbf {s} _ {(i + 1) K} \\ \mathbf {a} _ {i K} & \mathbf {a} _ {i K + 1} & \dots & \mathbf {a} _ {(i + 1) K} \end{array} \right], \quad 0 \leq i < H. \tag {10}
119
+ $$
120
+
121
+ We also train a low-level guidance function $\mathcal{I}_{\phi}(\mathbf{x}^{(i)})$ that predicts the return $R(\mathbf{x}^{(i)})$ for each segment. The low-level Diffuser and guidance function are both shared across all trajectory segments, and they can be trained in parallel with the high-level planner.
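+
+ The low-level training data can be obtained from the same trajectories by cutting them into overlapping length-$(K+1)$ windows, so that adjacent segments share a subgoal column (a sketch under the same array convention as above):
+
+ ```python
+ import numpy as np
+
+ def split_segments(x, K):
+     """Return the H segments x^(i) of Equation 10, each spanning columns iK ... (i+1)K."""
+     H = (x.shape[1] - 1) // K
+     return [x[:, i * K:(i + 1) * K + 1] for i in range(H)]
+
+ x = np.arange(5 * 13, dtype=float).reshape(5, 13)   # K = 4, H = 3 as in the previous sketch
+ segments = split_segments(x, K=4)
+ print(len(segments), segments[0].shape)             # 3 (5, 5)
+ ```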
122
+
123
+ Hierarchical Planning. After training the high-level and low-level planners, we use them to perform hierarchical planning as follows. Given a starting state $\mathbf{g}_0$ , we first use the high-level planner to generate subgoals $\mathbf{g}_{1:H}$ . This can be achieved by sampling from the perturbed distribution $\tilde{p}_{\theta_{\mathrm{SD}}}(\mathbf{x}^{\mathrm{SD}}) \propto p_{\theta_{\mathrm{SD}}}(\mathbf{x}^{\mathrm{SD}}) \exp(\mathcal{I}_{\phi_{\mathrm{SD}}}(\mathbf{x}^{\mathrm{SD}}))$ , and then discarding the actions. Since the actions generated by the high-level planner are not used anywhere, in practice we remove the actions from subsampled trajectories $\mathbf{x}^{\mathrm{SD}}$ when training the high-level planner. In other words, we redefine
124
+
125
+ $$
126
+ \mathbf {x} ^ {\mathrm {S D}} = \left[ \begin{array}{l l l l} \mathbf {s} _ {0} & \mathbf {s} _ {K} & \dots & \mathbf {s} _ {H K} \end{array} \right] =: \left[ \begin{array}{l l l l} \mathbf {g} _ {0} & \mathbf {g} _ {1} & \dots & \mathbf {g} _ {H} \end{array} \right]. \tag {11}
127
+ $$
128
+
129
+ Next, for each pair of adjacent subgoals $\mathbf{g}_i$ and $\mathbf{g}_{i + 1}$ , we use the low-level planner to generate a dense trajectory that connects them, by sampling from the distribution $\tilde{p}_{\theta}(\mathbf{x}^{(i)})\propto p_{\theta}(\mathbf{x}^{(i)})\exp (\mathcal{I}_{\phi}(\mathbf{x}^{(i)}))$ . To ensure that the generated $\mathbf{x}^{(i)}$ indeed has $\mathbf{g}_i$ and $\mathbf{g}_{i + 1}$ as its endpoints, we set $\mathbf{s}_{iK} = \mathbf{g}_i$ and $\mathbf{s}_{(i + 1)K} = \mathbf{g}_{i + 1}$ in each denoising step during sampling. Importantly, all low-level plans $\{\mathbf{x}^{(i)}\}_{i = 0}^{H - 1}$ can be generated in parallel. In environments that require per-step replanning, we only need to sample $\mathbf{x}^{(0)}\sim \tilde{p}_{\theta}(\mathbf{x}^{(0)})$ , then execute the first action $\mathbf{a}_0$ in the environment, and replan at the next state. We highlight the interaction between the high-level and low-level planners in Appendix B.
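+
+ The test-time composition of the two planners can be summarized as follows (a control-flow sketch; `sample_high_level` and `sample_low_level` stand in for the guided reverse-diffusion sampling loops described above):
+
+ ```python
+ def hierarchical_plan(s0, sample_high_level, sample_low_level):
+     """Generate subgoals first, then refine every adjacent pair into a dense segment."""
+     goals = sample_high_level(s0)                   # g_0 ... g_H, with g_0 fixed to s0
+     segments = [
+         sample_low_level(goals[i], goals[i + 1])    # endpoints pinned at every denoising step
+         for i in range(len(goals) - 1)              # segments are independent, so this can run in parallel
+     ]
+     return segments                                 # execute a_0 of segments[0], then replan if needed
+
+ # toy stand-ins, just to show the control flow
+ plan = hierarchical_plan(
+     s0=0.0,
+     sample_high_level=lambda s: [s, 1.0, 2.0, 3.0],
+     sample_low_level=lambda g, g_next: (g, g_next),
+ )
+ print(plan)   # [(0.0, 1.0), (1.0, 2.0), (2.0, 3.0)]
+ ```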
130
+
131
+ # 3.3 IMPROVING RETURN PREDICTION WITH DENSE ACTIONS
132
+
133
+ Sparse Diffuser with Dense Actions (SD-DA). The missing states and actions in the subsampled trajectories $\mathbf{x}^{\mathrm{SD}}$ might pose difficulties in accurately predicting returns in certain cases. Therefore, we investigate a potential model improvement that subsamples trajectories with sparse states and dense actions. The hypothesis is that the dense actions can implicitly provide information about what has occurred in the intermediate states, thereby facilitating return prediction. Meanwhile, the sparse states preserve the model's ability to generate subgoals. We format the sparse states and dense actions into the following two-dimensional array structure:
134
+
135
+ $$
136
+ \mathbf {x} ^ {\mathrm {S D - D A}} = \left[ \begin{array}{c c c c} \mathbf {s} _ {0} & \mathbf {s} _ {K} & \dots & \mathbf {s} _ {H K} \\ \mathbf {a} _ {0} & \mathbf {a} _ {K} & \dots & \mathbf {a} _ {H K} \\ \mathbf {a} _ {1} & \mathbf {a} _ {K + 1} & \dots & \mathbf {a} _ {H K + 1} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf {a} _ {K - 1} & \mathbf {a} _ {2 K - 1} & \dots & \mathbf {a} _ {(H + 1) K - 1} \end{array} \right] =: \left[ \begin{array}{c c c c} \mathbf {g} _ {0} & \mathbf {g} _ {1} & \dots & \mathbf {g} _ {H} \\ \mathbf {a} _ {0} & \mathbf {a} _ {K} & \dots & \mathbf {a} _ {H K} \\ \mathbf {a} _ {1} & \mathbf {a} _ {K + 1} & \dots & \mathbf {a} _ {H K + 1} \\ \vdots & \vdots & \ddots & \vdots \\ \boldsymbol {\mathbf {a}} _ {K - 1} & \boldsymbol {\mathbf {a}} _ {2 K - 1} & \dots & \boldsymbol {\mathbf {a}} _ {(H + 1) K - 1} \end{array} \right],
137
+ $$
138
+
139
+ where $\mathbf{a}_{\geq HK}$ in the last column are included for padding. Training proceeds similarly as Sparse Diffuser, where we train a diffusion model $p_{\theta_{\mathrm{SD - DA}}}\left(\mathbf{x}^{\mathrm{SD - DA}}\right)$ to capture the distribution of $\mathbf{x}^{\mathrm{SD - DA}}$ in the offline dataset and a guidance function $\mathcal{J}_{\phi_{\mathrm{SD - DA}}}\left(\mathbf{x}^{\mathrm{SD - DA}}\right)$ to predict the return of the full trajectory.
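+
+ Under the same array convention as before, the SD-DA input can be assembled by stacking every $K$-th state on top of the $K$ actions that follow it, flattened per timestep (a sketch; the last column relies on the padded actions $\mathbf{a}_{\geq HK}$ mentioned above):
+
+ ```python
+ import numpy as np
+
+ def build_sd_da(states, actions, K):
+     """Stack each subgoal s_{iK} with the K actions a_{iK} ... a_{iK+K-1} below it."""
+     H = (states.shape[1] - 1) // K
+     cols = []
+     for i in range(H + 1):
+         s_col = states[:, i * K]
+         a_block = actions[:, i * K:i * K + K]            # K consecutive actions
+         cols.append(np.concatenate([s_col, a_block.flatten(order="F")]))
+     return np.stack(cols, axis=1)
+
+ # toy shapes: 3-dim states over HK + 1 = 9 steps, 2-dim actions over (H + 1)K = 12 steps, K = 4
+ states = np.zeros((3, 9))
+ actions = np.zeros((2, 12))
+ print(build_sd_da(states, actions, K=4).shape)           # (3 + 2 * 4, H + 1) = (11, 3)
+ ```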
140
+
141
+ Hierarchical Diffuser with Dense Actions (HD-DA). This is obtained by replacing the high-level planner in Hierarchical Diffuser with SD-DA. The subgoals are generated by sampling from $\tilde{p}_{\theta_{\mathrm{SD - DA}}}\big(\mathbf{x}^{\mathrm{SD - DA}}\big)\propto p_{\theta_{\mathrm{SD - DA}}}\big(\mathbf{x}^{\mathrm{SD - DA}}\big)\exp \big(\mathcal{J}_{\phi_{\mathrm{SD - DA}}}\big(\mathbf{x}^{\mathrm{SD - DA}}\big)\big)$ , and then discarding the actions.
142
+
143
+ # 3.4 THEORETICAL ANALYSIS
144
+
145
+ Theorem 1 in Appendix F demonstrates that the proposed method can improve the generalization capability of the baseline. Moreover, our analysis also sheds light on the tradeoffs in the value of $K$ and the kernel size. A larger value of $K$ is expected to yield a better generalization gap for the diffusion process, but a greater loss of state-action detail for performing RL tasks. A larger kernel size is expected to yield a worse generalization gap for the diffusion process, but a larger receptive field for performing RL tasks. See Appendix F for more details.
146
+
147
+ Table 1: Long-horizon Planning. HD combines the benefits of both hierarchical and diffusion-based planning, achieving the best performance across all tasks. HD results are averaged over 100 planning seeds.
148
+
149
+ <table><tr><td rowspan="2" colspan="2">Environment</td><td colspan="3">Flat Learning Methods</td><td colspan="4">Hierarchical Learning Methods</td></tr><tr><td>MPPI</td><td>IQL</td><td>Diffuser</td><td>IRIS</td><td>HiGoC</td><td>HDMI</td><td>HD (Ours)</td></tr><tr><td>Maze2D</td><td>U-Maze</td><td>33.2</td><td>47.4</td><td>113.9±3.1</td><td>-</td><td>-</td><td>120.1±2.5</td><td>128.4±3.6</td></tr><tr><td>Maze2D</td><td>Medium</td><td>10.2</td><td>34.9</td><td>121.5±2.7</td><td>-</td><td>-</td><td>121.8±1.6</td><td>135.6±3.0</td></tr><tr><td>Maze2D</td><td>Large</td><td>5.1</td><td>58.6</td><td>123.0±6.4</td><td>-</td><td>-</td><td>128.6±2.9</td><td>155.8±2.5</td></tr><tr><td colspan="2">Single-task Average</td><td>16.2</td><td>47.0</td><td>119.5</td><td>-</td><td>-</td><td>123.5</td><td>139.9</td></tr><tr><td>Multi2D</td><td>U-Maze</td><td>41.2</td><td>24.8</td><td>128.9±1.8</td><td>-</td><td>-</td><td>131.3±1.8</td><td>144.1±1.2</td></tr><tr><td>Multi2D</td><td>Medium</td><td>15.4</td><td>12.1</td><td>127.2±3.4</td><td>-</td><td>-</td><td>131.6±1.9</td><td>140.2±1.6</td></tr><tr><td>Multi2D</td><td>Large</td><td>8.0</td><td>13.9</td><td>132.1±5.8</td><td>-</td><td>-</td><td>135.4±2.5</td><td>165.5±0.6</td></tr><tr><td colspan="2">Multi-task Average</td><td>21.5</td><td>16.9</td><td>129.4</td><td>-</td><td>-</td><td>132.8</td><td>149.9</td></tr><tr><td>AntMaze</td><td>U-Maze</td><td>-</td><td>62.2</td><td>76.0±7.6</td><td>89.4±2.4</td><td>91.2±1.9</td><td>-</td><td>94.0±4.9</td></tr><tr><td>AntMaze</td><td>Medium</td><td>-</td><td>70.0</td><td>31.9±5.1</td><td>64.8±2.6</td><td>79.3±2.5</td><td>-</td><td>88.7±8.1</td></tr><tr><td>AntMaze</td><td>Large</td><td>-</td><td>47.5</td><td>0.0±0.0</td><td>43.7±1.3</td><td>67.3±3.1</td><td>-</td><td>83.6±5.8</td></tr><tr><td colspan="2">AntMaze Average</td><td>-</td><td>59.9</td><td>36.0</td><td>66.0</td><td>79.3</td><td>-</td><td>88.8</td></tr></table>
150
+
151
+ # 4 EXPERIMENTS
152
+
153
+ In our experiment section, we illustrate how and why Hierarchical Diffuser (HD) improves over Diffuser through hierarchical planning. We start with our main results on the D4RL (Fu et al., 2020) benchmark. Subsequent sections provide an in-depth analysis, highlighting the benefits of a larger receptive field (RF) for diffusion-based planners on offline RL tasks. However, our compositional out-of-distribution (OOD) task reveals that, unlike HD, Diffuser struggles to enlarge its RF without compromising generalization ability. Lastly, we report HD's efficiency in accelerating both training and planning time compared with Diffuser. The performance of HD across different $K$ values is detailed in Appendix C. For the sake of reproducibility, we provide implementation and hyper-parameter details in Appendix A, and we will release our code upon acceptance.
154
+
155
+ # 4.1 LONG-HORIZON PLANNING
156
+
157
+ We first highlight the advantage of hierarchical planning on long-horizon tasks. Specifically, we evaluate on Maze2D and AntMaze (Fu et al., 2020), two sparse-reward navigation tasks that can take hundreds of steps to accomplish. The agent receives a reward of 1 only when it reaches a fixed goal, and no reward elsewhere, making these tasks challenging even for the best model-free algorithms (Janner et al., 2022b). AntMaze adds to the challenge with its higher-dimensional state and action spaces. Following Diffuser (Janner et al., 2022b), we also evaluate multi-task flexibility on Multi2D, a variant of Maze2D that randomizes the goal for each episode.
158
+
159
+ Results. As shown in Table 1, Hierarchical Diffuser (HD) significantly outperforms the previous state of the art across all tasks. The flat learning methods MPPI (Williams et al., 2016), IQL (Kostrikov et al., 2022), and Diffuser generally lag behind hierarchical learning methods, demonstrating the advantage of hierarchical planning. In addition, the failure of Diffuser in AntMaze-Large indicates that Diffuser struggles to simultaneously handle long-horizon planning and high-dimensional state and action spaces. Within hierarchical methods, HD outperforms the non-diffusion-based IRIS (Mandlekar et al., 2020) and HiGoC (Li et al., 2022), showing the benefit of planning with diffusion in the hierarchical setting. Compared with the diffusion-based HDMI (Li et al., 2023) that uses complex subgoal extraction procedures and more advanced model architectures, HD achieves a $>20\%$ performance gain on Maze2D-Large and Multi2D-Large despite its simplicity.
160
+
161
+ # 4.2 OFFLINE REINFORCEMENT LEARNING
162
+
163
+ We further demonstrate that hierarchical planning generally improves offline reinforcement learning even with dense rewards and short horizons. We evaluate on Gym-MuJoCo and FrankaKitchen (Fu et al., 2020), which emphasize the ability to learn from data of varying quality and to generalize to unseen states, respectively. We use HD-DA as it outperforms HD in the dense reward setting. In addition to Diffuser and HDMI, we compare to leading methods in each task domain, including model-free BCQ (Fujimoto et al., 2019), BEAR (Kumar et al., 2019), CQL (Kumar et al., 2020), IQL (Kostrikov et al., 2022), Decision Transformer (DT; Chen et al., 2021), model-based MoReL (Kidambi et al., 2020), Trajectory Transformer (TT; Janner et al., 2021), and Reinforcement Learning via Supervised Learning (RvS; Emmons et al., 2022).
164
+
165
+ Table 2: Offline Reinforcement Learning. HD-DA achieves the best overall performance. Results are averaged over 5 planning seeds. Following Kostrikov et al. (2022), we emphasize in bold scores within $5\%$ of maximum.
166
+
167
+ <table><tr><td colspan="2">Gym Tasks</td><td>BC</td><td>CQL</td><td>IQL</td><td>DT</td><td>TT</td><td>MOReL</td><td>Diffuser</td><td>HDMI</td><td>HD-DA (Ours)</td></tr><tr><td>Med-Expert</td><td>HalfCheetah</td><td>55.2</td><td>91.6</td><td>86.7</td><td>86.8</td><td>95.0</td><td>53.3</td><td>88.9±0.3</td><td>92.1±1.4</td><td>92.5±0.3</td></tr><tr><td>Med-Expert</td><td>Hopper</td><td>52.5</td><td>105.4</td><td>91.5</td><td>107.6</td><td>110.0</td><td>108.7</td><td>103.3±1.3</td><td>113.5±0.9</td><td>115.3±1.1</td></tr><tr><td>Med-Expert</td><td>Walker2d</td><td>107.5</td><td>108.8</td><td>109.6</td><td>108.1</td><td>101.9</td><td>95.6</td><td>106.9±0.2</td><td>107.9±1.2</td><td>107.1±0.1</td></tr><tr><td>Medium</td><td>HalfCheetah</td><td>42.6</td><td>44.0</td><td>47.4</td><td>42.6</td><td>46.9</td><td>42.1</td><td>42.8±0.3</td><td>48.0±0.9</td><td>46.7±0.2</td></tr><tr><td>Medium</td><td>Hopper</td><td>52.9</td><td>58.5</td><td>66.3</td><td>67.6</td><td>61.1</td><td>95.4</td><td>74.3±1.4</td><td>76.4±2.6</td><td>99.3±0.3</td></tr><tr><td>Medium</td><td>Walker2d</td><td>75.3</td><td>72.5</td><td>78.3</td><td>74.0</td><td>79.0</td><td>77.8</td><td>79.6±0.6</td><td>79.9±1.8</td><td>84.0±0.6</td></tr><tr><td>Med-Replay</td><td>HalfCheetah</td><td>36.6</td><td>45.5</td><td>44.2</td><td>36.6</td><td>41.9</td><td>40.2</td><td>37.7±0.5</td><td>44.9±2.0</td><td>38.1±0.7</td></tr><tr><td>Med-Replay</td><td>Hopper</td><td>18.1</td><td>95.0</td><td>94.7</td><td>82.7</td><td>91.5</td><td>93.6</td><td>93.6±0.4</td><td>99.6±1.5</td><td>94.7±0.7</td></tr><tr><td>Med-Replay</td><td>Walker2d</td><td>26.0</td><td>77.2</td><td>73.9</td><td>66.6</td><td>82.6</td><td>49.8</td><td>70.6±1.6</td><td>80.7±2.1</td><td>84.1±2.2</td></tr><tr><td colspan="2">Average</td><td>51.9</td><td>77.6</td><td>77.0</td><td>74.7</td><td>78.9</td><td>72.9</td><td>77.5</td><td>82.6</td><td>84.6</td></tr><tr><td colspan="2">Kitchen Tasks</td><td>BC</td><td>BCQ</td><td>BEAR</td><td>CQL</td><td>IQL</td><td>RvS-G</td><td>Diffuser</td><td>HDMI</td><td>HD-DA (Ours)</td></tr><tr><td>Partial</td><td>FrankaKitchen</td><td>33.8</td><td>18.9</td><td>13.1</td><td>49.8</td><td>46.3</td><td>46.5</td><td>56.2±5.4</td><td>-</td><td>73.3±1.4</td></tr><tr><td>Mixed</td><td>FrankaKitchen</td><td>47.5</td><td>8.1</td><td>47.2</td><td>51.0</td><td>51.0</td><td>40.0</td><td>50.0±8.8</td><td>69.2±1.8</td><td>71.7±2.7</td></tr><tr><td colspan="2">Average</td><td>40.7</td><td>13.5</td><td>30.2</td><td>50.4</td><td>48.7</td><td>43.3</td><td>53.1</td><td>-</td><td>72.5</td></tr></table>
168
+
169
+ Table 3: Ablation on Model Variants. SD yields an improvement over Diffuser, and the incorporation of low-level refinement in HD provides further enhancement in performance compared to SD.
170
+
171
+ <table><tr><td>Dataset</td><td>Diffuser</td><td>SD</td><td>HD</td></tr><tr><td>Gym-MuJoCo</td><td>77.5</td><td>80.7</td><td>81.7</td></tr><tr><td>Maze2D</td><td>119.5</td><td>133.4</td><td>139.9</td></tr><tr><td>Multi2D</td><td>129.4</td><td>145.8</td><td>149.9</td></tr></table>
172
+
173
+ Table 4: Guidance Function Learning. Including dense actions helps in learning the guidance function, resulting in better RL performance.
174
+
175
+ <table><tr><td rowspan="2">Dataset</td><td colspan="2">\( \mathcal{I}_{\phi} \)</td><td colspan="2">RL Performance</td></tr><tr><td>HD</td><td>HD-DA</td><td>HD</td><td>HD-DA</td></tr><tr><td>Hopper</td><td>101.7</td><td>88.8</td><td>93.4±3.1</td><td>94.7±0.7</td></tr><tr><td>Walker2d</td><td>166.1</td><td>133.0</td><td>77.2±3.3</td><td>84.1±2.2</td></tr><tr><td>HalfCheetah</td><td>228.5</td><td>208.2</td><td>37.5±1.7</td><td>38.1±0.7</td></tr></table>
176
+
179
+ Results. As shown in Table 2, HD-DA achieves the best average performance, significantly outperforming Diffuser while also surpassing the more complex HDMI. Notably, HD-DA obtains $>35\%$ improvement on FrankaKitchen over Diffuser, demonstrating its superior generalization ability.
180
+
181
+ # 4.3 ANALYSIS
182
+
183
+ To obtain a deeper understanding of HD's improvements over Diffuser, we start our analysis with ablation studies on various model configurations. Insights from this analysis guide us to investigate the impact of the effective receptive field on RL performance, specifically for diffusion-based planners. Furthermore, we introduce a compositional out-of-distribution (OOD) task to demonstrate HD's compositional generalization capabilities. We also evaluate HD's performance across varied jumpy-step values $K$ to test its robustness and adaptability.
184
+
185
+ SD already outperforms Diffuser. HD further improves SD via low-level refinement. This can be seen from Table 3, where we report the performance of Diffuser, SD, and HD averaged over Maze2D, Multi2D, and Gym-MuJoCo tasks respectively. As mentioned in Section 3.1, here we use SD as a standalone planner. In the following, we investigate potential reasons why SD outperforms Diffuser.
186
+
187
+ Large kernel size improves diffusion-based planning for in-distribution tasks. A key difference between SD and Diffuser is that the subsampling in SD increases its effective receptive field. This leads us to hypothesize that a larger receptive field may be beneficial for modeling the data distribution, resulting in better performance. To test this hypothesis, we experiment with different kernel sizes of Diffuser, and report the averaged performance on Maze2D, Multi2D, and Gym-MuJoCo in Figure 2. We find that Diffuser's performance generally improves as the kernel size increases up to a certain threshold. (Critical drawbacks associated with increasing Diffuser's kernel sizes will be discussed in detail in the subsequent section.) Its best performance is comparable to SD, but remains inferior to HD. In Figure 3, we further provide a qualitative comparison of the model's coverage of the data distribution. We plot the actual executed trajectories when the agent follows the model-generated plans. Our results show that HD is able to generate plans that cover all distinct paths between the start and goal state, exhibiting a distribution closely aligned with the dataset. Diffuser has a much worse coverage of the data distribution, but can be improved with a large kernel size.
190
+
191
+ ![](images/24c9f7d9b0fc4d89f8183854a732243929d9b600f94c1728375a86bbec83aacc.jpg)
192
+ Figure 2: Impact of Kernel Size. Results on the impact of kernel size on Diffuser's offline RL performance indicate that moderately enlarging the kernel size can improve performance.
193
+
194
+ ![](images/5c6d1775621bf6f9173787d91b42e804c5541acfe577be21dd598d1b9be2a820.jpg)
195
+
196
+ ![](images/db70aec2f04e546683c191f266f72643f311076d5a95b1932a63bf3b87c8e14a.jpg)
197
+
198
+ ![](images/92634af17b0297ae767fd1492a1c3ec71aa4386afce4ddb8d7fa686e8a67bcd8.jpg)
199
+ Figure 3: Coverage of Data Distribution. Empirically, we observed that Diffuser exhibits insufficient coverage of the dataset distribution. We illustrate this with an example featuring three distinct paths traversing from the start to the goal state. While Diffuser struggles to capture these divergent paths, both our method and Diffuser with an increased receptive field successfully recover this distribution.
200
+
201
+ Large kernel size hurts out-of-distribution generalization. While increasing the kernel size appears to be a simple way to improve Diffuser, it has many drawbacks such as higher memory consumption and slower training and planning. Most importantly, it introduces more model parameters, which can adversely affect the model's generalization capability. We demonstrate this in a task that requires the model to produce novel plans between unseen pairs of start and goal states at test time, by stitching together segments of training trajectories. We report the task success rate in Table 5, as well as the discrepancy between generated plans and optimal trajectories measured with cosine similarity and mean squared error (MSE). HD succeeds in all tasks, generating plans that are closest to the optimal trajectories, while Diffuser variants fail this task completely. Details can be found in Appendix E.
202
+
203
+ Table 5: Out-Of-Distribution (OOD) Task Performance. Only Hierarchical Diffuser (HD) can solve the compositional OOD task and generate plans that are closest to the optimal ones.
204
+
205
+ <table><tr><td>Metrics</td><td>Diffuser-KS5</td><td>Diffuser-KS13</td><td>Diffuser-KS19</td><td>Diffuser-KS25</td><td>HD</td></tr><tr><td>Success Rate</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>100.0%</td></tr><tr><td>Cosine Similarity</td><td>0.85</td><td>0.89</td><td>0.93</td><td>0.93</td><td>0.98</td></tr><tr><td>Deviation (MSE)</td><td>1269.9</td><td>1311.1</td><td>758.5</td><td>1023.2</td><td>198.2</td></tr></table>
206
+
207
+ Effect of Dense Actions. Though the dense actions generated by the high-level planner are discarded in the low-level refinement phase, we empirically find that including dense actions facilitates the learning of the guidance function. As shown in Table 4, the validation loss of the guidance function learned by HD-DA is lower than that of HD, leading to better RL performance. We conduct this experiment on the Medium-Replay datasets, where learning the value function is hard due to the mixed policies.
208
+
209
+ Efficiency Gains with Hierarchical Diffuser. A potential concern when introducing an additional round of sampling is the increase in planning time. However, the high-level plan, being $K$ times shorter, and the parallel generation of low-level segments counteract this concern. In Table 6, we observed a $10 \times$ speed-up over Diffuser in the medium and large maze settings, whose horizons exceed 250 time steps. Details of the time measurement are in Appendix D.
210
+
211
+ Table 6: Wall-clock Time Comparison. Hierarchical Diffuser (HD) is more computationally efficient compared to Diffuser during both training and testing stages.
212
+
213
+ <table><tr><td rowspan="2">Environment</td><td colspan="4">Training [s]</td><td colspan="4">Planning [s]</td></tr><tr><td>U-Maze</td><td>Med-Maze</td><td>L-Maze</td><td>MuJoCo</td><td>U-Maze</td><td>Med-Maze</td><td>L-Maze</td><td>MuJoCo</td></tr><tr><td>HD</td><td>8.0</td><td>8.7</td><td>8.6</td><td>9.9</td><td>0.8</td><td>3.1</td><td>3.3</td><td>1.0</td></tr><tr><td>Diffuser</td><td>26.6</td><td>132.7</td><td>119.7</td><td>12.3</td><td>1.1</td><td>9.9</td><td>9.9</td><td>1.3</td></tr></table>
214
+
215
+ # 5 RELATED WORKS
216
+
217
+ Diffusion Models. Diffusion models have recently emerged as a new type of generative model that supports sample generation, likelihood computation, and flexible control of model complexity. In diffusion models, the generation process is formulated as an iterative denoising process (Sohl-Dickstein et al., 2015; Ho et al., 2020). The diffusion process can also be guided in a desired direction, such as toward a specific class, by using either classifier-based guidance (Nichol et al., 2021) or classifier-free guidance (Ho & Salimans, 2022). Recently, diffusion models have been adopted for agent learning. Janner et al. (2022b) adopted them first and proposed Diffuser, the non-hierarchical version of our proposed model, while subsequent works by Ajay et al. (2022) and Lu et al. (2023) optimized the guided sampling process. Other works have utilized diffusion models specifically for RL (Wang et al., 2022; Chen et al., 2023), for observation-to-action imitation modeling (Pearce et al., 2022), and for allowing equivariance with respect to the product of the spatial symmetry group (Brehmer et al., 2023). A noteworthy contribution in this field is the hierarchical diffusion-based planning method of Li et al. (2023), which resonates closely with our work but distinguishes itself in the subgoal preprocessing: while it necessitates explicit graph search, our high-level diffuser discovers subgoals automatically.
218
+
219
+ Hierarchical Planning. Hierarchical planning has been successfully employed using temporal generative models, commonly referred to as world models Ha & Schmidhuber (2018); Hafner et al. (2019). These models forecast future states or observations based on historical states and actions. Recent years have seen the advent of hierarchical variations of these world models Chung et al. (2017); Kim et al. (2019); Saxena et al. (2021). Once trained, a world model can be used to train a separate policy with rollouts sampled from it Hafner et al. (2019); Deisenroth & Rasmussen (2011); Ghugare et al. (2023); Buckman et al. (2018); Hafner et al. (2022), or it can be leveraged for plan searching Schrittwieser et al. (2020); Wang & Ba (2020); Pertsch et al. (2020); Hu et al. (2023); Zhu et al. (2023). Our proposed method draws upon these principles, but also has connections to hierarchical skill-based planning such as latent skill planning Xie et al. (2020); Shi et al. (2022). However, a crucial distinction of our approach lies in the concurrent generation of all timesteps of a plan, unlike the aforementioned methods that require a sequential prediction of future states.
220
+
221
+ # 6 CONCLUSION
222
+
223
+ We introduce Hierarchical Diffuser, a comprehensive hierarchical framework that leverages the strengths of both hierarchical reinforcement learning and diffusion-based planning methods. Our approach, characterized by a larger receptive field at higher levels and a fine-tuning stage at the lower levels, has the capacity to not only capture optimal behavior from the offline dataset, but also retain the flexibility needed for compositional out-of-distribution (OOD) tasks. Expanding our methodology to the visual domain, which boasts a broader range of applications, constitutes another potential future direction.
224
+
225
+ **Limitations** Our Hierarchical Diffuser (HD) model has notable strengths but also presents some limitations. Foremost among these is its dependency on the quality of the dataset. Being an offline method, HD is restricted by the coverage and quality of the datasets. In situations where it encounters unfamiliar trajectories, HD may struggle to produce optimal plans. Another restriction is the choice of fixed sub-goal intervals. This decision simplifies the model's architecture but might fall short in handling a certain class of complex real-world scenarios. Furthermore, it introduces a task-dependent hyper-parameter. Lastly, the efficacy of HD is tied to the accuracy of the learned value function. This relationship places limits on the magnitude of the jump steps $K$; excessively skipping states poses a challenge for learning the value function.
226
+
227
+ # ACKNOWLEDGEMENTS
228
+
229
+ This work is supported by Brain Pool Plus Program (No. 2021H1D3A2A03103645) through the National Research Foundation of Korea (NRF) funded by the Ministry of Science and ICT. We would like to thank Michael Janner and Jindong Jiang for insightful discussions.
230
+
231
+ # REFERENCES
232
+
233
+ Anurag Ajay, Yilun Du, Abhi Gupta, Joshua Tenenbaum, Tommi Jaakkola, and Pulkit Agrawal. Is conditional generative modeling all you need for decision-making? arXiv preprint arXiv:2211.15657, 2022.
234
+ Johann Brehmer, Joey Bose, Pim De Haan, and Taco Cohen. EDGI: Equivariant diffusion for planning with embodied agents. In Workshop on Reincarnating Reinforcement Learning at ICLR 2023, 2023. URL https://openreview.net/forum?id=OrbWCpidbt.
235
+ Jacob Buckman, Danijar Hafner, George Tucker, Eugene Brevdo, and Honglak Lee. Sample-efficient reinforcement learning with stochastic ensemble value expansion. Advances in neural information processing systems, 31, 2018.
236
+ Huayu Chen, Cheng Lu, Chengyang Ying, Hang Su, and Jun Zhu. Offline reinforcement learning via high-fidelity generative behavior modeling. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=42zs3qa2kpy.
237
+ Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, and Igor Mordatch. Decision transformer: Reinforcement learning via sequence modeling. arXiv preprint arXiv:2106.01345, 2021.
238
+ Junyoung Chung, Sungjin Ahn, and Yoshua Bengio. Hierarchical multiscale recurrent neural networks. International Conference on Learning Representations, 2017.
239
+ Marc Deisenroth and Carl E Rasmussen. Pilco: A model-based and data-efficient approach to policy search. In Proceedings of the 28th International Conference on Machine Learning (ICML-11), pp. 465–472, 2011.
240
+ Prafulla Dhariwal and Alexander Nichol. Diffusion models beat GANs on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021.
241
+ Scott Emmons, Benjamin Eysenbach, Ilya Kostrikov, and Sergey Levine. Rvs: What is essential for offline RL via supervised learning? In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=S874XAlpkR-.
242
+ Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4RL: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020.
243
+ Scott Fujimoto, David Meger, and Doina Precup. Off-policy deep reinforcement learning without exploration. In International conference on machine learning, 2019.
244
+ Raj Ghugare, Homanga Bharadhwaj, Benjamin Eysenbach, Sergey Levine, and Russ Salakhutdinov. Simplifying model-based RL: Learning representations, latent-space models, and policies with one objective. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=MQcmfgRxf7a.
245
+ David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018.
246
+ Danijar Hafner, Timothy Lillicrap, Ian Fischer, Ruben Villegas, David Ha, Honglak Lee, and James Davidson. Learning latent dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018.
247
+ Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019.
248
+
249
+ Danijar Hafner, Kuang-Huei Lee, Ian Fischer, and Pieter Abbeel. Deep hierarchical planning from pixels. arXiv preprint arXiv:2206.04114, 2022.
250
+ Jessica B Hamrick, Abram L Friesen, Feryal Behbahani, Arthur Guez, Fabio Viola, Sims Witherspoon, Thomas Anthony, Lars Buesing, Petar Velickovic, and Théophane Weber. On the role of planning in model-based deep reinforcement learning. arXiv preprint arXiv:2011.04021, 2020.
251
+ Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022.
252
+ Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020.
253
+ Edward S. Hu, Richard Chang, Oleh Rybkin, and Dinesh Jayaraman. Planning goals for exploration. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=6qeBuZSo7Pr.
254
+ Michael Janner, Qiyang Li, and Sergey Levine. Offline reinforcement learning as one big sequence modeling problem. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan (eds.), Advances in Neural Information Processing Systems, 2021. URL https://openreview.net/forum?id=wgeK563QgSw.
255
+ Michael Janner, Yilun Du, Joshua Tenenbaum, and Sergey Levine. Planning with diffusion for flexible behavior synthesis. In International Conference on Machine Learning, 2022a.
256
+ Michael Janner, Yilun Du, Joshua B Tenenbaum, and Sergey Levine. Planning with diffusion for flexible behavior synthesis. arXiv preprint arXiv:2205.09991, 2022b.
257
+ Kenji Kawaguchi, Zhun Deng, Kyle Luh, and Jiaoyang Huang. Robustness implies generalization via data-dependent generalization bounds. In International Conference on Machine Learning, pp. 10866-10894. PMLR, 2022.
258
+ Rahul Kidambi, Aravind Rajeswaran, Praneeth Netrapalli, and Thorsten Joachims. Morel: Model-based offline reinforcement learning. Advances in neural information processing systems, 33: 21810-21823, 2020.
259
+ Taesup Kim, Sungjin Ahn, and Yoshua Bengio. Variational temporal abstraction. ICML Workshop on Generative Modeling and Model-Based Reasoning for Robotics and AI, 2019.
260
+ Craig A Knoblock. Learning abstraction hierarchies for problem solving. In AAAI, pp. 923-928, 1990.
261
+ Ilya Kostrikov, Ashvin Nair, and Sergey Levine. Offline reinforcement learning with implicit Q-learning. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=68n2s9ZJWF8.
262
+ Aviral Kumar, Justin Fu, Matthew Soh, George Tucker, and Sergey Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. Advances in Neural Information Processing Systems, 32, 2019.
263
+ Aviral Kumar, Aurick Zhou, George Tucker, and Sergey Levine. Conservative Q-learning for offline reinforcement learning. Advances in Neural Information Processing Systems, 33:1179-1191, 2020.
264
+ Michael Laskin, Denis Yarats, Hao Liu, Kimin Lee, Albert Zhan, Kevin Lu, Catherine Cang, Lerrel Pinto, and Pieter Abbeel. URLB: Unsupervised reinforcement learning benchmark. arXiv preprint arXiv:2110.15191, 2021.
265
+ Jinning Li, Chen Tang, Masayoshi Tomizuka, and Wei Zhan. Hierarchical planning through goal-conditioned offline reinforcement learning. IEEE Robotics and Automation Letters, 7(4):10216-10223, 2022.
266
+ Wenhao Li, Xiangfeng Wang, Bo Jin, and Hongyuan Zha. Hierarchical diffusion for offline decision making. In International Conference on Machine Learning, 2023.
267
+
268
+ Cheng Lu, Huayu Chen, Jianfei Chen, Hang Su, Chongxuan Li, and Jun Zhu. Contrastive energy prediction for exact energy-guided diffusion sampling in offline reinforcement learning. arXiv preprint arXiv:2304.12824, 2023.
269
+ Ajay Mandlekar, Fabio Ramos, Byron Boots, Silvio Savarese, Li Fei-Fei, Animesh Garg, and Dieter Fox. Iris: Implicit reinforcement without interaction at scale for learning control from offline robot manipulation data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 4414-4420, 2020. doi: 10.1109/ICRA40945.2020.9196935.
270
+ Thomas M Moerland, Joost Broekens, Aske Plaat, Catholijn M Jonker, et al. Model-based reinforcement learning: A survey. Foundations and Trends® in Machine Learning, 16(1):1-118, 2023.
271
+ Ofir Nachum, Haoran Tang, Xingyu Lu, Shixiang Gu, Honglak Lee, and Sergey Levine. Why does hierarchy (sometimes) work so well in reinforcement learning? arXiv preprint arXiv:1909.10618, 2019.
272
+ Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021.
273
+ Tim Pearce, Tabish Rashid, Anssi Kanervisto, David Bignell, Mingfei Sun, Raluca Georgescu, Sergio Valcarcel Macua, Shan Zheng Tan, Ida Momennejad, Katja Hofmann, and Sam Devlin. Imitating human behaviour with diffusion models. In Deep Reinforcement Learning Workshop NeurIPS 2022, 2022. URL https://openreview.net/forum?id=-pqCZ8tbd.
274
+ Karl Pertsch, Oleh Rybkin, Frederik Ebert, Shenghao Zhou, Dinesh Jayaraman, Chelsea Finn, and Sergey Levine. Long-horizon visual planning with goal-conditioned hierarchical predictors. Advances in Neural Information Processing Systems, 33:17321-17333, 2020.
275
+ Hieu Pham, Zihang Dai, Golnaz Ghiasi, Kenji Kawaguchi, Hanxiao Liu, Adams Wei Yu, Jiahui Yu, Yi-Ting Chen, Minh-Thang Luong, Yonghui Wu, Mingxing Tan, and Quoc V. Le. Combined scaling for open-vocabulary image classification. arXiv preprint arXiv:2111.10050, 2021. doi: 10.48550/ARXIV.2111.10050. URL https://arxiv.org/abs/2111.10050.
276
+ Sai Rajeswar, Pietro Mazzaglia, Tim Verbelen, Alexandre Piche, Bart Dhoedt, Aaron Courville, and Alexandre Lacoste. Mastering the unsupervised reinforcement learning benchmark from pixels. In International Conference on Machine Learning, 2023.
277
+ Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022.
278
+ Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022.
279
+ O. Ronneberger, P. Fischer, and T. Brox. U-Net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention (MICCAI), volume 9351 of LNCS, pp. 234-241. Springer, 2015.
280
+ Earl D Sacerdoti. Planning in a hierarchy of abstraction spaces. Artificial intelligence, 5(2):115-135, 1974.
281
+ Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022.
282
+ Vaibhav Saxena, Jimmy Ba, and Danijar Hafner. Clockwork variational autoencoders. Advances in Neural Information Processing Systems, 34:29246-29257, 2021.
283
+ Julian Schrittwieser, Ioannis Antonoglou, Thomas Hubert, Karen Simonyan, Laurent Sifre, Simon Schmitt, Arthur Guez, Edward Lockhart, Demis Hassabis, Thore Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned model. Nature, 588(7839):604-609, 2020.
284
+
285
+ Lucy Xiaoyang Shi, Joseph J. Lim, and Youngwoo Lee. Skill-based model-based reinforcement learning. In Conference on Robot Learning, 2022.
286
+ David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of go with deep neural networks and tree search. nature, 529(7587):484-489, 2016.
287
+ Satinder P Singh. Reinforcement learning with a hierarchy of abstract models. In Proceedings of the National Conference on Artificial Intelligence, number 10, pp. 202. CiteSeer, 1992.
288
+ Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015.
289
+ Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=PxTIG12RRHS.
290
+ Richard S Sutton and Andrew G Barto. Reinforcement learning: An introduction. MIT press, 2018.
291
+ Erik Talvitie. Model regularization for stable sample rollouts. In UAI, pp. 780-789, 2014.
292
+ Alexander Sasha Vezhnevets, Simon Osindero, Tom Schaul, Nicolas Heess, Max Jaderberg, David Silver, and Koray Kavukcuoglu. FeUdal networks for hierarchical reinforcement learning. In International Conference on Machine Learning, 2017a.
293
+ Alexander Sasha Vezhnevets, Simon Osindero, Tom Schaul, Nicolas Heess, Max Jaderberg, David Silver, and Koray Kavukcuoglu. Feudal networks for hierarchical reinforcement learning. In International Conference on Machine Learning, pp. 3540-3549. PMLR, 2017b.
294
+ Tingwu Wang and Jimmy Ba. Exploring model-based planning with policy networks. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=H1exf64KwH.
295
+ Zhendong Wang, Jonathan J Hunt, and Mingyuan Zhou. Diffusion policies as an expressive policy class for offline reinforcement learning. arXiv preprint arXiv:2208.06193, 2022.
296
+ Grady Williams, Paul Drews, Brian Goldfain, James M Rehg, and Evangelos A Theodorou. Aggressive driving with model predictive path integral control. In 2016 IEEE International Conference on Robotics and Automation (ICRA), pp. 1433-1440. IEEE, 2016.
297
+ Kevin Xie, Homanga Bharadhwaj, Danijar Hafner, Animesh Garg, and Florian Shkurti. Latent skill planning for exploration and transfer. arXiv preprint arXiv:2011.13897, 2020.
298
+ Jingwei Zhang, Jost Tobias Springenberg, Arunkumar Byravan, Leonard Hasenclever, Abbas Abdelmaleki, Dushyant Rao, Nicolas Heess, and Martin Riedmiller. Leveraging jumpy models for planning and fast learning in robotic domains. arXiv preprint arXiv:2302.12617, 2023.
299
+ Jinhua Zhu, Yue Wang, Lijun Wu, Tao Qin, Wengang Zhou, Tie-Yan Liu, and Houqiang Li. Making better decision by directly planning in continuous control. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=r8Mu7idxyF.
300
+
301
+ This appendix provides a detailed elaboration on several aspects of our study. In Section A, we outline our implementation procedure and the hyper-parameter settings used. Section B provides pseudocodes illustrating the processes of planning with Hierarchical Diffuser. We examine the robustness of HD with various $K$ values in Section C. The Out-of-distribution (OOD) visualizations and the corresponding experiment details are outlined in Section E. Section D explains details of the wall clock measurement. Finally, starting from Section G, we present our theoretical proofs.
302
+
303
+ # A IMPLEMENTATION DETAILS
304
+
305
+ In this section, we describe the details of implementation and hyperparameters we used during our experiments. For the Out-of-distribution experiment details, please check Section E.
306
+
307
+ - We build our Hierarchical Diffuser upon the officially released Diffuser code obtained from https://github.com/jannerm/diffuser. We list out the changes we made below.
308
+ - In our approach, the high-level and low-level planners are trained separately using segments randomly selected from the D4RL offline dataset.
309
+ - For the high-level planner's training, we choose segments equivalent in length to the planning horizon, $H$. Within these segments, states at every $K$ steps are selected. In the dense-action variants, the intermediary action sequences between these states are then flattened and concatenated with the corresponding jumpy states along the feature dimension. This trajectory representation is also employed when training the high-level reward predictor.
310
+ - The sequence modeling at the low-level is the same as Diffuser except that we are using a sequence length of $K + 1$ .
311
+ - We set $K = 15$ for the long-horizon planning tasks, while for the Gym-MuJoCo, we use $K = 4$ .
312
+ - Aligning closely with the settings used by Diffuser, we employ a planning horizon of $H = 32$ for the MuJoCo locomotion tasks. For the Maze2D tasks, we utilize varying planning horizons; $H = 120$ for the Maze2D UMaze task, $H = 255$ for the Medium Maze task, and $H = 390$ for the Large Maze task. For the AntMaze tasks, we set $H = 225$ for the UMaze, $H = 255$ for the Medium Maze, and $H = 450$ for the Large Maze.
313
+ - For the MuJoCo locomotion tasks, we select the guidance scales $\omega$ from a set of choices, $\{0.1, 0.01, 0.001, 0.0001\}$ , during the planning phase.
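+
+ For convenience, the hyper-parameters listed above can be summarized in a single configuration sketch (a plain restatement of the values in this section, organized as a Python dictionary):
+
+ ```python
+ HD_CONFIG = {
+     # jump steps between subgoals
+     "K": {"long_horizon": 15, "gym_mujoco": 4},
+     # planning horizons H
+     "H": {
+         "gym_mujoco": 32,
+         "maze2d": {"umaze": 120, "medium": 255, "large": 390},
+         "antmaze": {"umaze": 225, "medium": 255, "large": 450},
+     },
+     # guidance scales swept for the MuJoCo locomotion tasks
+     "guidance_scale_choices": [0.1, 0.01, 0.001, 0.0001],
+ }
+ ```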
314
+
315
+ # B PLANNING WITH HIERARCHICAL DIFFUSER
316
+
317
+ We highlight the high-level planning and low-level planning in Algorithm 1 and Algorithm 2, respectively. The complete process of planning with HD is detailed in Algorithm 3.
318
+
319
+ # B.1 PLANNING WITH HIGH-LEVEL DIFFUSER
320
+
321
+ The high-level module, Sparse Diffuser (SD), models the subsampled states and actions, enabling it to operate independently. We present the pseudocode of guided planning with the Sparse Diffuser in Algorithm 1.
322
+
323
+ # B.2 PLANNING WITH LOW-LEVEL DIFFUSER
324
+
325
+ Given subgoals sampled from the high-level diffuser, segments of low-level plans can be generated concurrently. We illustrate the generation of one such segment as an example in Algorithm 2.
326
+
327
+ # B.3 HIERARCHICAL PLANNING
328
+
329
+ The comprehensive hierarchical planning involving both high-level and low-level planners is outlined in Algorithm 3. For the Maze2D tasks, we employed an open-loop approach, while for more challenging environments like AntMaze, Gym-MuJoCo, and Franka Kitchen, a closed-loop strategy was adopted.
330
+
331
+ Algorithm 1 High-Level Planning
332
+ 1: function SAMPLEHIGHLEVELPLAN(Current state $\mathbf{s}$, Sparse Diffuser $\mu_{\theta_{\mathrm{SD}}}$, guidance function $\mathcal{J}_{\phi_{\mathrm{SD}}}$, guidance scale $\omega$, variance $\sigma_m^2$)
333
+ 2: initialize plan $\mathbf{x}_M^{\mathrm{SD}} \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$
334
+ 3: for $m = M - 1, \dots, 1$ do
335
+ 4: $\tilde{\boldsymbol{\mu}} \gets \boldsymbol{\mu}_{\theta_{\mathrm{SD}}}(\mathbf{x}_{m + 1}^{\mathrm{SD}}) + \omega \sigma_m^2 \nabla_{\mathbf{x}_m^{\mathrm{SD}}} \mathcal{J}_{\phi_{\mathrm{SD}}}(\mathbf{x}_m^{\mathrm{SD}})$
336
+ 5: $\mathbf{x}_{m - 1}^{\mathrm{SD}} \sim \mathcal{N}(\tilde{\boldsymbol{\mu}}, \sigma_m^2 \mathbf{I})$
337
+ 6: Fix $\mathbf{g}_0$ in $\mathbf{x}_{m - 1}^{\mathrm{SD}}$ to current state s
338
+ 7: end for
339
+ 8: return High-level plan $\mathbf{x}_0^{\mathrm{SD}}$
340
+ 9: end function
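+
+ For readers who prefer code, Algorithm 1 can be rendered as a short NumPy loop (a schematic with placeholder networks and noise scales, not the actual implementation):
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def sample_high_level_plan(s, mu_sd, grad_J_sd, omega, sigma, shape, state_dim, M):
+     """NumPy mirror of Algorithm 1; mu_sd and grad_J_sd are placeholder networks."""
+     x = rng.standard_normal(shape)                        # x_M ~ N(0, I)
+     for m in range(M - 1, 0, -1):
+         mu_tilde = mu_sd(x, m) + omega * sigma[m] ** 2 * grad_J_sd(x, m)
+         x = mu_tilde + sigma[m] * rng.standard_normal(shape)
+         x[:state_dim, 0] = s                              # fix g_0 to the current state
+     return x
+
+ # toy stand-ins, just to exercise the loop
+ sigma = np.linspace(0.01, 1.0, 50)                        # hypothetical per-step noise scales
+ plan = sample_high_level_plan(
+     s=np.zeros(4),
+     mu_sd=lambda x, m: 0.9 * x,                           # placeholder denoising mean
+     grad_J_sd=lambda x, m: np.zeros_like(x),              # placeholder guidance gradient
+     omega=0.1, sigma=sigma, shape=(4, 8), state_dim=4, M=50,
+ )
+ print(plan.shape)   # (4, 8): a state-only subgoal plan g_0 ... g_7
+ ```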
341
+
342
+ Algorithm 2 Low-Level Planning
343
+ 1: function SAMPLELOWLEVELPLAN(Subgoals $(g_{i},g_{i + 1})$, low-level diffuser $\mu_{\theta}$, low-level guidance function $\mathcal{J}_{\phi}$, guidance scale $\omega$, variance $\sigma_m^2$)
344
+ 2: Initialize all low-level plan $\mathbf{x}_M^i\sim \mathcal{N}(\mathbf{0},\mathbf{I})$
345
+ 3: for $m = M - 1,\dots ,1$ do
346
+ 4: $\tilde{\pmb{\mu}}\gets \pmb{\mu}_{\theta}(\mathbf{x}_{m + 1}^{i}) + \omega \sigma_{m}^{2}\nabla_{\mathbf{x}_{m}^{i}}\mathcal{J}_{\phi}(\mathbf{x}_{m}^{i})$
347
+ 5: $\mathbf{x}_{m - 1}^{i}\sim \mathcal{N}(\tilde{\pmb{\mu}},\sigma_{m}^{2}\mathbf{I})$
348
+ 6: Fix $\mathbf{s}_0$ in $\mathbf{x}_{m - 1}^{i}$ to $\mathbf{g}_i$; fix $\mathbf{s}_K$ in $\mathbf{x}_{m - 1}^{i}$ to $\mathbf{g}_{i + 1}$
349
+ 7: end for
350
+ 8: return low-level plan $\mathbf{x}_0^i$
351
+ 9: end function
352
+
353
+ # C ABLATION STUDY ON JUMPY STEPS K
354
+
355
+ In this section, we report the detailed findings from an ablation study concerning the impact of the parameter $K$ in Hierarchical Diffuser. The results, which are detailed in Tables 7 and 8, correspond to Maze2D tasks and MuJoCo locomotion tasks, respectively. As we increased $K$ , an initial enhancement in performance was observed. However, a subsequent performance decline was noted with larger $K$ values. This trend aligns with our initial hypothesis that a larger $K$ introduces more skipped steps at the high-level planning stage, potentially resulting in the omission of information necessary for effective trajectory modeling, consequently leading to performance degradation.
356
+
357
+ Table 7: Ablation on $K$ - Maze2D. The model's performance increased with the value of $K$ up until $K = 21$. We report the mean and standard error over 100 random seeds.
358
+
359
+ <table><tr><td colspan="2">Environment</td><td>K1 (Diffuser default)</td><td>HD-K7</td><td>HD-K15 (default)</td><td>HD-K21</td></tr><tr><td>Maze2D</td><td>U-Maze</td><td>113.9 ± 3.1</td><td>127.0 ± 1.5</td><td>128.4 ± 3.6</td><td>124.0 ± 2.1</td></tr><tr><td>Maze2D</td><td>Medium</td><td>121.5 ± 2.7</td><td>132.5 ± 1.3</td><td>135.6 ± 3.0</td><td>130.3 ± 2.4</td></tr><tr><td>Maze2D</td><td>Large</td><td>123.0 ± 6.4</td><td>153.2 ± 3.0</td><td>155.8 ± 2.5</td><td>158.9 ± 2.0</td></tr><tr><td colspan="2">Sing-task Average</td><td>119.5</td><td>137.6</td><td>139.9</td><td>137.7</td></tr><tr><td>Multi2D</td><td>U-Maze</td><td>128.9 ± 1.8</td><td>135.4 ± 1.1</td><td>144.1 ± 1.2</td><td>133.7 ± 1.3</td></tr><tr><td>Multi2D</td><td>Medium</td><td>127.2 ± 3.4</td><td>135.3 ± 1.6</td><td>140.2 ± 1.6</td><td>134.5 ± 1.4</td></tr><tr><td>Multi2D</td><td>Large</td><td>132.1 ± 5.8</td><td>160.2 ± 1.9</td><td>165.5 ± 0.6</td><td>159.3 ± 3.0</td></tr><tr><td colspan="2">Multi-task Average</td><td>129.4</td><td>143.7</td><td>149.9</td><td>142.5</td></tr></table>
360
+
361
+ # D WALL CLOCK COMPARISON DETAILS
362
+
363
+ We evaluated the wall clock time by averaging the time taken per complete plan during testing and, for the training phase, the time needed for 100 updates. All models were measured using a single NVIDIA RTX 8000 GPU to ensure consistency. We employ the released code and default settings for the Diffuser model. We select the Maze2D tasks and Hopper-Medium-Expert, a representative of the Gym-MuJoCo tasks, from the D4RL benchmark for our measurements.
364
+
365
+ Algorithm 3 Hierarchical Planning
366
+ 1: function SAMPLEHIERARCHICALPLAN(High-level diffuser $\mu_{\theta_{\mathrm{SD}}}$ , low-level diffuser $\mu_{\theta}$ , high-level guidance function $\mathcal{I}_{\phi_{\mathrm{SD}}}$ , low-level guidance function $\mathcal{I}_{\phi}$ , high-level guidance scale $\omega_{\mathrm{SD}}$ , low-level guidance scale $\omega$ , high-level variance $\sigma_{\mathrm{SD},m}^{2}$ , low-level variance $\sigma_{m}^{2}$ )
367
+ 2: Observe state s;
368
+ 3: if do open-loop then
369
+ 4: Sample high-level plan $\mathbf{x}^{\mathrm{SD}} = \mathrm{SAMPLEHIGHLEVELPLAN}(\mathbf{s}, \mu_{\theta_{\mathrm{SD}}}, \mathcal{I}_{\phi_{\mathrm{SD}}}, \omega_{\mathrm{SD}}, \sigma_{SD,m}^{2})$
370
+ 5: for $i = 0, \ldots, H - 1$ parallel do
371
+ 6: Sample low-level plan $\mathbf{x}^{(i)} = \mathrm{SAMPLELOWLEVELPLAN}((g_i, g_{i + 1}), \mu_\theta, \mathcal{I}_\phi, \omega, \sigma_m^2)$
372
+ 7: end for
373
+ 8: Form the full plan $\mathbf{x}$ from the low-level plans $\mathbf{x}^{(i)}$ for $i = 0, \dots, H - 1$
374
+ 9: for action $\mathbf{a}_t$ in $\mathbf{x}$ do
375
+ 10: Execute $\mathbf{a}_t$
376
+ 11: end for
377
+ 12: else
378
+ 13: while not done do
379
+ 14: Sample high-level plan $\mathbf{x}^{\mathrm{SD}} = \mathrm{SAMPLEHIGHLEVELPLAN}(\mathbf{s}, \mu_{\theta_{\mathrm{SD}}}, \mathcal{I}_{\phi_{\mathrm{SD}}}, \omega_{\mathrm{SD}}, \sigma_{\mathrm{SD},m}^{2})$
380
+ 15: // Sample only the first low-level segment
381
+ 16: Sample $\mathbf{x}^{(0)} = \mathrm{SAMPLELOWLEVELPLAN}((g_0, g_1), \mu_\theta, \mathcal{I}_\phi, \omega, \sigma_m^2)$
382
+ 17: Execute the first $\mathbf{a}_0$ of plan $\mathbf{x}^{(0)}$
383
+ 18: Observe state s
384
+ 19: end while
385
+ 20: end if
386
+ 21: end function
387
+
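For clarity, the control flow of Algorithm 3 can also be written in code. The following Python sketch mirrors the open-loop and closed-loop branches; `sample_high_level_plan`, `sample_low_level_plan`, and the Gym-style `env` are hypothetical stand-ins for the sampling routines and the environment interface, not the released implementation.

```python
# Minimal sketch of Algorithm 3; function names and the env interface are assumptions.
def hierarchical_plan(env, sample_high_level_plan, sample_low_level_plan,
                      open_loop, H):
    s = env.reset()                                         # line 2: observe state s
    if open_loop:
        goals = sample_high_level_plan(s)                   # line 4: subgoals g_0, ..., g_H
        # lines 5-7: the H low-level segments are independent and can be sampled in parallel
        segments = [sample_low_level_plan(goals[i], goals[i + 1]) for i in range(H)]
        plan = [a for seg in segments for a in seg]         # line 8: form the full plan x
        for a in plan:                                      # lines 9-11: execute every action
            s, _, done, _ = env.step(a)
    else:
        done = False
        while not done:                                     # lines 13-19: replan every step
            goals = sample_high_level_plan(s)
            first_segment = sample_low_level_plan(goals[0], goals[1])
            s, _, done, _ = env.step(first_segment[0])      # line 17: execute only a_0
    return s
```
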
388
+ Table 8: Ablation on $K$ - MuJoCo Locomotion. The model's performance increased with the value of $K$ up until $K = 8$ . We report the mean and standard error over 5 random seeds.
389
+
390
+ <table><tr><td>Dataset</td><td>Environment</td><td>K1 (Diffuser default)</td><td>HD-K4 (default)</td><td>HD-K8</td></tr><tr><td>Medium-Expert</td><td>HalfCheetah</td><td>88.9 ± 0.3</td><td>92.5 ± 0.3</td><td>91.5 ± 0.3</td></tr><tr><td>Medium-Expert</td><td>Hopper</td><td>103.3 ± 1.3</td><td>115.3 ± 1.1</td><td>113.0 ± 0.5</td></tr><tr><td>Medium-Expert</td><td>Walker2d</td><td>106.9 ± 0.2</td><td>107.1 ± 0.1</td><td>107.6 ± 0.3</td></tr><tr><td>Medium</td><td>HalfCheetah</td><td>42.8 ± 0.3</td><td>46.7 ± 0.2</td><td>45.9 ± 0.7</td></tr><tr><td>Medium</td><td>Hopper</td><td>74.3 ± 1.4</td><td>99.3 ± 0.3</td><td>86.7 ± 7.4</td></tr><tr><td>Medium</td><td>Walker2d</td><td>79.6 ± 0.6</td><td>84.0 ± 0.6</td><td>84.2 ± 0.5</td></tr><tr><td>Medium-Replay</td><td>HalfCheetah</td><td>37.7 ± 0.5</td><td>38.1 ± 0.7</td><td>39.5 ± 0.4</td></tr><tr><td>Medium-Replay</td><td>Hopper</td><td>93.6 ± 0.4</td><td>94.7 ± 0.7</td><td>91.3 ± 1.3</td></tr><tr><td>Medium-Replay</td><td>Walker2d</td><td>70.6 ± 1.6</td><td>84.1 ± 2.2</td><td>76.4 ± 2.7</td></tr><tr><td colspan="2">Average</td><td>77.5</td><td>84.6</td><td>81.8</td></tr></table>
391
+
392
+ the Gym-MuJoCo tasks, from the D4RL benchmark for our measurements. On the Maze2D tasks we set $K = 15$ , and for the Gym-MuJoCo tasks we set it to 4, as this is our default setting for RL tasks. The planning horizons of HD for each task, outlined in Table 9, must be divisible by $K$ , which leads to slight deviations from the default values used by the Diffuser.
393
+
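For reference, the measurement protocol can be sketched as below; `generate_plan` and `update_step` are hypothetical callables standing in for one complete planning call and one training update, and the CUDA synchronization assumes a PyTorch setup.

```python
import time
import torch

def avg_plan_time(generate_plan, n_trials=20):
    # test-time metric: average wall-clock time per complete plan
    times = []
    for _ in range(n_trials):
        torch.cuda.synchronize()
        t0 = time.perf_counter()
        generate_plan()
        torch.cuda.synchronize()
        times.append(time.perf_counter() - t0)
    return sum(times) / len(times)

def time_for_100_updates(update_step):
    # training-time metric: wall-clock time needed for 100 updates
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    for _ in range(100):
        update_step()
    torch.cuda.synchronize()
    return time.perf_counter() - t0
```
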
394
+ Table 9: Planning horizons $H$ used in the wall-clock time comparison
395
+
396
+ <table><tr><td>Environment</td><td>Diffuser</td><td>Ours</td></tr><tr><td>Maze2d-Umaze</td><td>128</td><td>120</td></tr><tr><td>Maze2d-Medium</td><td>256</td><td>255</td></tr><tr><td>Maze2d-Large</td><td>384</td><td>390</td></tr><tr><td>Hopper-Medium-Expert</td><td>32</td><td>32</td></tr></table>
397
+
398
+ # E COMPOSITIONAL OUT-OF-DISTRIBUTION (OOD) EXPERIMENT DETAILS
399
+
400
+ While an increase in kernel size does indeed provide a performance boost for the Diffuser model, this enlargement inevitably augments the model's capacity, which potentially increases the risk of overfitting. Therefore, Diffuser models may underperform on tasks demanding both a large receptive field and strong generalization abilities. To illustrate this, inspired by Janner et al. (2022a), we designed a compositional out-of-distribution (OOD) Maze2D task, as depicted in Figure 4. During training, the agent is only exposed to offline trajectories navigating diagonally. However, during testing, the agent is required to traverse between novel start-goal pairs. We visualized the 32 plans generated by the models in Figure 4. As presented in the figure, only the Hierarchical Diffuser can generate reasonable plans approximating the optimal solution. In contrast, all Diffuser variants either create plans that lead the agent through a wall (i.e., Diffuser, Diffuser-KS13, and Diffuser-KS19) or produce plans that exceed the maximum step limit (i.e., Diffuser-KS13, Diffuser-KS19, and Diffuser-KS25).
401
+
402
+ To conduct this experiment, we generated a training dataset of 2 million transitions using the same Proportional-Derivative (PD) controller as used for generating the Maze2D tasks. Given that an optimal path typically requires around 230 steps to transition from the starting point to the end goal, we set the planning horizon $H$ for the Diffuser variants at 248, while for our proposed method, we set it at 255, to ensure divisibility by $K = 15$ . For the reinforcement learning task in the testing phase, the maximum steps allowed were set at 300. Throughout the training phase, we partitioned $10\%$ of the training dataset as a validation set to mitigate the risk of overfitting. To quantitatively measure the discrepancy between the generated plans and the optimal solution, we used Cosine Similarity and Mean Squared Error (MSE). Specifically, we crafted 10 optimal paths using the same controller and sampled 100 plans from each model for each testing task. To ensure that the optimal path length aligned with the planning horizon of each model, we modified the threshold distance used to terminate the controller once the agent reached the goal state. Subsequently, we computed the discrepancy between each plan and each optimal path. The mean of these results was reported in Table 5.
403
+
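As an illustration of how the discrepancy numbers are computed, a minimal sketch is given below; it assumes plans and optimal paths are already arrays of equal horizon, which glosses over the controller-threshold adjustment described above.

```python
import numpy as np

def plan_discrepancy(plans, optimal_paths):
    """Average cosine similarity and MSE between every (plan, optimal path) pair.

    plans:         (num_plans, horizon, dim) array of sampled plans
    optimal_paths: (num_paths, horizon, dim) array of reference paths
    """
    cos, mse = [], []
    for plan in plans:
        for path in optimal_paths:
            a, b = plan.ravel(), path.ravel()
            cos.append(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))
            mse.append(np.mean((a - b) ** 2))
    return float(np.mean(cos)), float(np.mean(mse))

# e.g., 100 sampled plans against 10 optimal paths (random placeholders here):
# cos_sim, mse = plan_discrepancy(np.random.rand(100, 255, 2), np.random.rand(10, 255, 2))
```
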
404
+ ![](images/8c829331df3fafb4d4d426b704f841870b6152d5bf87e8a7ac0bf2df3f2eb98b.jpg)
405
+ Figure 4: Large Kernel Size Hurts the OOD Generalization. Increasing kernel size generally improves the offline RL performance of the Diffuser model. However, when a large receptive field and compositional out-of-distribution (OOD) generalization are both required, Diffuser models offer no simple solution. We demonstrate this with the sampled plans from both the standard Diffuser and a Diffuser with varied kernel sizes (KS). None of them can come up with an optimal plan by stitching training segments together. Conversely, our proposed Hierarchical Diffuser (HD) possesses both a large receptive field and the flexibility needed for compositional OOD tasks.
406
+
407
+ # F THEORETICAL ANALYSIS
408
+
409
+ In this section, we show that the proposed method can improve the generalization capability when compared to the baseline. Our analysis also sheds light on the tradeoffs in $K$ and the kernel size. Let
410
+
411
+ $K\in \{1,\ldots ,T\}$ and $\ell (x) = \tau \mathbb{E}_{m,\epsilon}[\| \epsilon -\epsilon_{\theta}(\sqrt{\bar{\alpha}_m} x + \sqrt{1 - \bar{\alpha}_m}\epsilon ,m)\| ^2 ]$ , where $\tau >0$ is an arbitrary normalization coefficient that can depend on $K$ : e.g., $1 / d$ where $d$ is the dimensionality of $\epsilon$ . Given the training trajectory data $(\mathbf{x}_0^{(i)})_{i = 1}^n$ , the training loss is defined by $\hat{\mathcal{L}} (\theta) = \frac{1}{n}\sum_{i = 1}^{n}\ell (\mathbf{x}_0^{(i)})$ where $\mathbf{x}_m^{(i)} = \sqrt{\bar{\alpha}_m}\mathbf{x}_0^{(i)} + \sqrt{1 - \bar{\alpha}_m}\epsilon$ , and $\mathbf{x}_0^{(1)},\dots ,\mathbf{x}_0^{(n)}$ are independent samples of trajectories. We have $\mathcal{L}(\theta) = \mathbb{E}_{\mathbf{x}_0}[\ell (\mathbf{x}_0)]$ . Define $\hat{\theta}$ to be an output of the training process using $(\mathbf{x}_0^{(i)})_{i = 1}^n$ , and $\varphi$ to be the (unknown) value function under the optimal policy. Let $\Theta$ be the set of $\theta$ such that $\hat{\theta}\in \Theta$ and $\Theta$ is independent of $(\mathbf{x}_0^{(i)})_{i = 1}^n$ . Denote the projection of the parameter space $\Theta$ onto the loss function by $\mathcal{H} = \{x\mapsto \tau \mathbb{E}_{m,\epsilon}[\| \epsilon -\epsilon_{\theta}(\sqrt{\bar{\alpha}_m} x + \sqrt{1 - \bar{\alpha}_m}\epsilon ,m)\| ^2 ]:\theta \in \Theta \}$ , the conditional Rademacher complexity by $\mathcal{R}_t(\mathcal{H}) = \mathbb{E}_{(\mathbf{x}_0^{(i)})_{i = 1}^n,\xi}[\sup_{h\in \mathcal{H}}\frac{1}{n_t}\sum_{i = 1}^{n_t}\xi_ih(\mathbf{x}_0^{(i)})|\mathbf{x}_0^{(i)}\in \mathcal{C}_t]$ , where $\mathcal{C}_t = \{\mathbf{x}_0\in \mathcal{X}:t = \operatorname {argmax}_{j\in [H]}\varphi (\mathbf{g}_j)$ where $[\mathbf{g}_1\mathbf{g}_2\dots \mathbf{g}_H]$ is the first row of $\mathbf{x}_0\}$ and $n_t = \sum_{i = 1}^{n}\mathbb{1}\{\mathbf{x}_0^{(i)}\in \mathcal{C}_t\}$ . Define $\mathcal{T} = \{t\in [H]:n_t\geq 1\}$ and $C_0 = d\tau c((1 / \sqrt{2}) + \sqrt{2})$ for some $c\geq 0$ such that $c\geq \mathbb{E}_{m,\epsilon}[((\epsilon -\epsilon_{\theta}(\mathbf{x}_m,m))_i)^2 ]$ for $i = 1,\ldots ,d$ , where $d$ is the dimension of $\epsilon \in \mathbb{R}^d$ . Here, both the loss values and $C_0$ scale linearly in $d$ . Our theorem works for any $\tau >0$ , including $\tau = 1 / d$ , which normalizes the loss values and $C_0$ with respect to $d$ . Thus, the conclusion of our theorem is invariant to the scale of the loss value.
412
+
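For intuition, the per-trajectory loss $\ell(x)$ defined above is the usual denoising objective and can be estimated by Monte Carlo over $(m, \epsilon)$; the PyTorch sketch below is illustrative only, with `eps_model`, the noise schedule `alpha_bar`, and the choice $\tau = 1/d$ as assumptions.

```python
import torch

def ell(x0, eps_model, alpha_bar, num_samples=64):
    # Monte Carlo estimate of tau * E_{m,eps} ||eps - eps_theta(sqrt(ab_m) x0 + sqrt(1-ab_m) eps, m)||^2
    # with tau = 1/d, i.e. normalized by the dimensionality of eps.
    d = x0.numel()
    total = torch.zeros(())
    for _ in range(num_samples):
        m = torch.randint(0, len(alpha_bar), (1,))
        eps = torch.randn_like(x0)
        x_m = alpha_bar[m].sqrt() * x0 + (1.0 - alpha_bar[m]).sqrt() * eps
        total = total + ((eps - eps_model(x_m, m)) ** 2).sum()
    return total / (num_samples * d)

# The empirical loss is then hat_L(theta) = (1/n) * sum_i ell(x0_i, ...),
# and Theorem 1 below bounds the gap L(theta_hat) - hat_L(theta_hat).
```
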
413
+ Theorem 1. For any $\delta >0$ , with probability at least $1 - \delta$
414
+
415
+ $$
416
+ \mathcal {L} (\hat {\theta}) \leq \hat {\mathcal {L}} (\hat {\theta}) + C _ {0} \sqrt {\left\lceil \frac {T}{K} \right\rceil \frac {\ln \left(\left\lceil \frac {T}{K} \right\rceil \frac {2}{\delta}\right)}{n}} + \sum_ {t \in \mathcal {T}} \frac {2 n _ {t} \mathcal {R} _ {t} (\mathcal {H})}{n}. \tag {13}
417
+ $$
418
+
419
+ The proof is presented in Appendix H. The baseline is recovered by setting $K = 1$ . Thus, Theorem 1 demonstrates that the proposed method (i.e., the case of $K > 1$ ) can improve the generalization capability over the baseline (i.e., the case of $K = 1$ ). Moreover, while the upper bound on $\mathcal{L}(\hat{\theta}) - \hat{\mathcal{L}}(\hat{\theta})$ decreases as $K$ increases, it is expected that we lose more details of states with a larger value of $K$ . Therefore, there is a tradeoff in $K$ : i.e., with a larger value of $K$ , we expect better generalization for the diffusion process but a greater loss of state-action details to perform RL tasks. On the other hand, the conditional Rademacher complexity term $\mathcal{R}_t(\mathcal{H})$ in Theorem 1 tends to increase as the number of parameters increases. Thus, there is also a tradeoff in the kernel size: i.e., with a larger kernel size, we expect worse generalization for the diffusion process but a better receptive field to perform RL tasks. We provide additional analysis of $\mathcal{R}_t(\mathcal{H})$ in Appendix G.
420
+
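To make the first tradeoff concrete, the short snippet below evaluates the $K$-dependent factor $C_0\sqrt{\lceil T/K\rceil \ln(\lceil T/K\rceil \cdot 2/\delta)/n}$ from Theorem 1 for an illustrative configuration; the values of $T$, $n$, $\delta$, and $C_0$ are hypothetical and only meant to show that this term shrinks as $K$ grows, while the accompanying loss of state detail is not captured by the bound.

```python
import math

def bound_factor(T, K, n, delta, C0=1.0):
    # K-dependent term of Theorem 1: C0 * sqrt(ceil(T/K) * ln(ceil(T/K) * 2 / delta) / n)
    H = math.ceil(T / K)
    return C0 * math.sqrt(H * math.log(H * 2 / delta) / n)

# Hypothetical setting: T = 360, n = 10,000 trajectories, delta = 0.05.
for K in (1, 4, 15):
    print(K, round(bound_factor(T=360, K=K, n=10_000, delta=0.05), 3))
# Prints factors of roughly 0.59, 0.27, and 0.13, i.e. the term decreases with K.
```
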
421
+ # G ON THE CONDITIONAL RADEMACHER COMPLEXITY
422
+
423
+ In this section, we state that the term $\sum_{t\in \mathcal{T}}\frac{2n_t\mathcal{R}_t(\mathcal{H})}{n}$ in Theorem 1 is also smaller for the proposed method with $K\geq 2$ when compared to the base model (i.e., with $K = 1$ ) under the following assumptions that typically hold in practice. We assume that we can express $\epsilon_{\theta}(\mathbf{x}_m,m) = Wg(V\mathbf{x}_m,m)$ for some functions $g$ and some matrices $W,V$ such that the parameters of $g$ do not contain the entries of $W$ and $V$ , and that $\Theta$ contains $\theta$ with $W$ and $V$ such that $\| W\|_{\infty}\leq \zeta_W$ and $\| V\|_{\infty} < \zeta_V$ for some $\zeta_W$ and $\zeta_V$ . This assumption is satisfied in most neural networks used in practice as $g$ is arbitrary; e.g., we can set $g = \epsilon_{\theta}$ , $W = I$ and $V = I$ to have any arbitrary function $\epsilon_{\theta}(\mathbf{x}_m,m) = Wg(V\mathbf{x}_m,m) = g(\mathbf{x}_m,m)$ . We also assume that $\mathcal{R}_t(\mathcal{H})$ does not increase when we increase $n_t$ . This is reasonable since $\mathcal{R}_t(\mathcal{H}) = O(\frac{1}{n_t})$ for many machine learning models, including neural networks. Under this setting, the following proposition states that the term $\sum_{t\in \mathcal{T}}\frac{2n_t\mathcal{R}_t(\mathcal{H})}{n}$ with the proposed method is also smaller than that of the base model:
424
+
425
+ Proposition 1. Let $q \geq 2$ and denote by $\bar{\mathcal{R}}_t(\bar{\mathcal{H}})$ and $\tilde{\mathcal{R}}_t(\tilde{\mathcal{H}})$ the conditional Rademacher complexities for $K = 1$ (base case) and $K \geq q$ (proposed method) respectively. Then, $\bar{\mathcal{R}}_t(\bar{\mathcal{H}}) \geq \tilde{\mathcal{R}}_t(\tilde{\mathcal{H}})$ for any $t \in \{1, \dots, T\}$ such that $s_t$ is not skipped with $K = q$ .
426
+
427
+ The proof is presented in Appendix H.
428
+
429
+ # H PROOFS
430
+
431
+ # H.1 PROOF OF THEOREM 1
432
+
433
+ Proof. Let $K \in \{1, \dots, T\}$ . Define $[H] = \{1, \dots, H\}$ . Define
434
+
435
+ $$
436
+ \ell (x) = \tau \mathbb {E} _ {m, \epsilon} [ \| \epsilon - \epsilon_ {\hat {\theta}} (\sqrt {\bar {\alpha} _ {m}} x + \sqrt {1 - \bar {\alpha} _ {m}} \epsilon , m) \| ^ {2} ]
437
+ $$
438
+
439
+ Then, we have that $\hat{\mathcal{L}} (\hat{\theta}) = \frac{1}{n}\sum_{i = 1}^{n}\ell (\mathbf{x}_0^{(i)})$ and $\mathcal{L}(\hat{\theta}) = \mathbb{E}_{\mathbf{x}_0}[\ell (\mathbf{x}_0)]$ . Here, $\ell (\mathbf{x}_0^{(1)}),\ldots ,\ell (\mathbf{x}_0^{(n)})$ are not independent since $\hat{\theta}$ is trained with the trajectory data $(\mathbf{x}_0^{(i)})_{i = 1}^n$ , which induces dependence among $\ell (\mathbf{x}_0^{(1)}),\dots,\ell (\mathbf{x}_0^{(n)})$ . To deal with this dependence, we recall that
440
+
441
+ $$
442
+ \mathbf {x} _ {0} = \left[ \begin{array}{c c c c} \mathbf {g} _ {0} & \mathbf {g} _ {1} & \dots & \mathbf {g} _ {H} \\ \mathbf {a} _ {0} & \mathbf {a} _ {K} & \dots & \mathbf {a} _ {H K} \\ \mathbf {a} _ {1} & \mathbf {a} _ {K + 1} & \dots & \mathbf {a} _ {H K + 1} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf {a} _ {K - 1} & \mathbf {a} _ {2 K - 1} & \dots & \mathbf {a} _ {(H + 1) K - 1} \end{array} \right] \in \mathcal {X} \subseteq \mathbb {R} ^ {d},
443
+ $$
444
+
445
+ where the baseline method is recovered by setting $K = 1$ (and hence $H = T / K = T$ ). To utilize this structure, we define $\mathcal{C}_k$ by
446
+
447
+ $$
448
+ \mathcal {C} _ {k} = \left\{\mathbf {x} _ {0} = \left[ \begin{array}{c c c c} \mathbf {g} _ {0} & \mathbf {g} _ {1} & \dots & \mathbf {g} _ {H} \\ \mathbf {a} _ {0} & \mathbf {a} _ {K} & \dots & \mathbf {a} _ {H K} \\ \mathbf {a} _ {1} & \mathbf {a} _ {K + 1} & \dots & \mathbf {a} _ {H K + 1} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf {a} _ {K - 1} & \mathbf {a} _ {2 K - 1} & \dots & \mathbf {a} _ {(H + 1) K - 1} \end{array} \right] \in \mathcal {X}: k = \underset {t \in [ H ]} {\operatorname {a r g m a x}} \varphi (\mathbf {g} _ {t}), \right\}.
449
+ $$
450
+
451
+ We first write the expected error as the sum of the conditional expected error:
452
+
453
+ $$
454
+ \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) ] = \sum_ {k} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] \Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k}).
455
+ $$
456
+
457
+ Similarly,
458
+
459
+ $$
460
+ \frac {1}{n} \sum_ {i = 1} ^ {n} \ell (\mathbf {x} _ {0} ^ {(i)}) = \frac {1}{n} \sum_ {k \in I _ {\mathcal {K}}} \sum_ {i \in \mathcal {I} _ {k}} \ell (\mathbf {x} _ {0} ^ {(i)}) = \sum_ {k \in I _ {\mathcal {K}}} \frac {| \mathcal {I} _ {k} |}{n} \frac {1}{| \mathcal {I} _ {k} |} \sum_ {i \in \mathcal {I} _ {k}} \ell (\mathbf {x} _ {0} ^ {(i)}),
461
+ $$
462
+
463
+ where $\mathcal{I}_k = \{i\in [n]:\mathbf{x}_0^{(i)}\in \mathcal{C}_k\}$ and $I_{\mathcal{K}} = \{k\in [H]:|\mathcal{I}_k|\geq 1\}$ . Using these, we decompose the difference into two terms:
464
+
465
+ $$
466
+ \begin{array}{l} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) ] - \frac {1}{n} \sum_ {i = 1} ^ {n} \ell \left(\mathbf {x} _ {0} ^ {(i)}\right) = \sum_ {k} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] \left(\Pr \left(\mathbf {x} _ {0} \in \mathcal {C} _ {k}\right) - \frac {| \mathcal {I} _ {k} |}{n}\right) \tag {14} \\ + \left(\sum_ {k} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] \frac {| \mathcal {I} _ {k} |}{n} - \frac {1}{n} \sum_ {i = 1} ^ {n} \ell (\mathbf {x} _ {0} ^ {(i)})\right). \\ = \sum_ {k} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] \left(\Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k}) - \frac {| \mathcal {I} _ {k} |}{n}\right) \\ + \frac {1}{n} \sum_ {k \in I _ {\mathcal {K}}} | \mathcal {I} _ {k} | \left(\mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] - \frac {1}{| \mathcal {I} _ {k} |} \sum_ {i \in \mathcal {I} _ {k}} \ell (\mathbf {x} _ {0} ^ {(i)})\right). \\ \end{array}
467
+ $$
468
+
469
+ By using Lemma 1 of (Kawaguchi et al., 2022), we have that for any $\delta > 0$ , with probability at least $1 - \delta$ ,
470
+
471
+ $$
472
+ \begin{array}{l} \sum_ {k} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] \left(\Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k}) - \frac {| \mathcal {I} _ {k} |}{n}\right) \tag {15} \\ \leq \left(\sum_ {k} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] \sqrt {\Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k})}\right) \sqrt {\frac {2 \ln (H / \delta)}{n}} \\ \leq C \left(\sum_ {k} \sqrt {\operatorname * {P r} (\mathbf {x} _ {0} \in \mathcal {C} _ {k})}\right) \sqrt {\frac {2 \ln (H / \delta)}{n}}. \\ \end{array}
473
+ $$
474
+
475
+ Here, note that for any $(f,h,M)$ such that $M > 0$ and $B\geq 0$ for all $X$ , we have that $\mathbb{P}(f(X)\geq M)\geq \mathbb{P}(f(X) > M)\geq \mathbb{P}(Bf(X) + h(X) > BM + h(X))$ , where the probability is with respect to the randomness of $X$ . Thus, by combining equation 14 and equation 15, we have that for any $\delta >0$ , with probability at least $1 - \delta$ , the following holds:
476
+
477
+ $$
478
+ \begin{array}{l} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) ] - \frac {1}{n} \sum_ {i = 1} ^ {n} \ell \left(\mathbf {x} _ {0} ^ {(i)}\right) \leq \frac {1}{n} \sum_ {k \in I _ {\mathcal {K}}} | \mathcal {I} _ {k} | \left(\mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] - \frac {1}{| \mathcal {I} _ {k} |} \sum_ {i \in \mathcal {I} _ {k}} \ell \left(\mathbf {x} _ {0} ^ {(i)}\right)\right) \tag {16} \\ + C \left(\sum_ {k} \sqrt {\Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k})}\right) \sqrt {\frac {2 \ln (H / \delta)}{n}} \\ \end{array}
479
+ $$
480
+
481
+ We now bound the first term on the right-hand side of equation 16. Define
482
+
483
+ $$
484
+ \mathcal {H} = \left\{x \mapsto \tau \mathbb {E} _ {m, \epsilon} [ \| \epsilon - \epsilon_ {\theta} (\sqrt {\bar {\alpha} _ {m}} x + \sqrt {1 - \bar {\alpha} _ {m}} \epsilon , m) \| ^ {2} ]: \theta \in \Theta \right\},
485
+ $$
486
+
487
+ and
488
+
489
+ $$
490
+ \mathcal {R} _ {t} (\mathcal {H}) = \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {h \in \mathcal {H}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} h \left(\mathbf {x} _ {0} ^ {(i)}\right) \mid \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right].
491
+ $$
492
+
493
+ with independent uniform random variables $\xi_1, \ldots, \xi_n$ taking values in $\{-1, 1\}$ . We invoke Lemma 4 of (Pham et al., 2021) to obtain that for any $\delta > 0$ , with probability at least $1 - \delta$ ,
494
+
495
+ $$
496
+ \begin{array}{l} \frac {1}{n} \sum_ {k \in I _ {\mathcal {K}}} | \mathcal {I} _ {k} | \left(\mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) | \mathbf {x} _ {0} \in \mathcal {C} _ {k} ] - \frac {1}{| \mathcal {I} _ {k} |} \sum_ {i \in \mathcal {I} _ {k}} \ell \left(\mathbf {x} _ {0} ^ {(i)}\right)\right) \leq \frac {1}{n} \sum_ {k \in I _ {\mathcal {K}}} | \mathcal {I} _ {k} | \left(2 \mathcal {R} _ {k} (\mathcal {H}) + C \sqrt {\frac {\ln (H / \delta)}{2 | \mathcal {I} _ {k} |}}\right) \tag {17} \\ = \sum_ {k \in I _ {\mathcal {K}}} \frac {2 | \mathcal {I} _ {k} | \mathcal {R} _ {k} (\mathcal {H})}{n} + C \sqrt {\frac {\ln (H / \delta)}{2 n}} \sum_ {k \in I _ {\mathcal {K}}} \sqrt {\frac {| \mathcal {I} _ {k} |}{n}} \\ \leq \sum_ {k \in I _ {\mathcal {K}}} \frac {2 | \mathcal {I} _ {k} | \mathcal {R} _ {k} (\mathcal {H})}{n} + C \sqrt {\frac {H \ln (H / \delta)}{2 n}}, \\ \end{array}
497
+ $$
498
+
499
+ where the last line follows from the Cauchy-Schwarz inequality applied to the term $\sum_{k\in I_{\mathcal{K}}}\sqrt{\frac{|\mathcal{I}_k|}{n}}$ as
500
+
501
+ $$
502
+ \sum_ {k \in I _ {\mathcal {K}}} \sqrt {\frac {| \mathcal {I} _ {k} |}{n}} \leq \sqrt {\sum_ {k \in I _ {\mathcal {K}}} \frac {| \mathcal {I} _ {k} |}{n}} \sqrt {\sum_ {k \in I _ {\mathcal {K}}} 1} = \sqrt {\sum_ {k \in I _ {\mathcal {K}}} 1} \leq \sqrt {H}.
503
+ $$
504
+
505
+ On the other hand, by using Jensen's inequality,
506
+
507
+ $$
508
+ \frac {1}{H} \sum_ {k = 1} ^ {H} \sqrt {\Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k})} \leq \sqrt {\frac {1}{H} \sum_ {k = 1} ^ {H} \Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k})} = \frac {1}{\sqrt {H}}
509
+ $$
510
+
511
+ which implies that
512
+
513
+ $$
514
+ \sum_ {k = 1} ^ {H} \sqrt {\Pr \left(\mathbf {x} _ {0} \in \mathcal {C} _ {k}\right)} \leq \sqrt {H}. \tag {18}
515
+ $$
516
+
517
+ By combining equations 16 and 17 via the union bound, together with equation 18, it holds that for any $\delta >0$ , with probability at least $1 - \delta$ ,
518
+
519
+ $$
520
+ \begin{array}{l} \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) ] - \frac {1}{n} \sum_ {i = 1} ^ {n} \ell (\mathbf {x} _ {0} ^ {(i)}) \\ \leq \sum_ {k \in I _ {\mathcal {K}}} \frac {2 | \mathcal {I} _ {k} | \mathcal {R} _ {k} (\mathcal {H})}{n} + C \sqrt {\frac {H \ln (2 H / \delta)}{2 n}} + C \left(\sum_ {k} \sqrt {\Pr (\mathbf {x} _ {0} \in \mathcal {C} _ {k})}\right) \sqrt {\frac {2 \ln (2 H / \delta)}{n}} \\ \leq \sum_ {k \in I _ {\mathcal {K}}} \frac {2 | \mathcal {I} _ {k} | \mathcal {R} _ {k} (\mathcal {H})}{n} + C \left(\sqrt {2} ^ {- 1} + \sqrt {2}\right) \sqrt {\frac {H \ln (2 H / \delta)}{n}} \\ \end{array}
521
+ $$
522
+
523
+ Since $H \leq \lceil T / K \rceil$ , this implies that for any $\delta > 0$ , with probability at least $1 - \delta$ ,
524
+
525
+ $$
526
+ \mathbb {E} _ {\mathbf {x} _ {0}} [ \ell (\mathbf {x} _ {0}) ] - \frac {1}{n} \sum_ {i = 1} ^ {n} \ell (\mathbf {x} _ {0} ^ {(i)}) \leq C _ {0} \sqrt {\left\lceil \frac {T}{K} \right\rceil \frac {\ln (\left\lceil \frac {T}{K} \right\rceil \frac {2}{\delta})}{n}} + \sum_ {t \in I _ {\mathcal {K}}} \frac {2 | \mathcal {I} _ {t} | \mathcal {R} _ {t} (\mathcal {H})}{n}.
527
+ $$
528
+
529
+ where $C_0 = C\left(\sqrt{2}^{-1} + \sqrt{2}\right)$ . This proves the first statement of this theorem.
530
+
531
+ ![](images/cf60124f1a733814efbd3c1c424a89a446040c29d7dece841d3b3cfca9d4ef45.jpg)
532
+
533
+ # H.2 PROOF OF PROPOSITION 1
534
+
535
+ Proof. For the second statement, we start from $K = 1$ and consider the effect of increasing $K$ from one to an arbitrary value greater than one. Denote by $\mathcal{R}_t(\mathcal{H})$ and $\tilde{\mathcal{R}}_t(\tilde{\mathcal{H}})$ the conditional Rademacher complexities for $K = 1$ (base case) and $K > 1$ (after increasing $K$ ) respectively: i.e., we want to show that $\mathcal{R}_t(\mathcal{H}) \geq \tilde{\mathcal{R}}_t(\tilde{\mathcal{H}})$ . Given the increased value of $K$ , let $t \in \{1, \dots, T\}$ be such that $s_t$ is not skipped after increasing $K$ . From the definition of $\mathcal{H}$ ,
536
+
537
+ $$
538
+ \begin{array}{l} \mathcal {R} _ {t} (\mathcal {H}) = \mathbb {E} _ {\left(\mathbf {x} _ {0} ^ {(i)}\right) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {h \in \mathcal {H}} \frac {1}{\left| \mathcal {I} _ {t} \right|} \sum_ {i = 1} ^ {\left| \mathcal {I} _ {t} \right|} \xi_ {i} h \left(\mathbf {x} _ {0} ^ {(i)}\right) \mid \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \tag {19} \\ = \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\boldsymbol {\xi}} \left[ \sup _ {\theta \in \Theta} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \mathbb {E} _ {m, \epsilon} [ \| \epsilon - \epsilon_ {\theta} (\varsigma (\mathbf {x} _ {0} ^ {(i)}), m) \| ^ {2} ] \mid \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right]. \\ \end{array}
539
+ $$
540
+
541
+ where everything is defined for $K = 1$ and $\varsigma (\mathbf{x}_0^{(i)}) = \sqrt{\bar{\alpha}_m}\mathbf{x}_0^{(i)} + \sqrt{1 - \bar{\alpha}_m}\epsilon$ . Here, we recall that $\epsilon_{\theta}(\mathbf{x}_m,m) = Wg(V\mathbf{x}_m,m)$ for some function $g$ and matrices $W,V$ such that the parameters of $g$ do not contain the entries of $W$ and $V$ . This implies that $\epsilon_{\theta}(\varsigma (\mathbf{x}_0^{(i)}),m) = W\tilde{g}_m(V\mathbf{x}_0^{(i)})$ where $\tilde{g}_m(x) = g(\tilde{\varsigma} (x),m)$ with $\tilde{\varsigma} (x) = \sqrt{\bar{\alpha}_m} x + \sqrt{1 - \bar{\alpha}_m} V\epsilon$ , and that we can decompose $\Theta = \mathcal{W}\times \mathcal{V}\times \tilde{\Theta}$ with which $\theta$ can be decomposed into $W\in \mathcal{W},V\in \mathcal{V}$ , and $\tilde{\theta}\in \tilde{\Theta}$ . Using this,
542
+
543
+ $$
544
+ \begin{array}{l} \mathcal {R} _ {t} (\mathcal {H}) = \mathbb {E} _ {\left(\mathbf {x} _ {0} ^ {(i)}\right) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {\theta \in \Theta} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \mathbb {E} _ {m, \epsilon} [ \| \epsilon - W \tilde {g} _ {m} (V \mathbf {x} _ {0} ^ {(i)}) \| ^ {2} ] \mid \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \tag {20} \\ = \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\boldsymbol {\xi}} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} \times \mathcal {V} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \sum_ {j = 1} ^ {d} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j} - W _ {j} \tilde {g} _ {m} (V \mathbf {x} _ {0} ^ {(i)})) ^ {2} ] \mid \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right]. \\ \end{array}
545
+ $$
546
+
547
+ where $W_{j}$ is the $j$ -th row of $W$ . Recall that when we increase $K$ , some states are skipped and accordingly $d$ decreases. Let $d_{0}$ be the $d$ after $K$ increased from one to some value greater than one: i.e., $d_{0} \leq d$ . Without loss of generality, let us arrange the order of the coordinates over $j = 1, 2, \ldots, d_{0}, d_{0} + 1, \ldots, d$ so that $j = d_{0} + 1, d_{0} + 2, \ldots, d$ are removed after $K$ increases.
548
+
549
+ Since $\Theta$ contains $\theta$ with $W$ and $V$ such that $\| W\|_{\infty}\leq \zeta_W$ and $\| V\|_{\infty} < \zeta_V$ for some $\zeta_W$ and $\zeta_V$ , the set $\mathcal{W}$ contains $W$ such that $W_{j} = 0$ for $j = d_0 + 1,d_0 + 2,\ldots ,d$ . Define $\mathcal{W}_0$ such that $\mathcal{W} = \mathcal{W}_0\times \tilde{\mathcal{W}}_0$ where $(W_{j})_{j = 1}^{d_{0}}\in \mathcal{W}_{0}$ and $(W_{j})_{j = d_{0} + 1}^{d}\in \tilde{\mathcal{W}}_{0}$ . Notice that $\mathcal{W} = \{(W_j)^d_{j = 1}:$ $\| (W_j)^d_{j = 1}\|_{\infty}\leq \zeta_W\}$ and $\mathcal{W}_0 = \{(W_j)^{d_0}_{j = 1}:\| (W_j)^{d_0}_{j = 1}\|_{\infty}\leq \zeta_W\}$ . Since we take supremum over $W\in \mathcal{W}$ , setting $W_{j} = 0$ for $j = d_0 + 1,d_0 + 2,\ldots ,d$ attains a lower bound as
550
+
551
+ $$
552
+ \begin{array}{l} \mathcal {R} _ {t} (\mathcal {H}) = \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\boldsymbol {\xi}} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} \times \mathcal {V} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \sum_ {j = 1} ^ {d} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j} - W _ {j} \tilde {g} _ {m} (V \mathbf {x} _ {0} ^ {(i)})) ^ {2} ] | \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \\ \geq \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} _ {0} \times \mathcal {V} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \left(\sum_ {j = 1} ^ {d _ {0}} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j} - W _ {j} \tilde {g} _ {m} (V \mathbf {x} _ {0} ^ {(i)})) ^ {2} ] + \sum_ {j = d _ {0} + 1} ^ {d} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j}) ^ {2} ]\right) | \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \\ = \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\boldsymbol {\xi}} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} _ {0} \times \mathcal {V} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \sum_ {j = 1} ^ {d _ {0}} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j} - W _ {j} \tilde {g} _ {m} (V \mathbf {x} _ {0} ^ {(i)})) ^ {2} ] | \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \\ \end{array}
553
+ $$
554
+
555
+ where the last line follows from the fact that
556
+
557
+ $$
558
+ \mathbb {E} _ {\xi} \sup _ {(W, \tilde {\theta}) \in \mathcal {W} \times \tilde {\Theta}} \sum_ {j = d _ {0} + 1} ^ {d} \xi_ {i} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j}) ^ {2} ] = \mathbb {E} _ {\xi} \sum_ {j = d _ {0} + 1} ^ {d} \xi_ {i} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j}) ^ {2} ] = \sum_ {j = d _ {0} + 1} ^ {d} \mathbb {E} _ {\xi} [ \xi_ {i} ] \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j}) ^ {2} ] = 0.
559
+ $$
560
+
561
+ Similarly, since $\Theta$ contains $\theta$ with $W$ and $V$ such that $\| W\|_{\infty}\leq \zeta_W$ and $\| V\|_{\infty} < \zeta_V$ for some $\zeta_W$ and $\zeta_V$ , the set $\mathcal{V}$ contains $V$ such that $V_{j} = 0$ for $j = d_0 + 1,d_0 + 2,\ldots ,d$ , where $V_{j}$ is the $j$ -th row of $V$ . Define $\mathcal{V}_0$ such that $\mathcal{V} = \mathcal{V}_0\times \tilde{\mathcal{V}}_0$ where $(V_j)_{j = 1}^{d_0}\in \mathcal{V}_0$ and $(V_j)_{j = d_0 + 1}^d\in \tilde{\mathcal{V}}_0$ . Notice that $\mathcal{V} = \{(V_j)_{j = 1}^d:\| (V_j)_{j = 1}^d\|_\infty \leq \zeta_V\}$ and $\mathcal{V}_0 = \{(V_j)_{j = 1}^{d_0}:\| (V_j)_{j = 1}^{d_0}\|_\infty \leq \zeta_V\}$ . Since we take supremum over $V\in \mathcal{V}$ , setting $V_{j} = 0$ for $j = d_0 + 1,d_0 + 2,\ldots ,d$ attains a lower bound as
562
+
563
+ $$
564
+ \begin{array}{l} \mathcal {R} _ {t} (\mathcal {H}) \geq \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} _ {0} \times \mathcal {V} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \sum_ {j = 1} ^ {d _ {0}} \mathbb {E} _ {m, \epsilon} [ (\epsilon_ {j} - W _ {j} \tilde {g} _ {m} (V \mathbf {x} _ {0} ^ {(i)})) ^ {2} ] \mid \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \\ = \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} _ {0} \times \mathcal {V} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \sum_ {j = 1} ^ {d _ {0}} \mathbb {E} _ {m, \epsilon} \left[ \left(\epsilon_ {j} - W _ {j} \tilde {g} _ {m} \left(\sum_ {k = 1} ^ {d} V _ {k} (\mathbf {x} _ {0} ^ {(i)}) _ {k}\right)\right) ^ {2} \right] | \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \\ \geq \mathbb {E} _ {(\mathbf {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {(W, V, \tilde {\theta}) \in \mathcal {W} _ {0} \times \mathcal {V} _ {0} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \sum_ {j = 1} ^ {d _ {0}} \mathbb {E} _ {m, \epsilon} \left[ \left(\epsilon_ {j} - W _ {j} \tilde {g} _ {m} \left(\sum_ {k = 1} ^ {d _ {0}} V _ {k} (\mathbf {x} _ {0} ^ {(i)}) _ {k}\right)\right) ^ {2} \right] | \mathbf {x} _ {0} ^ {(i)} \in \mathcal {C} _ {t} \right] \\ = \mathbb {E} _ {(\tilde {x} _ {0} ^ {(i)}) _ {i = 1} ^ {n}} \mathbb {E} _ {\xi} \left[ \sup _ {(\tilde {W}, \tilde {V}, \tilde {\theta}) \in \mathcal {W} _ {0} \times \mathcal {V} _ {0} \times \tilde {\Theta}} \frac {1}{| \mathcal {I} _ {t} |} \sum_ {i = 1} ^ {| \mathcal {I} _ {t} |} \xi_ {i} \mathbb {E} _ {m, \epsilon} [ \| \tilde {\epsilon} - \tilde {W} \tilde {g} _ {m} (\tilde {V} \tilde {x} _ {0} ^ {(i)}) \| ^ {2} ] \mid \tilde {x} _ {0} ^ {(i)} \in \tilde {\mathcal {C}} _ {t} \right] \\ \geq \tilde {\mathcal {R}} _ {t} (\tilde {\mathcal {H}}) \\ \end{array}
565
+ $$
566
+
567
+ where $\tilde{\epsilon} = (\epsilon_j)_{j=1}^{d_0}$ , $\tilde{x}_0^{(i)} = ((\tilde{x}_0^{(i)})_j)_{j=1}^{d_0}$ , $\tilde{C}_t$ is the $C_t$ for $\tilde{x}_0^{(i)}$ with skipping states, and $\tilde{\mathcal{R}}_t(\tilde{\mathcal{H}})$ is the conditional Rademacher complexity after increasing $K > 1$ . The last line follows from the same steps of equation 19 and equation 20 applied for $\tilde{\mathcal{R}}_t(\tilde{\mathcal{H}})$ and the fact that $|\mathcal{I}_t|$ of $\mathcal{R}_t(\mathcal{H})$ is smaller than that of $\mathcal{R}_t(\tilde{\mathcal{H}})$ (due to the effect of removing the states), along with the assumption that $\mathcal{R}_t(\mathcal{H})$ does not increase when we increase $n_t$ . This proves the second statement.
568
+
569
+ ![](images/df344ad20951d7eaba22169d7f00c47cb1d5f863cf955d9b863c4a01623630f6.jpg)
2401.02xxx/2401.02644/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0391ad61f332a1bd876ab7455c33f1dbd64ea6c8382f8dd9892a10baaf073e99
3
+ size 1044321
2401.02xxx/2401.02644/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02669/f8adcee7-946a-4720-abe5-c5b9353fb0f6_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:793e89135067feec0e916867acabe8f5bb1779c8319852ba0b5c180e5f5e063b
3
+ size 1309177
2401.02xxx/2401.02669/full.md ADDED
@@ -0,0 +1,539 @@
 
 
 
1
+ # Infinite-LLM: Efficient LLM Service for Long Context with DistAttention and Distributed KVCache
2
+
3
+ Bin Lin*
4
+
5
+ Alibaba Group
6
+
7
+ Chen Zhang*†
8
+
9
+ Shanghai Jiao Tong University
10
+
11
+ Tao Peng*
12
+
13
+ Alibaba Group
14
+
15
+ Hanyu Zhao
16
+
17
+ Alibaba Group
18
+
19
+ Wencong Xiao
20
+
21
+ Alibaba Group
22
+
23
+ Minmin Sun
24
+
25
+ Alibaba Group
26
+
27
+ Anmin Liu
28
+
29
+ Peking University
30
+
31
+ Zhipeng Zhang
32
+
33
+ Alibaba Group
34
+
35
+ Lanbo Li
36
+
37
+ Alibaba Group
38
+
39
+ Xiafei Qiu
40
+
41
+ Alibaba Group
42
+
43
+ Shen Li
44
+
45
+ Alibaba Group
46
+
47
+ Zhigang Ji
48
+
49
+ Shanghai Jiao Tong University
50
+
51
+ Tao Xie
52
+
53
+ Peking University
54
+
55
+ Yong Li
56
+
57
+ Alibaba Group
58
+
59
+ Wei Lin
60
+
61
+ Alibaba Group
62
+
63
+ # Abstract
64
+
65
+ Large Language Models (LLMs) demonstrate substantial potential across a diverse array of domains via request serving. However, as trends continue to push for expanding context sizes, the autoregressive nature of LLMs results in highly dynamic behavior of the attention layers, showcasing significant differences in computational characteristics and memory requirements from the non-attention layers. This presents substantial challenges for resource management and performance optimization in service systems. Existing static model parallelism and resource allocation strategies fall short when dealing with this dynamicity. To address the issue, we propose Infinite-LLM, a novel LLM serving system designed to effectively handle dynamic context lengths. Infinite-LLM disaggregates attention layers from an LLM's inference process, facilitating flexible and independent resource scheduling that optimizes computational performance and enhances memory utilization jointly. By leveraging a pooled GPU memory strategy across a cluster, Infinite-LLM not only significantly boosts system throughput but also supports extensive context lengths. Evaluated on a dataset with context lengths ranging from a few to 2000K tokens across a cluster with 32 A100 GPUs, Infinite-LLM demonstrates throughput improvement of 1.35-3.4x compared to state-of-the-art methods, enabling efficient and elastic LLM deployment.
66
+
67
+ # 1 Introduction
68
+
69
+ Large Language Models (LLMs)[5, 9, 15, 45, 53] have significantly advanced the field of generative artificial intelligence, and these inspiring capabilities have been integrated into various aspects of daily life. The universality of LLMs is
70
+
71
+ evident across numerous domains, such as programming copilot[16, 22], document summarization[50, 57], information retrieval[46, 60], and chatbots[25, 52]. The inference serving of LLMs [2, 6, 29] has emerged as a critical component within cloud infrastructures.
72
+
73
+ Today's LLM serving typically employs autoregressive mechanisms[14, 38, 44, 49] to iteratively generate output tokens and intermediate contexts (a.k.a. KVCache[36]). The autoregressive nature of these models introduces a characteristic of unpredictability in the sequence of generated tokens, as the process continues until the generation of an end token. As a result, the required memory and computational resources for LLM services dynamically change, with neither the lifetime nor the length of the context known a priori. With the rapid development of LLMs, the supported context is continuously expanding[10]. Multiple LLM vendors have significantly increased their capacity to millions of tokens—e.g., 128K for ChatGPT [35], 1000K for Google's Gemini [21], and 2000K for LongRoPE [19].
74
+
75
+ In LLM cloud service systems, resource demands are highly dynamic due to the enormous dynamicity and unpredictability of context generation tasks for LLMs. Since context generation tasks may generate arbitrary lengths, from as few as 1 to up to 2000K tokens, the cloud services must cater to a broad range of demands. Due to the unpredictable length of context generated by each request, pre-assigning resources accurately becomes unfeasible, leading to highly varied demands for computing and memory resources. For example, a single instance (i.e., a model replica deployed to handle request data in parallel) might manage numerous compute-intensive short-context tasks at one time and switch to memory-intensive long-context tasks or a mix of varying lengths at another.
76
+
77
+ The preceding complexity in dynamic resource demands results in LLM service systems that struggle to efficiently and
78
+
79
+ elastically adapt to varying workload requirements under different context lengths. This often leads to reduced system efficiency, manifesting primarily in two aspects:
80
+
81
+ Inefficient Model Parallelism inside an Instance. The model parallelism strategy required for processing requests with short and normal-length contexts differs significantly from that for long contexts. Traditional LLM service systems use a fixed model parallelism, where each instance is allocated a fixed number of GPUs. This fixed allocation makes it challenging to flexibly support both long and normal-length contexts efficiently. For example, processing a context with a length of 1K tokens on the Llama-7B model requires approximately 15 GB of memory, a fraction of an A100 GPU's capacity, while a context with a length of 1000K tokens demands over 500 GB, equivalent to the combined memory of about 7 A100 GPUs. Consequently, a higher degree of parallelism (DoP) is necessary for longer requests to meet their extensive resource needs, in stark contrast to the minimal requirements of shorter tasks. Configuring the system to meet the high DoP needed for long requests results in excessive model slicing and communication overhead for shorter requests, severely impacting performance. Existing parallelism strategies such as Tensor Parallelism and Pipeline Parallelism [34, 42], which are based on static model dimensions, struggle to adapt flexibly to such dynamic workloads. A back-of-the-envelope estimate of these memory figures is sketched below.
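
The sketch below assumes LLaMA-7B-like shapes (32 layers, hidden size 4096, fp16 weights of roughly 13.5 GB) and is only a rough approximation of the quoted numbers, not a measurement.

```python
def llama7b_memory_gb(context_tokens, num_layers=32, hidden_size=4096,
                      bytes_per_elem=2, weight_gb=13.5):
    # KVCache stores one key and one value vector per layer per token, plus the model weights.
    kv_bytes = 2 * num_layers * hidden_size * bytes_per_elem * context_tokens
    return weight_gb + kv_bytes / 1e9

print(round(llama7b_memory_gb(1_000), 1))       # ~14 GB for a 1K-token context
print(round(llama7b_memory_gb(1_000_000), 1))   # ~538 GB for a 1000K-token context
```
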
82
+
83
+ Inefficient Resource Management across Instances. The dynamic lengths of requests also limit the efficiency of resource management across instances and the cluster throughput. In particular, it is difficult for the scheduler to find an optimal request placement to saturate both memory and compute utilization. This is because the memory utilization is determined only by the total KVCache, whereas the compute utilization largely depends on the batch size, i.e., number of running requests. For example, when a request grows too long on an instance, its KVCache will consume too much memory space on that instance, which in turn will greatly reduce the running batch size and compute utilization, even the memory is saturated. Similarly, when requests are short on an instance, the spare memory also cannot be harvested by the long requests on other instances. As a result, the overall cluster throughput would be limited.
84
+
85
+ Through an in-depth analysis of the computational characteristics of LLM models, we identified that the root of the challenges lies in the significant differences between attention and non-attention layers: non-attention layers exhibit static behavior with changing sequence lengths and are sensitive to batch size, while attention layers display dynamic behavior and are not affected by the batch size. To address these challenges, we present Infinite-LLM, a novel LLM service system designed for managing highly dynamic context lengths in LLM requests. Infinite-LLM introduces a new approach that decouples the computation of attention layers from the rest of the LLM model. This decoupling allows for flexible and independent resource scheduling, specifically
86
+
87
+ targeting the memory needs of dynamic attention layers and computation needs of the rest of the LLM model. Additionally, Infinite-LLM optimizes resource allocation by using the entire cluster's GPU memory as a pooled resource, allowing instances with surplus memory capacity to aid those processing extensive context tasks. This method not only significantly enhances resource efficiency and system throughput but also enables the cluster to support tasks with extremely long context lengths that surpass the memory limits of a single instance.
88
+
89
+ The contributions of this paper are summarized as follows:
90
+
91
+ - We reveal the dynamic characteristic of LLM request serving, and identify the limitations inherent in existing static model parallelism deployments and KV-Cache scheduling within a single instance.
92
+ - We present DistAttention, a novel attention mechanism that is mathematically equivalent to the original attention, designed to flexibly disaggregate attention computation and KVCache in a distributed way.
93
+ - We propose Infinite-LLM, an efficient LLM serving system specifically designed to adapt to the LLM serving dynamicity. It is capable of supporting scalable context length efficiently, by scheduling KVCache in cluster-level, thus to balance resource requirements between instances and achieve high overall system throughput.
94
+ - Evaluations show that Infinite-LLM can serve 2,000K tokens with 32 GPUs, achieving $1.35 - 3.4 \times$ improvement in end-to-end performance compared to state-of-the-art LLM serving system.
95
+
96
+ # 2 Background and Motivation
97
+
98
+ # 2.1 LLM Serving and Parallelism Method
99
+
100
+ LLM Inference. Large Language Models (LLMs)[9, 15, 45] are dominated by Transformer architectures[14, 49]. A Transformer block consists of three key components: the QKV Linear layer, Multi-Head Attention Mechanism, and Feed-Forward Neural Network (FFN) modules. Multi-Head Attention involves attention kernels and persists a key-value cache (KVCache) across iterations, while both the QKV Linear layers and FFN layers are mostly General Matrix to Matrix Multiplication (GEMM) kernels. Inference serving of LLMs is autoregressive. Specifically, the prefill phase takes prompts as inputs to generate the first token, and each new token afterwards is generated iteratively until an "end-of-sequence" (EOS) token, a process usually referred to as decode. Due to the generality of LLMs, the context length of inference serving can be wide-ranging [3, 7, 11, 56], from only 1 token [51] to 2000K tokens [19]. To further scale the serving throughput capacity, multiple model replicas are deployed to handle request data in parallel (a.k.a. data-parallel). Deployed as an instance, each replica contains a full copy of model parameters. This
101
+
102
+ ![](images/f60d5e2cfdc81054287dca2dd5420b6f70ef9f0661274e171014484d7dabc336.jpg)
103
+ (a) Efficiency and max support length
104
+
105
+ ![](images/4451641fb4403badb51e9611d383d7f56fd6ba60ac7f5f1ca8dc9671ba842fca.jpg)
106
+ (b) Memory requirement with context length
107
+
108
+ ![](images/d0da5d942ed78f724bb2d9bd380293bfd1f4213c20b6954774f8a5dc64960cfd.jpg)
109
+ (c) Computational utility with different DOP
110
+
111
+ ![](images/9b10fe0b26845129ce3830e2526f0e5b5d41eeefe8e560be9f3d177e2fa94d9d.jpg)
112
+ (a) Many instances are in a state of low computational/memory utilization.
113
+
114
+ ![](images/29eaa1f51f75aa73d4bc9f880cdfb8d0b8f83ddbe2138a8e391963cfd2b1cca4.jpg)
115
+ Figure 1. Static model parallelism struggles to maintain efficiency across all context lengths
116
+
117
+ ![](images/a9bacdd03d4143a43ba5ade02e02c4ae1197e57c452df2a6d33bd1c1c4986ff0.jpg)
118
+ (b) Long request competes with short requests, resulting in low batch size and low GPU util.
119
+ (c) Computational utility on different batch size.
120
+ Figure 2. Resource under-utilization across instances in a cluster
121
+
122
+ instance is responsible for accommodating an LLM request and processing it to completion iteratively.
123
+
124
+ Model Parallelism. Model-parallelism is necessary for supporting large models that do not fit into the memory of a single device. It can expand the total memory available to the serving instance for storing its prompt inputs, model weights, and intermediate values. Tensor parallelism and pipeline parallelism are the two major categories. Tensor-parallel [42] partitions a model layer across multiple GPUs, with each GPU executing a part of inference computation in parallel. Communication is required to split input or merge output for subsequent layers in the model. Pipeline-parallel[29, 33] avoids intra-layer communication by assigning contiguous layers on different GPUs for pipeline fashion execution, introducing inter-layer communication instead.
125
+
126
+ # 2.2 System Design Challenges
127
+
128
+ In this section, we delve into the computational characteristics of attention and non-attention layers at various text lengths and the challenges that these differences pose to system design. To better illustrate our points, we conduct a series of motivating experiments using the LLaMA2-7B model on 32 A100 GPUs.
129
+
130
+ Observation 1: Instances with a higher number of GPUs are capable of supporting long-text tasks but perform poorly on normal-text tasks. As illustrated in Figure 1(a), the performance differences across these instances are stark: Instance DP1TP32, with the most GPUs in a single instance (32 GPUs), has the largest memory capacity to support text generation tasks up to 2000K in length but performs the
131
+
132
+ worst on text generation tasks of standard length (1K). Conversely, as the number of GPUs decreases in Instances DP4TP8 (with 8 A100 each) and DP32TP1 (with only 1 A100 each), the maximum supportable text length decreases, while performance on text generation tasks of standard length improves.
133
+
134
+ This phenomenon originates from the different computational characteristics of attention and non-attention layers across context lengths of high dynamic range. As shown in Figure 1(b), the tensor size of attention layers grows steadily with the context length, thus requiring more memory space and a higher degree of parallelism, typically necessitating deployment across more GPUs. For example, supporting a single text generation task of $1000\mathrm{K}$ length would require at least 8 A100 GPUs. In contrast, the tensor size of non-attention layers does not change with text length; hence, no GPU number increase is needed. Traditional LLM parallel strategies [23, 42] do not differentiate between attention and non-attention layers, applying static model splits such as tensor or pipeline parallelism indiscriminately. This nondifferentiation can lead to non-attention layers being mapped to an excessive number of GPUs, potentially reducing computational performance due to over-segmentation. As shown in Figure 1(c), for a text generation task of $1000\mathrm{K}$ length, the performance of non-attention layers on an 8 GPU instances is only about one-third of that on a single GPU instance.
135
+
136
+ Observation 2: When handling tasks with long context lengths, the computational utilization of GPU significantly decreases, and for short contexts, there is insufficient GPU memory utilization. This leads to instances typically exhibiting low computational or low memory utilization during the service process, as illustrated in
137
+
138
+ Figure 2(a). To illustrate this, we analyzed the performance of the decode phase for a single A100 LLaMA2-7B instance in a simplified scenario where all requests have the same context length. The results, depicted in Figure 2(b), show that when the context length is 20, the memory can support up to 1800 requests. However, this situation indicates over batching; compared to a batch size of 800, there is hardly any improvement in GPU compute utilization, suggesting that the additional 1000 requests do not contribute to performance improvement but instead significantly increase the request's latency. In the appropriate case with a batch size of 800, the actual GPU memory utilization is only $42\%$ . Given that single A100 GPUs have a fixed total memory capacity of 80GB, as the context length of requests increases, the system is forced to handle smaller batches. Specifically, when the context length reaches 20K, the maximum batch size is limited to 3, leading to GPU compute utilization being only one percent of what it is when the context length is 20.
139
+
140
+ This issue is rooted in the stark contrasts in computational characteristics and resource demands between attention and non-attention layers. Non-attention layers utilize weight parameters that can be shared across all input vectors for requests. This capability allows the system to transform 'matrix-vector' multiplications (GEMV) into more efficient 'matrix-matrix' multiplications (GEMM) as batch sizes increase, significantly improving the computation-to-memory ratio and achieving high computational utilization, as shown in Figure 2(c). With the continued growth in context length, LLM service systems are forced to reduce batch sizes to free up memory to accommodate the increasing KVCache of the attention layers, resulting in decreased GPU compute utilization. However, in previous systems [4, 26, 41, 55], requests could only utilize the fixed resources available within their instance, limiting the system's ability to adapt to highly dynamic resource demands.
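
As a toy illustration of this weight sharing, the snippet below contrasts a per-request GEMV with the single GEMM obtained by batching; the shapes are illustrative rather than the exact model dimensions.

```python
import numpy as np

hidden, ffn = 4096, 11008                                   # illustrative FFN shapes
W = np.random.randn(hidden, ffn).astype(np.float32)        # weights shared by all requests

# Batch size 1: each request performs its own matrix-vector product (GEMV),
# so the weight matrix is re-read from memory for every request.
y_single = np.random.randn(1, hidden).astype(np.float32) @ W

# Batch size 256: stacking the requests yields one matrix-matrix product (GEMM),
# amortizing the weight reads and raising the computation-to-memory ratio.
y_batched = np.random.randn(256, hidden).astype(np.float32) @ W

# Attention has no such shared operand: each request's query multiplies that
# request's own KVCache, so larger batches do not merge into a single GEMM.
```
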
141
+
142
+ In summary, these observations highlight the need for adaptive parallelism and resource management strategies to efficiently handle the varying demands of different context lengths, optimizing both computational and memory resource utilization across large-scale clusters.
143
+
144
+ # 3 System Overview
145
+
146
+ The key concept of Infinite-LLM is to distribute the attention computation and KVCache beyond the boundaries of LLM inference instances, in order to leverage the resources of the entire GPU cluster, as illustrated in Figure 3. This idea disaggregates the attention layers from the non-attention layers, allowing them to employ independent parallel strategies and resource scheduling policies. It further enhances the scheduling strategy's ability to effectively manage the computation and memory of GPU resources at the cluster level.
147
+
148
+ ![](images/22e102a428cbef979cfdbb1c170c588f9c4ea585415280e39e1dc5d824d85949.jpg)
149
+ Figure 3. Infinite-LLM System Overview
150
+
151
+ Infinite-LLM accomplishes the design goal through three main system innovations. First, we introduce DistAttention, a novel attention mechanism that subdivides attention computations across GPUs, meanwhile avoiding KVCache transfer at decoding. DistAttention is mathematically equivalent to the common attention modules, such as multi-head attention, multi-query attention, and grouped-query attention[39, 49]. The new distributed attention mechanism allows partition the attention in arbitrary size of sequence length and introduces negligible output data to be transferred for the other layers. Therefore, attention can be efficiently disaggregated in a scalable way (Section 4). Second, DistAttention can be utilized in multiple ways to resolve the resource contention and low efficiency of processing long requests and small requests thereby to significantly improve the cluster throughput. We model the major attention and non-attention cost of using DistAttention to formulate the aggregated cluster throughput. Achieving the optimal cluster throughput is costly as LLM serving is dynamic and unpredictable. Infinite-LLM includes a greedy scheduling policy based on our empirical study, approaching the improved cluster throughput and efficiency (Section 5). Third, Infinite-LLM introduces a new centralized controller, gManager, to host the scheduling policy and coordinate the dynamic inter-instance KVCache tracking and migration. For scalability and fault tolerance, gManager works with a series of rManagers in a distributed architecture through a set of newly defined protocol (Section 6).
152
+
153
+ # 4 DistAttention
154
+
155
+ To achieve dynamic and flexible management of the KVCache, we propose DistAttention, a method that subdivides attention and the KVCache into regular small sub-blocks, thereby allowing the attention layer to be efficiently distributed and computed across multiple instances. Unlike traditional model parallelism methods, DistAttention is characterized by its slicing along the dynamic sequence dimension of the KVCache, allowing newly generated tokens in the auto-regressive process to be flexibly grouped, scheduled,
156
+
157
+ and computed. Although the KVCache tensor in the original attention can also be partitioned along the sequence dimension, the complex computation pattern of attention means that direct partition introduces significant communication overhead, greatly affecting the computational efficiency of distributed Attention. Inspired by online softmax[37], DistAttention successfully addresses this issue through an equivalent mathematical transformation on the original attention. Equation 1 shows the original computation formula of attention, requiring calculating the maximum attention score $(m_g$ in Equation 1) across all sequences and summing the intermediate results along the sequence dimension, thus necessitating the entire KVCache of all sequences. If KVCache is directly partitioned and stored in a distributed manner, it would necessitate transferring the KVCache from remote instances back to the local machine for each attention computation. As illustrated in Figure 4(a), given the substantial size of the KVCache, each decoding step needs to transfer GBs or even TBs of data, significantly impacting the performance of distributed attention computation.
158
+
159
+ $$
160
+ m _ {g} = \max (Q K _ {1}, \dots , Q K _ {s e q})
161
+ $$
162
+
163
+ $$
164
+ \operatorname {Attention} (Q, K, V) = \sum_ {i = 1} ^ {seq} \frac {\exp \left(Q K _ {i} ^ {T} - m _ {g}\right)}{\sum_ {j = 1} ^ {seq} \exp \left(Q K _ {j} ^ {T} - m _ {g}\right)} V _ {i} \tag {1}
165
+ $$
166
+
167
+ DistAttention's equivalent mathematical transformation of the original attention avoids the need to perform the max and summation operations across the full sequence. It allows each instance to execute the max and summation operations locally on partial KVCache data covering a partial sequence length $seq_{p}$. As shown in Equation 2, MicroAttention (MA) refers to the partial attention computations that result from the partition and can be distributed across various instances for computation. Consequently, for each distributed computation of attention, instances need to transfer the query vector along with only two float values, $e_{j}$ and $m_{j}$.
168
+
169
+ $$
170
+ m_{j} = \max(Q K_{1}, \dots , Q K_{seq_{p}}), \quad e_{j} = \sum_{i=1}^{seq_{p}} \exp(Q K_{i}^{T} - m_{j})
171
+ $$
172
+
173
+ $$
174
+ MA_{j}(Q, K, V) = \sum_{i=1}^{seq_{p}} \exp\left(Q K_{i}^{T} - m_{j}\right) V_{i} \tag{2}
175
+ $$
176
+
177
+ $$
178
+ m_{g} = \max(m_{1}, \dots , m_{b}), \quad e_{g} = \sum_{j=1}^{b} e_{j} \exp(m_{j} - m_{g})
179
+ $$
180
+
181
+ $$
182
+ \operatorname{Attention}(Q, K, V) = \sum_{j=1}^{b} \frac{MA_{j} \exp\left(m_{j} - m_{g}\right)}{e_{g}} \tag{3}
183
+ $$
184
+
185
+ The intermediate results computed by the $b$ remote instances are then transferred back to the local instance for aggregation (Equation 3), arriving at an outcome equivalent to the original attention, as indicated in Figure 4(b). Because the FLOPs of the aggregation are less than $1\%$ of the total MA computational load, this overhead is virtually negligible. Since the query round trip involves only a few KBs
186
+
187
+ Figure 4. DistAttention reduces the communication overhead through equivalent mathematical transformations.
188
+ ![](images/7f4fc3c9dc35bb3acd3b87afb3631d4da543fe98599b166b8ea4010c5d97b6d6.jpg)
189
+ (a) Original attention necessitates traversing the full sequence, thus requiring communication of KVCache.
190
+
191
+ ![](images/14a19de511a0ace6643faf85c1e518ff205eca4502525a6ad844358a6957b4be.jpg)
192
+ (b) DistAttention achieves communication of only the query through equivalent mathematical transformations on original attention.
193
+ (c) Time comparison between shipping KVCache and shipping the query.
194
+
195
+ <table><tr><td>Context len</td><td>8192</td><td>16384</td><td>32768</td><td>65536</td><td>131072</td></tr><tr><td>ship query</td><td>0.075ms</td><td>0.081ms</td><td>0.12ms</td><td>0.21ms</td><td>0.36ms</td></tr><tr><td>ship kvcache</td><td>0.581ms</td><td>1.080ms</td><td>1.98ms</td><td>3.77ms</td><td>7.48ms</td></tr></table>
196
+
197
+ of data, DistAttention substantially reduces the data communication overhead, as depicted in Figure 4(c).
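+
+ To make the transformation concrete, the following is a minimal NumPy sketch (our own illustration, not the paper's kernel) of MicroAttention and the aggregation step; the single query vector, single head, and fixed chunking are simplifying assumptions.
+
+ ```python
+ import numpy as np
+
+ def micro_attention(q, K_part, V_part):
+     # MicroAttention over one KVCache partition (Equation 2).
+     # q: (d,); K_part, V_part: (seq_p, d)
+     scores = K_part @ q                  # attention scores for this partition
+     m_j = scores.max()                   # local maximum
+     w = np.exp(scores - m_j)
+     e_j = w.sum()                        # local normalizer
+     ma_j = w @ V_part                    # unnormalized partial output, shape (d,)
+     return ma_j, m_j, e_j
+
+ def aggregate(partials):
+     # Merge the partial results into the exact attention output (Equation 3).
+     m_g = max(m_j for _, m_j, _ in partials)
+     e_g = sum(e_j * np.exp(m_j - m_g) for _, m_j, e_j in partials)
+     return sum(ma_j * np.exp(m_j - m_g) for ma_j, m_j, _ in partials) / e_g
+
+ # Sanity check: chunked MicroAttention plus aggregation matches full attention.
+ d, seq = 8, 64
+ rng = np.random.default_rng(0)
+ q, K, V = rng.normal(size=(d,)), rng.normal(size=(seq, d)), rng.normal(size=(seq, d))
+ parts = [micro_attention(q, K[i:i + 16], V[i:i + 16]) for i in range(0, seq, 16)]
+ assert np.allclose(aggregate(parts), aggregate([micro_attention(q, K, V)]))
+ ```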
198
+
199
+ # 5 Cluster-scale Throughput Optimization
200
+
201
+ # 5.1 Overview
202
+
203
+ DistAttention allows Infinite-LLM to place and compute a single request across multiple instances. This is not merely a means to enable serving extremely long requests beyond a single instance's capacity; we show that, from a cluster perspective, DistAttention is a powerful weapon as it greatly enlarges the request scheduling space across instances and can improve cluster-wide throughput. In particular, with DistAttention, Infinite-LLM is no longer limited to scheduling KVCache of each whole request on an instance; instead, Infinite-LLM can schedule any arbitrary sub-blocks of request's KVCache onto instances, which represents a much finer scheduling granularity and higher flexibility than existing systems.
204
+
205
+ Such sub-block level scheduling is beneficial because Infinite-LLM can better balance the KVCache and the batch sizes across instances, thereby maximizing memory and compute utilization simultaneously. Specifically, Infinite-LLM maintains balanced batch sizes by controlling the number of sub-blocks on each instance, preventing individual requests from occupying too much memory and shrinking the batch size. Figure 5 shows an intuitive example. Figure 5(a) shows the initial KVCache distribution across four serving instances. Instance A is processing a long request that saturates all of its memory, while Instance D is processing a long request but still has some GPU memory left.
206
+
+ Figure 5. Effects of two different placement methods: (a) original state, where Inst A is saturated while Inst D still has free memory; (b) placement approach 1, where Inst A passively offloads its overflowing blocks to Inst B; (c) placement approach 2, where Inst A and Inst D proactively offload more blocks to Inst B and Inst C. The panels also show the resulting batch size of each instance.
264
+
265
+ Instances B and C are handling short requests and, despite high batch sizes, still have ample memory space. Figure 5(b) shows a strawman placement strategy that only places the newly generated blocks onto the instance with the most remaining space once the length of a long request exceeds the memory capacity of its instance. While this approach can support long requests that exceed the resource capacity of a single instance, the cluster as a whole maintains a relatively low throughput: the batch size of Instance A remains at 1, and the newly generated attention sub-blocks of the long request on Instance D compete with local short requests, reducing the batch size and resulting in low GPU compute utilization. The second placement method (Figure 5(c)) proactively places more sub-blocks onto other instances with sufficient available space before the length of a long request exceeds the instance memory capacity. As shown in Figure 5(c), Instances A and D proactively place more attention sub-blocks onto Instances B and C, freeing up memory space to handle more short requests and thereby increasing the batch size. Compared to the first method, this proactive placement balances the batch sizes among instances, improving the overall cluster throughput.
266
+
267
+ The insights summarized from the above study inspire us to devise a scheduling method for placing attention sub-blocks that enhances the overall throughput of the cluster as much as possible. Our scheduling method must address three main issues: (1) if an instance offloads part of its KVCache to remote instances, how do we determine a proper size, and what are the performance gain and overhead? (2) if an instance lends some space out, how much space should be lent? (3) given that there are numerous instances, how do we decide the borrow-lend relationships that maximize the overall performance?
268
+
269
+ ![](images/8abf301b2283e55888e85582ce7f326cb1d210c51b7f1e914eb098572c2478df.jpg)
270
+
271
+ ![](images/a588eda8a213c6ee3e24d250ac14ebccc1fcf7888fed98420329f17d2aba0ff1.jpg)
272
+ (a) Overlapping query transmission and MicroAttention computation
273
+ (b) Overlapping KVCache transmission and model inference
274
+ Figure 6. Communication Overlapping Optimization
275
+
276
+ # 5.2 Debtors and Creditors
277
+
278
+ To address these problems, we discuss the performance impacts between instances that borrow or lend memory space. We refer to instances that borrow memory from others as debtors, such as Instances A and D in Figure 5, and to instances that lend memory as creditors, such as Instances B and C. We do not allow an instance to act as both a debtor and a creditor simultaneously. Whenever a debtor has free local space (e.g., a request retires and frees up its memory), it prefers to increase its batch size or to retrieve attention sub-blocks that were previously offloaded, instead of lending the space to others. Similarly, a creditor lacking sufficient local space will reclaim the memory that it has previously lent out.
279
+
280
+ 5.2.1 Debtors Debtors borrow memory space from one or more creditors to store portions of their KVCache. This operation has both positive and negative impacts on their performance. The primary benefit is that offloading part of the attention computation to creditors reduces the time debtors spend generating tokens, and the freed-up space allows higher batch size, thereby improving generation throughput.
281
+
282
+ This approach also introduces several challenges. First, debtors must collect the partial attention results from creditors to complete the attention calculation. If creditors compute an excessive number of MicroAttention (MA) operations, it may lead to idle waiting on the debtors and thus reduce performance. As shown in Figure 6(a), to mitigate this issue, our scheduling policy limits the size of the KVCache on remote instances so that the remote computation and transmission can be entirely covered by local computation. Second, transferring the KVCache to creditors is time-consuming. To minimize the impact of KVCache transfer on LLM inference performance, as shown in Figure 6(b), we overlap the transfer with local model inference.
+
+ Micro-benchmarking and performance analysis. Figure 7(a) depicts the debtor's throughput as a function of the number of KVCache blocks moved to the creditor, represented by the dashed line. As more KVCache blocks are offloaded, the debtor's throughput greatly increases because the debtor enjoys a boost in performance when its batch size is very
283
+
284
+ ![](images/8fc4b10a781b220969c352ccca150341410cc26d14eb16c5ad5df4634796eea3.jpg)
285
+ (a) Debtor's throughput with MA Block Num
286
+
287
+ ![](images/119909835c2e4ea3a542ee02b15a20bdbc45aaed1bca622446bf92aaf43add44.jpg)
288
+ Figure 7. Experiment is conducted using the LLaMA2-7B model on two A100 instances. One instance acts as a creditor executing normal-length requests (500 tokens on average). The other acts as a debtor, processing a long context (1000K tokens).
289
+
290
+ ![](images/e92e8dbb2cf984610283047c1a9ce5f730ff6dece34ae3bbed4b8235213c24c2.jpg)
291
+ (b) Creditor's throughput with MA Block Num
292
+ (c) Overall throughput with MA Block Num
293
+
294
+ low. As the batched requests approach the computational limits of the system, this trend eventually plateaus.
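+
+ As a rough sketch of the transfer/compute overlap shown in Figure 6(b), KVCache block copies can be issued on a separate CUDA stream while the decode step runs; this assumes PyTorch, and the function and buffer names are hypothetical rather than Infinite-LLM's implementation.
+
+ ```python
+ import torch
+
+ def decode_step_with_migration(model_step, kv_blocks_to_send, dst_buffers,
+                                copy_stream=None):
+     # Issue KVCache block copies on a side stream, then run the regular decode
+     # step so the transfer overlaps with local model inference.
+     copy_stream = copy_stream or torch.cuda.Stream()
+     with torch.cuda.stream(copy_stream):
+         for blk, dst in zip(kv_blocks_to_send, dst_buffers):
+             dst.copy_(blk, non_blocking=True)            # asynchronous block transfer
+     out = model_step()                                   # non-attention + attention work
+     torch.cuda.current_stream().wait_stream(copy_stream) # copies done before reuse
+     return out
+ ```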
295
+
296
+ 5.2.2 Creditors Creditors lend their excess memory space to one or multiple debtors. Due to the additional computation of partial attention for debtors, the performance of creditors' local request services is negatively affected.
297
+
298
+ Micro-benchmarking and performance analysis. Figure 7(b) reveals a slow but steady performance degradation with more KVCache moved to the creditor. When the transferred KVCache exceeds the surplus memory space of the creditor, the batch size of the creditor is reduced, resulting in an even steeper decline in performance.
299
+
300
+ 5.2.3 Overall cluster throughput The overall system throughput is obtained by summing the throughput of all instances in the cluster. Our optimization goal is to find an effective KVCache placement strategy that enhances the throughput of the entire system. For example, Figure 7(c) presents the aggregated performance of a micro-benchmark for one debtor and one creditor. As KVCache blocks are transferred, the total throughput increases from approximately 6500 tokens/second to about 8800 tokens/second; beyond the optimum, moving even more MA blocks to the creditor makes the overall throughput decline sharply. Under this configuration, the system achieves maximum aggregate throughput when 200 blocks of KVCache are transferred.
301
+
302
+ Complexity analysis. However, determining a KVCache placement schedule for a real cluster is very difficult because the design space is prohibitively large. Consider a cluster with $N$ instances as debtors and $M$ as creditors, and let $Y_{i}$ denote the number of surplus memory blocks in creditor $i$. For each block, there are $N + 1$ possible options: offering it to one of the $N$ debtors or not lending it out at all. Supposing all memory blocks make this decision independently, the search space is $(N + 1)^{\sum_{i = 1}^{M}Y_{i}}$. However, the blocks within each creditor are homogeneous. After deduplication, the final search space size is as follows:
303
+
304
+ $$
305
+ \frac{(N + 1)^{\sum_{i=1}^{M} Y_{i}}}{\prod_{i=1}^{M} Y_{i}!} \tag{4}
306
+ $$
307
+
308
+ Such a huge design space makes it impractical to figure out the optimal cluster throughput at runtime. Next, to avoid the overhead of empirical measurements, we construct a performance model to predict the overall cluster throughput for a given placement strategy, and we propose an optimization algorithm based on this model.
309
+
310
+ # 5.3 Scheduling Algorithm
311
+
312
+ To efficiently determine a good KVCache placement schedule at runtime, we propose a greedy algorithm based on a performance model.
313
+
314
+ Performance modeling. Equation 5 outlines a general analytical model for a single transformer layer, comprising both non-attention and attention parts. The computational load of all non-attention layers, denoted as $W(\beta)$, is primarily influenced by the batch size $\beta$. The GPU's achieved performance (FLOPs/s), denoted as $f(\beta)$, is closely tied to the batch size and can be experimentally measured. The workload of attention layers is dictated by the requests' lengths $S$. Since attention layers cannot benefit from batching, their GPU performance, denoted as $g(S)$, typically remains constant and is also ascertainable through experimental measurement.
315
+
316
+ $$
317
+ T^{lyr}(\beta, S) = T^{natn}(\beta) + T^{atn}(S) = \frac{W(\beta)}{f(\beta)} + \sum_{r=1}^{\beta} \frac{S^{r}}{g(S)} \tag{5}
318
+ $$
319
+
320
+ Equation 6 extends this performance model to specifically address the roles of debtors and creditors within the system. In Infinite-LLM, a debtor can offload KVCaches of size $K^d$ to creditors, allowing an increase in its batch size to $\beta'$ . Meanwhile, a creditor may allocate space for KVCaches of size $K^c$ to debtors while maintaining its original batch size $\beta$ .
321
+
322
+ $$
323
+ T_{dbt}^{lyr}\left(\beta^{\prime}, K^{d}\right) = T^{lyr} - \frac{K^{d}}{g(S)}, \quad T_{cdt}^{lyr}\left(\beta, K^{c}\right) = T^{lyr} + \frac{K^{c}}{g(S)} \tag{6}
324
+ $$
325
+
326
+ Combining the formulations above, the per-instance throughput (i.e., tokens per second) is $TPS = \frac{\beta}{n \cdot T^{lyr}}$, where $n$ represents the number of transformer layers of the LLM. For a cluster deployed with $M$ instances, the overall aggregated
327
+
328
+ throughput equals the sum of the TPS of all instances:
329
+
330
+ $$
331
+ TPS_{cluster} = \sum_{i=1}^{M} TPS_{i} \tag{7}
332
+ $$
333
+
334
+ We have validated the accuracy of the performance model, as shown in Figure 7: the results predicted by the performance model are consistent with the real measurements. Having obtained the formula for the overall cluster throughput, we can estimate the cluster throughput under any specific schedule.
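+
+ For illustration, the performance model of Equations 5-7 can be evaluated as in the sketch below; W, f, and g are placeholder callables that would be fit from profiling, and the conversion from KVCache blocks to attention time is a simplification we assume.
+
+ ```python
+ def layer_time(beta, seq_lens, W, f, g):
+     # Equation 5: batched non-attention time plus per-request attention time.
+     return W(beta) / f(beta) + sum(s / g(s) for s in seq_lens)
+
+ def instance_tps(beta, seq_lens, n_layers, W, f, g, k_offloaded=0, k_hosted=0):
+     # Equation 6: a debtor saves the time of the K^d blocks it offloads;
+     # a creditor pays for the K^c blocks it hosts on behalf of debtors.
+     s_ref = max(seq_lens) if seq_lens else 1
+     t = layer_time(beta, seq_lens, W, f, g) + (k_hosted - k_offloaded) / g(s_ref)
+     return beta / (n_layers * t)          # tokens per second for this instance
+
+ def cluster_tps(instances, **model):
+     # Equation 7: aggregate throughput of all instances in the cluster.
+     return sum(instance_tps(**inst, **model) for inst in instances)
+
+ # Toy profiles and two instances (one debtor, one creditor); values are placeholders.
+ W = lambda b: 2.0e9 * b                    # FLOPs of non-attention layers at batch b
+ f = lambda b: 300e12 * min(b, 64) / 64     # achieved FLOPs/s grows with batch size
+ g = lambda s: 5.0e5                        # attention throughput, roughly constant
+ insts = [dict(beta=2, seq_lens=[100_000, 100_000], n_layers=32, k_offloaded=20_000),
+          dict(beta=64, seq_lens=[500] * 64, n_layers=32, k_hosted=20_000)]
+ print(cluster_tps(insts, W=W, f=f, g=g))
+ ```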
335
+
336
+ Greedy Algorithm. We propose a greedy algorithm to approximately maximize the cluster throughput with DistAttention. Our algorithm is founded on one principle: pairing overloaded debtor instances with free creditor instances to continuously perform load-balancing scheduling, thereby enhancing overall throughput.
337
+
338
+ As illustrated in Algorithm 1, we select instances with batch sizes smaller than an empirical threshold $\beta^{thres}$ as debtor instances, while those with memory utilization below an empirical threshold $U^{thres}$ serve as creditor instances. Since instances with small batch sizes empirically have the greatest performance potential, debtors are processed in ascending order of batch size. In each round, the longest request $r$ is selected from the debtor, together with the creditor that has the most available memory. The possible number of blocks to move for request $r$ is explored to estimate the potential throughput gain. Specifically, we first establish the upper limit on the number of MA blocks that can be offloaded, which corresponds to the number of MA blocks of request $r$. Under this constraint, $Block_{max}$, the performance model of the aggregated throughput of the two instances (Equation 7) is used to determine the number of offloaded blocks between 0 and $Block_{max}$. For each debtor, the algorithm loops over the creditors in order until no performance gain can be achieved from further memory block movement. In Infinite-LLM, the algorithm runs periodically and retrospectively to adapt to the dynamic and unpredictable load of LLM serving.
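+
+ The sketch below is our Python reading of Algorithm 1; estimate_pair_tps stands in for the Equation 7 estimate of the combined throughput of a debtor-creditor pair, and the instance fields (batch_size, mem_util, total_blocks, longest_request) are hypothetical.
+
+ ```python
+ def greedy_schedule(instances, beta_thres, util_thres, estimate_pair_tps):
+     # Debtors: small batch size; creditors: low memory utilization (lines 1-4).
+     debtors = sorted((i for i in instances if i.batch_size <= beta_thres),
+                      key=lambda i: i.batch_size)
+     creditors = sorted((i for i in instances if i.mem_util <= util_thres),
+                        key=lambda i: i.mem_util)
+     plan = []
+     for d in debtors:
+         req = d.longest_request()
+         block_max = req.num_ma_blocks
+         for c in creditors:
+             # Try every offload size up to block_max and keep the best estimate.
+             best_k, best_tps = 0, estimate_pair_tps(d, c, 0)
+             for k in range(1, block_max + 1):
+                 tps = estimate_pair_tps(d, c, k)
+                 if tps > best_tps:
+                     best_k, best_tps = k, tps
+             if best_k <= 0:
+                 break                          # no further gain for this debtor
+             plan.append((d, c, best_k))        # move best_k KVCache blocks d -> c
+             c.mem_util += best_k / c.total_blocks
+             creditors.sort(key=lambda i: i.mem_util)
+             block_max = best_k                 # as in Algorithm 1, line 17
+     return plan
+ ```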
339
+
340
+ # 6 System Design
341
+
342
+ # 6.1 gManager and rManager
343
+
344
+ To realize the global planning described in Section 5, Infinite-LLM employs a centralized manager, termed the gManager, to maintain a global view of instance status and make the request and KVCache placement decisions. The gManager tracks the KVCache placement of each request on the instances, maintained in the request placement map, where each entry represents (part of) the KVCache memory usage of a request on a certain instance. A request is allowed to be distributed over multiple instances, i.e., to have multiple entries. One of the entries/instances of a request is marked as its debtor instance. Utilizing this data, the gManager tracks the
345
+
346
+ Algorithm 1 Cluster-level DistAttention Scheduling
347
+ 1: Collect debtors with small batch size $D^i \in \{I^i, \beta^i \leq \beta^{thres}\}$
348
+ 2: Sort debtors in increased batch size order $\langle I^i, \beta^i \rangle$
349
+ 3: Collect creditors with low memory utility $C^i \in \{I^i, U^i \leq U^{thres}\}$
350
+ 4: Sort creditors in increased memory utility order $\langle I^i, U^i \rangle$
351
+ 5: for $D^i \in D$ do
352
+ 6: $r = \text{pick\_longest\_request}(D^i)$
353
+ 7: $Block_{max} = \text{get\_block\_num}(r)$
354
+ 8: for $C^j \in C$ do
355
+ 9: for $k \in \text{range}(0, Block_{max})$ do
356
+ 10: $\langle perf, k \rangle = \text{perf\_model\_throughput\_esti}(k, D^i, C^j)$
357
+ 11: end for
358
+ 12: $Block_{best} = \text{pick\_max\_perf}(\langle perf, k \rangle)$
359
+ 13: if $Block_{best} <= 0$ then break;
360
+ 14: end if
361
+ 15: move_kvcache $(D^i, C^j, Block_{best})$
362
+ 16: update_and_sort_memUtil(C)
363
+ 17: $Block_{max} = \text{Block}_{best}$
364
+ 18: end for
365
+ 19: end for
366
+
367
+ current status of request placements and then derives a new expected placement status and the transition plan.
368
+
369
+ Considering the rapidly changing memory usage of requests, tracking the status of every single request precisely in the gManager would be prohibitively expensive. Infinite-LLM develops a distributed and coordinated system architecture to implement the global planning efficiently. As shown in Figure 3, Infinite-LLM introduces a series of distributed rManagers co-located with the instances. The gManager and rManagers work in a loosely coordinated manner. That is, instead of keeping the global view in sync with the real request statuses, the gManager relies on periodic heartbeat signals from the rManager of each instance to convey updates about the KVCache memory usage of the requests on it. This approach reduces the overhead of global planning and enhances system performance. After receiving a full update from all instances in each round, the gManager calculates a status transition plan and finally instructs the rManagers to move KVCaches if needed.
370
+
371
+ Under this architecture, Infinite-LLM needs to deal with the potential staleness of the global view during the periodic updates. In normal cases, the running batch on each instance and its KVCache memory usage keep growing as the decoding computation proceeds after a periodic status update. Moreover, due to the continuous batching behavior, the memory usage may experience a steeper change when certain requests complete or new requests join. We design a protocol among the gManager and rManagers to implement this interaction.
372
+
373
+ ![](images/391b66ead117327f42ba39d12a25da889d4502dbb50ce0ccebff6f86dae33eee.jpg)
374
+ Figure 8. Overall workflow of Infinite-LLM's protocol
375
+
376
+ # 6.2 Protocol
377
+
378
+ Figure 8 shows the overall workflow of Infinite-LLM's protocol. We summarize the APIs used in the protocol in Listing 1. Each rManager reports its local status using the heartbeat API ①, which includes an array of request placement entries (shown by the RequestPlacementEntry struct). Note that when a request is located on multiple instances, its status on certain instances may not change during a period if the newly generated KVCache is not placed on them. Therefore, in normal cases, the rManager only sends the entries that have changed since the last update to the gManager. An exception is that when a new gManager is initialized (e.g., after a failover), the rManager sends the full information to help the gManager construct the initial status. The gManager updates these entries into the global request placement map accordingly ②. The gManager then dispatches its request placement decisions using the move_kvcache API, which instructs an instance to move a certain number of KVCache blocks to a destination instance ③.
379
+
380
+ ```python
+ class RequestPlacementEntry: req_id: int, inst_id: int, num_blocks: int, local: bool
+ heartbeat(List[RequestPlacementEntry]) -> None
+ move_kvcache(req_id: int, num_blocks: int, dst_inst: int) -> None
+ try_move_kvcache(req_id: int, num_blocks: int) -> bool
+ ```
386
+
387
+ # Listing 1. Infinite-LLM APIs
388
+
389
+ Considering the potential staleness of the request placement map, an instruction from the gManager to move KVCache to another instance could be infeasible, for example, when the KVCache on the destination instance has grown and the memory space has become insufficient. Therefore, Infinite-LLM further provides the try_move_kvcache API for the source instance to reserve space on the destination instance before transferring the real KVCache data ④. The destination side may receive multiple concurrent try_move_kvcache calls from other instances. The destination instance uses a first-come-first-served policy to decide the allocation among these competing candidates; if the total space is not enough
390
+
391
+ <table><tr><td colspan="2">Trace</td><td>Range</td><td>Avg.</td><td>SD</td></tr><tr><td rowspan="3">S</td><td>0</td><td>1-60k</td><td>1233</td><td>7785.68</td></tr><tr><td>1</td><td>1-60k</td><td>712</td><td>5531.4</td></tr><tr><td>2</td><td>1-60k</td><td>469</td><td>3506.36</td></tr><tr><td rowspan="6">L</td><td>3</td><td>1-200k</td><td>56362</td><td>28787.78</td></tr><tr><td>4</td><td>1-280k</td><td>75650</td><td>39479.42</td></tr><tr><td>5</td><td>1-600k</td><td>160239</td><td>87906.67</td></tr><tr><td>6</td><td>1-480k</td><td>128804</td><td>70647.93</td></tr><tr><td>7</td><td>1-1200k</td><td>293945</td><td>172169.14</td></tr><tr><td>8</td><td>1-2000k</td><td>498609</td><td>261817.24</td></tr></table>
392
+
393
+ Table 1. Ranges, average values, and standard deviations (SDs) of context lengths of the traces.
394
+
395
+ to satisfy all of them, the destination instance rejects some of them. The destination instance responds with a boolean value indicating whether the KVCache movement is allowed or rejected ⑤. If it is allowed, the source instance proceeds to transfer the data; otherwise, it simply waits for further instructions from the gManager in future rounds, which will have captured the latest cluster status.
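+
+ The following sketch illustrates how the Listing 1 APIs could interact on the rManager side; the class layout, transport, and helper names are assumptions for illustration, not Infinite-LLM's actual code.
+
+ ```python
+ from dataclasses import dataclass
+ from typing import Dict
+
+ @dataclass
+ class RequestPlacementEntry:
+     req_id: int
+     inst_id: int
+     num_blocks: int
+     local: bool            # True on the instance that is the debtor for the request
+
+ class RManager:
+     """Per-instance manager; transport to the gManager and peers is abstracted away."""
+     def __init__(self, inst_id: int, free_blocks: int, gmanager):
+         self.inst_id = inst_id
+         self.free_blocks = free_blocks
+         self.gmanager = gmanager
+         self.entries: Dict[int, RequestPlacementEntry] = {}
+         self.dirty = set()                     # req_ids changed since last heartbeat
+
+     def send_heartbeat(self):
+         # Step 1: report only the entries that changed during this period.
+         self.gmanager.heartbeat([self.entries[r] for r in self.dirty])
+         self.dirty.clear()
+
+     def try_move_kvcache(self, req_id: int, num_blocks: int) -> bool:
+         # Steps 4-5 (destination side): first-come-first-served reservation.
+         if self.free_blocks >= num_blocks:
+             self.free_blocks -= num_blocks
+             return True
+         return False
+
+     def move_kvcache(self, req_id: int, num_blocks: int, dst: "RManager"):
+         # Step 3 (source side): reserve space on the destination, then ship the
+         # blocks, overlapping the transfer with local inference; on rejection,
+         # wait for the gManager's decision in a future round.
+         if dst.try_move_kvcache(req_id, num_blocks):
+             self._transfer_blocks(req_id, num_blocks, dst)
+
+     def _transfer_blocks(self, req_id, num_blocks, dst):
+         pass  # actual KVCache data movement (e.g., NCCL/RDMA) omitted in this sketch
+ ```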
396
+
397
+ # 7 Evaluation
398
+
399
+ # 7.1 Experimental Setup
400
+
401
+ Environment. We deploy Infinite-LLM on a cluster with 4 nodes and 32 GPUs. Each node has 8xNVIDIA A100 (80GB) GPUs. The GPUs are connected via NVLink (600GB/s) within each node and via Ethernet (125MB/s) across nodes.
402
+
403
+ Models. Since most LLMs share a similar backbone Transformer block, we choose one representative model family, LLaMA2 [48], for evaluation. The LLaMA2 family contains three model sizes: 7B, 13B and 70B. They use two popular attention architectures: the 7B and 13B models utilize Multi-Head Attention (MHA) [49], while the 70B model employs Grouped-Query Attention (GQA) [39].
404
+
405
+ Traces. We generate 9 traces with different context length ranges and length distributions to comprehensively evaluate Infinite-LLM's end-to-end performance. Traces 0-2, marked as "S" (short) in Table 1, have relatively short sequence lengths that are guaranteed to fit in each instance when using vLLM (i.e., vLLM-multi). Requests of trace 0 come from the open-source dataset ShareGPT4 [47], which contains conversations from the GPT4 service. To assess the impact of different length distributions, particularly the variance of sequence lengths, we select subsets of ShareGPT4 to construct traces 1 and 2 with reduced standard deviations. Traces 3-8, marked as "L" (long) in Table 1, are used to evaluate Infinite-LLM with larger context length ranges; requests of trace 3 come from the open-source dataset L-Eval [8], and traces 4-8 follow the distribution of long requests from our online service. In each experiment, we assign an arrival time to each request using a Poisson process with varying request rates.
406
+
407
+ Comparison. Our evaluation focuses on comparing Infinite-LLM with static model parallelism and resource planning. To this end, we use vLLM[26], a state-of-the-art LLM serving engine using static model parallelism, as the primary baseline. Specifically, we compare the following approaches:
408
+
409
+ - Infinite-LLM: Given the total cluster resources, Infinite-LLM divides them into multiple model instances using an appropriate parallelism configuration (for non-attention computation) while scaling the attention computation across the instances. Infinite-LLM dispatches each request to the instance with the most free GPU memory.
410
+ - vLLM-multi (vLLM-M): vLLM with the same number and parallelism configuration of the instances as Infinite-LLM. It might fail to run some long requests due to limited per-instance memory capacity. vLLM-M uses the same dispatching policy as Infinite-LLM.
411
+ - vLLM-single (vLLM-S): a single instance containing all the cluster resources, so that vLLM can support the same sequence length ranges as Infinite-LLM. Note that vLLM only supports tensor parallelism, which is known to be less efficient than pipeline parallelism when distributed across machines[42]. We implement pipeline parallelism in vLLM for cross-machine communication when this instance needs to be distributed on multiple machines.
412
+
413
+ # 7.2 Context Length Performance
414
+
415
+ We first benchmark the performance of Infinite-LLM and the baselines when running requests with different context lengths. We use six context length ranges and three models. For each model and range, we test three specific context lengths: (1) a short context length (1k); (2) a length slightly exceeding the maximum length that vLLM-multi can support; and (3) the maximum length that Infinite-LLM supports given the cluster resources. For each data point, we measure the throughput of the largest batch of requests that fits at the given context length.
416
+
417
+ As shown in Figure 9, Infinite-LLM achieves the best of both worlds for long and short sequences. Compared to vLLM-multi, which is limited by per-instance memory, Infinite-LLM supports substantially longer contexts (2x-19x) while achieving comparable throughput on short sequence lengths. This improvement is attributed to Infinite-LLM's ability to efficiently coordinate memory and computation usage across all instances, while vLLM-multi is limited to each instance's private resources. Compared to vLLM-single, Infinite-LLM obtains 1.4x-5.3x higher throughput on short lengths while sustaining similar longest context lengths. This is because Infinite-LLM can maintain an efficient model parallelism strategy for FFN computations, while vLLM-single has to partition the model into smaller segments across more GPUs,
418
+
419
+ which results in lower GPU computation utilization for the non-attention parts and more communication overhead.
420
+
421
+ # 7.3 End-to-end Serving Performance
422
+
423
+ Comparison with multiple small instances. We first compare Infinite-LLM with vLLM-multi, which launches multiple instances with the same parallelism configuration as Infinite-LLM. We conduct six experiments using Traces 0-2, where the sequence lengths do not exceed the limit of a single vLLM instance. Figure 10a shows the throughput-latency variation under different request rates. We compare the maximum achieved throughputs of Infinite-LLM and vLLM. The results demonstrate that Infinite-LLM achieves a throughput improvement of approximately $1.35\mathrm{x} - 1.73\mathrm{x}$ over vLLM. We further examine how the number of instances and the traces' context length distribution affect the performance gains. As depicted in the six sub-figures of Figure 10a, the standard deviation of the traces' context length distribution decreases from left to right, while the number of instances increases from top to bottom. We observe that the performance gains rise with the standard deviation (indicating a more uneven length distribution) and with the number of instances. This is because a more uneven length distribution or a larger number of instances leads to greater variance in resource demands among instances, enhancing the benefits of unified resource management across all instances.
424
+
425
+ Comparison with a single large instance. We use Traces 3-8 with longer context lengths to compare Infinite-LLM with vLLM-S, which allocates all GPUs to a single instance to accommodate sufficiently long sequences. The results shown in Figure 10b indicate that Infinite-LLM achieves a $1.4\mathrm{x}$ to $3.4\mathrm{x}$ throughput gain over vLLM. From top to bottom and left to right in Figure 10b, we observe that Infinite-LLM's performance gains grow as the context length range expands. This is because vLLM's static model parallelism fragments the model across more GPUs, reducing efficiency in the non-attention segments and significantly lowering the system's capability to process shorter requests efficiently, whereas Infinite-LLM maintains an appropriate model parallelism strategy for the non-attention part, thereby preserving its performance.
426
+
427
+ # 7.4 Micro-benchmarks
428
+
429
+ Comparison with other long-context attention methods. We compare the performance of DistAttention, RingAttention, and TP (partitioning by the number of heads) within the context range from 4K to 256K, where the attention computation is based on the dimensions of LLaMA2-13B with four GPUs. As shown in Figure 11, the results show that DistAttention is $1\% - 25\%$ faster than TP due to its lower communication overhead. Compared to RingAttention, DistAttention is 7.7x-19.8x faster owing to the significantly higher communication overhead of RingAttention, which involves the transfer of
430
+
431
+ ![](images/7fdecbb2730b17e97c04f643b586f3236f3dfecd47d905f79843b5b373471fbf.jpg)
432
+ Figure 9. Context Length Performance
433
+
434
+ ![](images/f435a07453a55b238b8333e54114bba38c78e2d99261cbbeae58ccbd9e36eb67.jpg)
435
+
436
+ ![](images/eacd1e9dd3643f3bec4dc4a7381ef8830db15b534509294b975c9d81a8af0991.jpg)
437
+
438
+ ![](images/7f5c81c2c773832d564cdead21fc9854725c30206f1440c589a3b445440a0004.jpg)
439
+ Figure 11. Comparison of distributed attention methods.
440
+
441
+ ![](images/5a34e708368161e38de0252911bfad465c35b7adc46bfcbcce492d2faccaf3f5.jpg)
442
+ (a) Comparison with vLLM-M
443
+ Figure 10. End-to-end serving performance.
444
+
445
+ ![](images/06e807edb77532c27ccbce4e00b7da4aafcd43e31f5a25f5f83746ab02fbd9d7.jpg)
446
+ (b) Comparison with vLLM-S
447
+
448
+ large KVCache (MB to GB), whereas DistAttention transmits very small-sized queries (KB).
449
+
450
+ Overhead of KVCache movement. We improve cluster-scale throughput by scheduling DistAttention across all instances. To reduce the overhead of KVCache movement between instances, Infinite-LLM overlaps the movement with model computation. To evaluate the impact of the movement communication on instance throughput, we compare the instance throughput with movement enabled to that with movement disabled. Note that movement in this experiment does not change the batch size, so any fluctuations in the throughput curve are due to the communication costs of the movement. Results shown in Figure 12(a) indicate that instance throughput decreased by $8.6\%$ when moving 32 tokens per decode step. When moving 16 tokens per decode step, the throughput of the instance was identical to that with movement turned off. Therefore, when
451
+
452
+ ![](images/d834dd11c394f10153460a0d8caa98516235d2499fd0e4bdff2c3d13ec37ad86.jpg)
453
+
454
+ ![](images/42df338c1ea0997e172b9e20cb008edbeb68c27e70ccbaaf3ff4028350bcd815.jpg)
455
+ Figure 12. Overhead of KVCache movement.
456
+
457
+ the movement size is set to 16, communication can overlap well with computation without affecting the instance's performance.
458
+
459
+ # 8 Related Work
460
+
461
+ LLM inference system. ORCA [55] introduced iteration-level scheduling, which greatly enhances the computation and memory utilization in batched inference. vLLM [26] further proposed PagedAttention to address the memory wastage due to fragmentation. DeepSpeed-FastGen [2] proposed a novel prompt and generation composition strategy called Dynamic SplitFuse (or Sarathi [4]) to further enhance system throughput. DistServe [59] proposed to disaggregate the prefill and decode stages onto different instances to avoid their interference. Although these novel systems solve many problems and achieve outstanding results, the dynamism of requests, along with the need to support exceptionally long context lengths, remains an unresolved challenge.
462
+
463
+ Long-context LLM. Works like FlashAttention [18] and FlashDecoding [1] focus on optimizing the performance of attention for long sequences. They enhance the compute-to-memory ratio and SM (streaming multiprocessor) parallelism of attention on a single GPU by addressing data dependency issues. However, they do not take into account
464
+
465
+ the communication overhead in multi-GPU settings and cannot be directly applied to scenarios involving multiple GPUs. To train LLMs with long context, some research work [13, 27, 28, 32] has introduced context parallelism to partition the computation along the sequence dimension. Ring Attention [31, 32] distributes long sequences across multiple devices, with the intent of fully overlapping the communication of KV blocks with the computation of blockwise attention. These methods are designed for training and are a poor fit for the highly dynamic decoding phase of LLM inference, as they cause substantial overhead to transfer the KVCache across devices at every iteration. Another thread of work addressing the challenge of oversized KVCache for long-context inference utilizes sparse KVCaches, such as Sliding Window Attention [12, 17, 24], H2O [58] and StreamingLLM [54], which accepts potential accuracy loss because of KVCache eviction. Infinite-LLM supports long-context LLM serving by introducing a new scalable distributed attention mechanism, DistAttention. Attention can thus be disaggregated from the rest of model inference and scheduled across multiple serving instances, for both computation and KVCache management. DistAttention remains equivalent to the original attention and is therefore harmless to model accuracy.
+
+ Scheduling. To improve the throughput and latency of LLM serving, several systems [20, 30, 40, 43] have been proposed to optimize request scheduling across multiple model instances. Llumnix [43] dynamically reschedules requests across multiple instances at runtime to deal with the heterogeneity and unpredictability of requests. Parrot [30] uncovers the dependencies and commonalities among LLM requests, thus creating a new space for enhancing the end-to-end performance of LLM applications. However, previous work is limited to scheduling each whole request onto a single instance, facing the issue of low GPU utilization due to dynamic request lengths. Infinite-LLM can schedule arbitrary subsequences of requests onto instances, representing a much finer scheduling granularity and higher flexibility than existing systems.
466
+
467
+ # 9 Conclusion
468
+
469
+ In this paper, we have presented Infinite-LLM, a novel LLM serving system designed for managing highly dynamic context lengths in LLM requests. Through Infinite-LLM, we have revealed the highly dynamic characteristics of LLM requests and advocated attention disaggregation as a common technique for LLM serving. In particular, we have introduced a novel system architecture that is both efficient and scalable for all LLM requests, proposed a scheduling policy to saturate the computation and bandwidth of GPUs simultaneously, and shown significant improvements through extensive evaluations on representative real traces. Going forward, we hope that Infinite-LLM can become a common foundation
470
+
471
+ toward AGI for both the research community and industry, inspiring future advancements in LLM serving.
472
+
473
+ # References
474
+
475
+ [1] Flashdecoding. https://princeton-nlp.github.io/flash-decoding/, 2021.
476
+ [2] Deepspeed-fastgen: High-throughput text generation for llms via mii and deepspeed-inference. https://github.com/microsoft/DeepSpeed/tree/master/blogs/deepspeed-fastgen, 2023.
477
+ [3] Lisa Adams, Felix Busch, Tianyu Han, Jean-Baptiste Excoffier, Matthieu Ortala, Alexander Löser, Hugo JWL Aerts, Jakob Nikolas Kather, Daniel Truhn, and Keno Bressem. Longhealth: A question answering benchmark with long clinical documents. arXiv preprint arXiv:2401.14490, 2024.
478
+ [4] Amey Agrawal, Ashish Panwar, Jayashree Mohan, Nipun Kwatra, Bhargav S. Gulavani, and Ramachandran Ramjee. Sarathi: Efficient llm inference by piggybacking decodes with chunked prefills, 2023.
479
+ [5] AI@Meta. Llama 3 model card. 2024.
480
+ [6] Reza Yazdani Aminabadi, Samyam Rajbhandari, Ammar Ahmad Awan, Cheng Li, Du Li, Elton Zheng, Olatunji Ruwase, Shaden Smith, Minjia Zhang, Jeff Rasley, et al. Deepspeed-inference: enabling efficient inference of transformer models at unprecedented scale. In SC22: International Conference for High Performance Computing, Networking, Storage and Analysis, pages 1-15. IEEE, 2022.
481
+ [7] Chenxin An, Shansan Gong, Ming Zhong, Mukai Li, Jun Zhang, Lingpeng Kong, and Xipeng Qiu. L-eval: Instituting standardized evaluation for long context language models. arXiv preprint arXiv:2307.11088, 2023.
482
+ [8] Chenxin An, Shansan Gong, Ming Zhong, Xingjian Zhao, Mukai Li, Jun Zhang, Lingpeng Kong, and Xipeng Qiu. L-eval: Instituting standardized evaluation for long context language models, 2023.
483
+ [9] Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023.
484
+ [10] Anthropic. https://www.anthropic.com/news/claude-3-family, 2024.
485
+ [11] Yushi Bai, Xin Lv, Jiajie Zhang, Hongchang Lyu, Jiankai Tang, Zhidian Huang, Zhengxiao Du, Xiao Liu, Aohan Zeng, Lei Hou, et al. Longbench: A bilingual, multitask benchmark for long context understanding. arXiv preprint arXiv:2308.14508, 2023.
486
+ [12] Iz Beltagy, Matthew E Peters, and Arman Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020.
487
+ [13] William Brandon, Aniruddha Nrusimha, Kevin Qian, Zachary Ankner, Tian Jin, Zhiye Song, and Jonathan Ragan-Kelley. Striped attention: Faster ring attention for causal transformers. arXiv preprint arXiv:2311.09431, 2023.
488
+ [14] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
489
+ [15] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, Wei Ye, Yue Zhang, Yi Chang, Philip S. Yu, Qiang Yang, and Xing Xie. A survey on evaluation of large language models, 2023.
490
+ [16] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin,
491
+
492
+ Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021.
493
+ [17] Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers, 2019.
494
+ [18] Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with Io-awareness. Advances in Neural Information Processing Systems, 35:16344-16359, 2022.
495
+ [19] Yiran Ding, Li Lyna Zhang, Chengruidong Zhang, Yuanyuan Xu, Ning Shang, Jiahang Xu, Fan Yang, and Mao Yang. Longrope: Extending llm context window beyond 2 million tokens. arXiv preprint arXiv:2402.13753, 2024.
496
+ [20] Yao Fu, Leyang Xue, Yeqi Huang, Andrei-Octavian Brabete, Dmitrii Ustiugov, Yuvraj Patel, and Luo Mai. Serverlesslm: Locality-enhanced serverless inference for large language models, 2024.
497
+ [21] Google. Our next-generation model: Gemini 1.5. https://blog.google/technology/ai/google-gemini-next-generationmodel-february-2024/, 2024.
498
+ [22] Dong Huang, Qingwen Bu, Jie M Zhang, Michael Luck, and Heming Cui. Agentcoder: Multi-agent-based code generation with iterative testing and optimisation. arXiv preprint arXiv:2312.13010, 2023.
499
+ [23] Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V Le, Yonghui Wu, et al. Gpipe: Efficient training of giant neural networks using pipeline parallelism. Advances in neural information processing systems, 32, 2019.
500
+ [24] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b, 2023.
501
+ [25] Jin K Kim, Michael Chua, Mandy Rickard, and Armando Lorenzo. Chatgpt and large language model (llm) chatbots: The current state of acceptability and a proposal for guidelines on utilization in academic medicine. Journal of Pediatric Urology, 2023.
502
+ [26] W Kwon, Z Li, S Zhuang, et al. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626, 2023.
503
+ [27] Dacheng Li, Rulin Shao, Anze Xie, Eric P Xing, Joseph E Gonzalez, Ion Stoica, Xuezhe Ma, and Hao Zhang. Lightseq: Sequence level parallelism for distributed training of long context transformers. arXiv preprint arXiv:2310.03294, 2023.
504
+ [28] Shenggui Li, Fuzhao Xue, Chaitanya Baranwal, Yongbin Li, and Yang You. Sequence parallelism: Long sequence training from system perspective. arXiv preprint arXiv:2105.13120, 2021.
505
+ [29] Zhuohan Li, Lianmin Zheng, Yinmin Zhong, Vincent Liu, Ying Sheng, Xin Jin, Yanping Huang, Zhifeng Chen, Hao Zhang, Joseph E Gonzalez, et al. Alpaserve: Statistical multiplexing with model parallelism for deep learning serving. arXiv preprint arXiv:2302.11665, 2023.
506
+ [30] Chaofan Lin, Zhenhua Han, Chengruidong Zhang, Yuqing Yang, Fan Yang, Chen Chen, and Lili Qiu. Parrot: Efficient serving of llm-based applications with semantic variable, 2024.
507
+ [31] Hao Liu and Pieter Abbeel. Blockwise parallel transformer for long context large models. arXiv preprint arXiv:2305.19370, 2023.
508
+ [32] Hao Liu, Matei Zaharia, and Pieter Abbeel. Ring attention with blockwise transformers for near-infinite context. arXiv preprint arXiv:2310.01889, 2023.
509
+
510
+ [33] Deepak Narayanan, Aaron Harlap, Amar Phanishayee, Vivek Seshadri, Nikhil R Devanur, Gregory R Ganger, Phillip B Gibbons, and Matei Zaharia. Pipedream: Generalized pipeline parallelism for dnn training. In Proceedings of the 27th ACM Symposium on Operating Systems Principles, pages 1-15, 2019.
511
+ [34] Deepak Narayanan, Mohammad Shoeybi, Jared Casper, Patrick LeGresley, Mostofa Patwary, Vijay Korthikanti, Dmitri Vainbrand, Prethvi Kashinkunti, Julie Bernauer, Bryan Catanzaro, Amar Phanishayee, and Matei Zaharia. Efficient large-scale language model training on GPU clusters. CoRR, abs/2104.04473, 2021.
512
+ [35] OpenAI. https://openai.com/blog/chatgpt, 2022.
513
+ [36] Reiner Pope, Sholto Douglas, Aakanksha Chowdhery, Jacob Devlin, James Bradbury, Jonathan Heek, Kefan Xiao, Shivani Agrawal, and Jeff Dean. Efficiently scaling transformer inference. Proceedings of Machine Learning and Systems, 5, 2023.
514
+ [37] Markus N Rabe and Charles Staats. Self-attention does not need $o(n^{2})$ memory. arXiv preprint arXiv:2112.05682, 2021.
515
+ [38] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018.
516
+ [39] Noam Shazeer. Fast transformer decoding: One write-head is all you need. arXiv preprint arXiv:1911.02150, 2019.
517
+ [40] Ying Sheng, Shiyi Cao, Dacheng Li, Banghua Zhu, Zhuohan Li, Danyang Zhuo, Joseph E. Gonzalez, and Ion Stoica. Fairness in serving large language models, 2024.
518
+ [41] Ying Sheng, Lianmin Zheng, Binhang Yuan, Zhuohan Li, Max Ryabinin, Beidi Chen, Percy Liang, Christopher Ré, Ion Stoica, and Ce Zhang. Flexgen: High-throughput generative inference of large language models with a single gpu. In International Conference on Machine Learning, pages 31094-31116. PMLR, 2023.
519
+ [42] Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using model parallelism, 2020.
520
+ [43] Biao Sun, Ziming Huang, Hanyu Zhao, Wencong Xiao, Xinyi Zhang, Yong Li, and Wei Lin. Llumnix: Dynamic scheduling for large language model serving, 2024.
521
+ [44] Ilya Sutskever, James Martens, and Geoffrey E Hinton. Generating text with recurrent neural networks. In Proceedings of the 28th international conference on machine learning (ICML-11), pages 1017–1024, 2011.
522
+ [45] Salmonn Talebi, Elizabeth Tong, and Mohammad RK Mofrad. Beyond the hype: Assessing the performance, trustworthiness, and clinical suitability of gpt3.5. arXiv preprint arXiv:2306.15887, 2023.
523
+ [46] Qiaoyu Tang, Jiawei Chen, Bowen Yu, Yaojie Lu, Cheng Fu, Haiyang Yu, Hongyu Lin, Fei Huang, Ben He, Xianpei Han, et al. Self-retrieval: Building an information retrieval system with one large language model. arXiv preprint arXiv:2403.00801, 2024.
524
+ [47] ShareGPT Team. https://huggingface.co/datasets/shibing-624/sharegpt_gpt4, 2023.
525
+ [48] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.
526
+ [49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.
527
+ [50] Yiming Wang, Zhuosheng Zhang, and Rui Wang. Element-aware summarization with large language models: Expert-aligned evaluation and chain-of-thought method. arXiv preprint arXiv:2305.13412, 2023.
528
+ [51] Yuxin Wang, Yuhan Chen, Zeyu Li, Zhenheng Tang, Rui Guo, Xin Wang, Qiang Wang, Amelie Chi Zhou, and Xiaowen Chu. Towards efficient and reliable llm serving: A real-world workload study, 2024.
529
+ [52] Jing Wei, Sungdong Kim, Hyunhoon Jung, and Young-Ho Kim. Leveraging large language models to power chatbots for collecting user self-reported data. arXiv preprint arXiv:2301.05843, 2023.
530
+
531
+ [53] BigScience Workshop, Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, et al. Bloom: A 176b-parameter open-access multilingual language model. arXiv preprint arXiv:2211.05100, 2022.
532
+ [54] Guangxuan Xiao, Yuandong Tian, Beidi Chen, Song Han, and Mike Lewis. Efficient streaming language models with attention sinks. arXiv preprint arXiv:2309.17453, 2023.
533
+ [55] Gyeong-In Yu, Joo Seong Jeong, Geon-Woo Kim, Soojeong Kim, and Byung-Gon Chun. Orca: A distributed serving system for {Transformer-Based} generative models. In 16th USENIX Symposium on Operating Systems Design and Implementation (OSDI 22), pages 521-538, 2022.
534
+ [56] Tao Yuan, Xuefei Ning, Dong Zhou, Zhijie Yang, Shiyao Li, Minghui Zhuang, Zheyue Tan, Zhuyu Yao, Dahua Lin, Boxun Li, et al. Lv-eval: A balanced long-context benchmark with 5 length levels up to 256k. arXiv preprint arXiv:2402.05136, 2024.
535
+
536
+ [57] Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B Hashimoto. Benchmarking large language models for news summarization. Transactions of the Association for Computational Linguistics, 12:39-57, 2024.
537
+ [58] Zhenyu Zhang, Ying Sheng, Tianyi Zhou, Tianlong Chen, Lianmin Zheng, Ruisi Cai, Zhao Song, Yuandong Tian, Christopher Ré, Clark Barrett, Zhangyang Wang, and Beidi Chen. $\mathrm{H}_{2}\mathrm{o}$ : Heavy-hitter oracle for efficient generative inference of large language models, 2023.
538
+ [59] Yinmin Zhong, Shengyu Liu, Junda Chen, Jianbo Hu, Yibo Zhu, Xuanzhe Liu, Xin Jin, and Hao Zhang. Distserve: Disaggregating prefill and decoding for goodput-optimized large language model serving, 2024.
539
+ [60] Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Zhicheng Dou, and Ji-Rong Wen. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107, 2023.
2401.02xxx/2401.02669/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:059725b8565a5baffa7096e306dc138ea77e3d019cc6e02604419f9aed00419a
3
+ size 617201
2401.02xxx/2401.02669/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_content_list.json ADDED
@@ -0,0 +1,1692 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "PROGRESSIVE KNOWLEDGE DISTILLATION OF Stable Diffusion XL USING LAYER LEVEL LOSS",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 125,
8
+ 119,
9
+ 872,
10
+ 165
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "TECHNICAL REPORT",
17
+ "bbox": [
18
+ 424,
19
+ 198,
20
+ 571,
21
+ 212
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Yatharth Gupta*",
28
+ "bbox": [
29
+ 290,
30
+ 239,
31
+ 411,
32
+ 253
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Segmind",
39
+ "bbox": [
40
+ 313,
41
+ 255,
42
+ 372,
43
+ 268
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "yatharthg@segmind.com",
50
+ "bbox": [
51
+ 251,
52
+ 268,
53
+ 434,
54
+ 281
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Vishnu V. Jaddipal*",
61
+ "bbox": [
62
+ 593,
63
+ 239,
64
+ 735,
65
+ 253
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Segmind",
72
+ "bbox": [
73
+ 632,
74
+ 255,
75
+ 692,
76
+ 267
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "vishnuj@segmind.com",
83
+ "bbox": [
84
+ 578,
85
+ 268,
86
+ 743,
87
+ 281
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "Harish Prabhala",
94
+ "bbox": [
95
+ 191,
96
+ 301,
97
+ 310,
98
+ 315
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "Segmind",
105
+ "bbox": [
106
+ 220,
107
+ 316,
108
+ 281,
109
+ 330
110
+ ],
111
+ "page_idx": 0
112
+ },
113
+ {
114
+ "type": "text",
115
+ "text": "harish@segmind.com",
116
+ "bbox": [
117
+ 173,
118
+ 332,
119
+ 328,
120
+ 344
121
+ ],
122
+ "page_idx": 0
123
+ },
124
+ {
125
+ "type": "text",
126
+ "text": "Sayak Paul",
127
+ "bbox": [
128
+ 441,
129
+ 301,
130
+ 522,
131
+ 316
132
+ ],
133
+ "page_idx": 0
134
+ },
135
+ {
136
+ "type": "text",
137
+ "text": "Hugging Face",
138
+ "bbox": [
139
+ 434,
140
+ 316,
141
+ 529,
142
+ 330
143
+ ],
144
+ "page_idx": 0
145
+ },
146
+ {
147
+ "type": "text",
148
+ "text": "sayak@huggingface.co",
149
+ "bbox": [
150
+ 395,
151
+ 332,
152
+ 566,
153
+ 344
154
+ ],
155
+ "page_idx": 0
156
+ },
157
+ {
158
+ "type": "text",
159
+ "text": "Patrick Von Platen",
160
+ "bbox": [
161
+ 661,
162
+ 301,
163
+ 795,
164
+ 315
165
+ ],
166
+ "page_idx": 0
167
+ },
168
+ {
169
+ "type": "text",
170
+ "text": "Hugging Face",
171
+ "bbox": [
172
+ 681,
173
+ 316,
174
+ 774,
175
+ 330
176
+ ],
177
+ "page_idx": 0
178
+ },
179
+ {
180
+ "type": "text",
181
+ "text": "patrick@huggingface.co",
182
+ "bbox": [
183
+ 633,
184
+ 332,
185
+ 823,
186
+ 344
187
+ ],
188
+ "page_idx": 0
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "ABSTRACT",
193
+ "text_level": 1,
194
+ "bbox": [
195
+ 449,
196
+ 393,
197
+ 547,
198
+ 409
199
+ ],
200
+ "page_idx": 0
201
+ },
202
+ {
203
+ "type": "text",
204
+ "text": "Stable Diffusion XL (SDXL) has become the best open source text-to-image model (T2I) for its versatility and top-notch image quality. Efficiently addressing the computational demands of SDXL models is crucial for wider reach and applicability. In this work, we introduce two scaled-down variants, Segmind Stable Diffusion (SSD-1B) and Segmind-Vega, with 1.3B and 0.74B parameter UNets, respectively, achieved through progressive removal using layer-level losses focusing on reducing the model size while preserving generative quality. We release these models weights at https://hf.co/Segmind.",
205
+ "bbox": [
206
+ 169,
207
+ 416,
208
+ 823,
209
+ 513
210
+ ],
211
+ "page_idx": 0
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "Our methodology involves the elimination of residual networks and transformer blocks from the U-Net structure of SDXL, resulting in significant reductions in parameters, and latency. Our compact models effectively emulate the original SDXL by capitalizing on transferred knowledge, achieving competitive results against larger multi-billion parameter SDXL.",
216
+ "bbox": [
217
+ 169,
218
+ 515,
219
+ 823,
220
+ 571
221
+ ],
222
+ "page_idx": 0
223
+ },
224
+ {
225
+ "type": "text",
226
+ "text": "Our work underscores the efficacy of knowledge distillation coupled with layer-level losses in reducing model size while preserving the high-quality generative capabilities of SDXL, thus facilitating more accessible deployment in resource-constrained environments.",
227
+ "bbox": [
228
+ 169,
229
+ 574,
230
+ 823,
231
+ 617
232
+ ],
233
+ "page_idx": 0
234
+ },
235
+ {
236
+ "type": "text",
237
+ "text": "1 Introduction",
238
+ "text_level": 1,
239
+ "bbox": [
240
+ 112,
241
+ 638,
242
+ 253,
243
+ 652
244
+ ],
245
+ "page_idx": 0
246
+ },
247
+ {
248
+ "type": "text",
249
+ "text": "Stable Diffusion (Rombach et al., 2022) has emerged as highly influential in the realm of text-to-image (T2I) synthesis, playing a pivotal role as an open-source framework. Its remarkable capabilities has spurred its integration as a backbone in various text-guided vision applications. Stable Diffusion, characterized as T2I-specialized latent diffusion models (LDMs), leverages diffusion operations within a semantically compressed space, enhancing computational efficiency. Central to the architecture of Stable Diffusion is a U-Net that employs iterative sampling to progressively denoise a random latent code. This process is further supported by a text encoder and an image decoder, orchestrating the generation of text-aligned images. SDXL (Podell et al., 2023) is the largest variant with a 2.6B Parameter UNet and two text encoders, providing the best quality among open source models.",
250
+ "bbox": [
251
+ 109,
252
+ 670,
253
+ 883,
254
+ 782
255
+ ],
256
+ "page_idx": 0
257
+ },
258
+ {
259
+ "type": "text",
260
+ "text": "Notably, distillation techniques have been applied to pretrained diffusion models to curtail the number of denoising steps, resulting in identically structured models with reduced sampling requirements. Additionally, methods such as post-training quantization and implementation optimizations have been explored. The exploration of removing architectural elements in large diffusion models has also been investigated for the base U-Net models (Kim et al., 2023). In this context, our work endeavors to apply knowledge distillation methods to the SDXL model (Podell et al., 2023), resulting in the creation of two streamlined variants, namely Segmind Stable Diffusion (SSD-1B) and Segmind-Vega. We use the base model as well as finetuned versions in the distillation process. These models, with 1.3B and 0.74B",
261
+ "bbox": [
262
+ 109,
263
+ 787,
264
+ 883,
265
+ 886
266
+ ],
267
+ "page_idx": 0
268
+ },
269
+ {
270
+ "type": "aside_text",
271
+ "text": "arXiv:2401.02677v1 [cs.CV] 5 Jan 2024",
272
+ "bbox": [
273
+ 22,
274
+ 273,
275
+ 60,
276
+ 700
277
+ ],
278
+ "page_idx": 0
279
+ },
280
+ {
281
+ "type": "footer",
282
+ "text": "*Equal Contribution",
283
+ "bbox": [
284
+ 135,
285
+ 897,
286
+ 256,
287
+ 911
288
+ ],
289
+ "page_idx": 0
290
+ },
291
+ {
292
+ "type": "text",
293
+ "text": "parameter UNets respectively, employ layer level losses to progressively reduce the model size to $20\\%$ , $40\\%$ , $50\\%$ , $60\\%$ , and ultimately $70\\%$ . This reduction in model size aims to strike a balance between computational efficiency and the preservation of generative capabilities, making SDXL more accessible for diverse applications.",
294
+ "bbox": [
295
+ 109,
296
+ 90,
297
+ 883,
298
+ 133
299
+ ],
300
+ "page_idx": 1
301
+ },
302
+ {
303
+ "type": "text",
304
+ "text": "2 Related Work",
305
+ "text_level": 1,
306
+ "bbox": [
307
+ 112,
308
+ 152,
309
+ 264,
310
+ 167
311
+ ],
312
+ "page_idx": 1
313
+ },
314
+ {
315
+ "type": "text",
316
+ "text": "2.1 Large Latent Diffusion Models",
317
+ "text_level": 1,
318
+ "bbox": [
319
+ 112,
320
+ 183,
321
+ 369,
322
+ 196
323
+ ],
324
+ "page_idx": 1
325
+ },
326
+ {
327
+ "type": "text",
328
+ "text": "The exploration of diffusion-based generative models has been instrumental in achieving high-fidelity synthesis with broad mode coverage by gradually removing noise from corrupted data. The integration of these models with pretrained language models has notably enhanced the quality of text-to-image (T2I) synthesis. In models such asImagen (Saharia et al., 2022) and Deepfloyd IF (Shonenkov et al., 2023), text-conditional diffusion models generate small images, subsequently upsampled through super-resolution modules. DALL-E (Ramesh et al., 2021) style models, on the other hand, employ a text-conditional prior network to produce an image embedding, transformed via a diffusion decoder and further upscaled into higher resolutions. LDMs perform diffusion modeling in a low-dimensional latent space constructed through a pixel-space autoencoder.",
329
+ "bbox": [
330
+ 109,
331
+ 209,
332
+ 883,
333
+ 320
334
+ ],
335
+ "page_idx": 1
336
+ },
337
+ {
338
+ "type": "text",
339
+ "text": "2.2 Efficient Diffusion Models",
340
+ "text_level": 1,
341
+ "bbox": [
342
+ 112,
343
+ 335,
344
+ 336,
345
+ 349
346
+ ],
347
+ "page_idx": 1
348
+ },
349
+ {
350
+ "type": "text",
351
+ "text": "Efforts to address the slow sampling process in diffusion models have been widespread. Diffusion-tailored distillation progressively transfers knowledge from a pretrained diffusion model to a model with fewer sampling steps while maintaining the same architecture. Latent Consistency Models (Luo et al., 2023a) also allow the models to generate images in very few steps. Combining this with Low Rank Adapters (LoRAs) (Luo et al., 2023b) provides a very easy way of enabling fast generation with large models. Fast high-order solvers for diffusion ordinary differential equations aim to boost sampling speed. In complement to these approaches, our network compression method reduces per-step computation and seamlessly integrates with models employing fewer sampling steps. Leveraging quantization and implementation optimizations designed for SDXL can further enhance the efficiency of our compact models.",
352
+ "bbox": [
353
+ 109,
354
+ 361,
355
+ 883,
356
+ 472
357
+ ],
358
+ "page_idx": 1
359
+ },
360
+ {
361
+ "type": "text",
362
+ "text": "2.3 Distillation-Based Compression",
363
+ "text_level": 1,
364
+ "bbox": [
365
+ 112,
366
+ 488,
367
+ 374,
368
+ 503
369
+ ],
370
+ "page_idx": 1
371
+ },
372
+ {
373
+ "type": "text",
374
+ "text": "Knowledge Distillation (KD) has been successful in improving the performance of small-size models by exploiting output-level and feature-level information from larger source models. While classical KD has found applications in efficient GANs, and Stable Diffusion Base model. Our work demonstrates the extension of distillation pretraining techniques, proven successful in small yet capable general-purpose language models and vision transformers, to SDXL.",
375
+ "bbox": [
376
+ 109,
377
+ 513,
378
+ 883,
379
+ 571
380
+ ],
381
+ "page_idx": 1
382
+ },
383
+ {
384
+ "type": "text",
385
+ "text": "2.3.1 Concurrent Studies",
386
+ "text_level": 1,
387
+ "bbox": [
388
+ 112,
389
+ 584,
390
+ 303,
391
+ 598
392
+ ],
393
+ "page_idx": 1
394
+ },
395
+ {
396
+ "type": "text",
397
+ "text": "Studies such as SnapFusion (Li et al., 2023) achieve an efficient U-Net for Stable Diffusion through architecture evolution and step distillation. Wuerstchen (Pernias et al., 2023) introduces two diffusion processes on low- and high-resolution latent spaces for economic training. While these works are valuable, it is essential to note that they often require significantly larger computational resources than our proposed approach. Additionally, As demonstrated on Stable Diffusion, BK-SDM proposes pruning the UNet via removal of blocks, showcasing promising compression.",
398
+ "bbox": [
399
+ 109,
400
+ 608,
401
+ 883,
402
+ 679
403
+ ],
404
+ "page_idx": 1
405
+ },
406
+ {
407
+ "type": "text",
408
+ "text": "This work uses the technique of classical architectural compression in achieving smaller and faster diffusion models. The approach involves the removal of multiple transformer layers from the U-Net of SDXL, followed by retraining with feature-level knowledge distillation for general-purpose T2I. The contributions of this study are summarized as follows:",
409
+ "bbox": [
410
+ 109,
411
+ 684,
412
+ 883,
413
+ 726
414
+ ],
415
+ "page_idx": 1
416
+ },
417
+ {
418
+ "type": "list",
419
+ "sub_type": "text",
420
+ "list_items": [
421
+ "- Architectural Compression: We compress SDXL by strategically removing architectural blocks from the U-Net, resulting in a notable reduction in model size (up to $70\\%$ ) and increased inference speeds (up to $100\\%$ speedup).",
422
+ "- Feature Distillation: We use feature distillation for training diffusion models, demonstrating its remarkable benefits in achieving competitive T2I performance with significantly fewer resources. The cost-effectiveness of network compression is emphasized, particularly when compared to the substantial expense of training diffusion models from scratch.",
423
+ "- Downstream benefits: The method, to an extent preserves fidelity of generation with different LoRA and Controlnet networks, thus requiring less training to be used on the distilled model."
424
+ ],
425
+ "bbox": [
426
+ 156,
427
+ 737,
428
+ 880,
429
+ 869
430
+ ],
431
+ "page_idx": 1
432
+ },
433
+ {
434
+ "type": "text",
435
+ "text": "In summary, this research explores classical architectural compression for SDXL, providing a cost-effective strategy for building compact general-purpose diffusion models with compelling performance.",
436
+ "bbox": [
437
+ 109,
438
+ 883,
439
+ 883,
440
+ 912
441
+ ],
442
+ "page_idx": 1
443
+ },
444
+ {
445
+ "type": "header",
446
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
447
+ "bbox": [
448
+ 225,
449
+ 42,
450
+ 767,
451
+ 56
452
+ ],
453
+ "page_idx": 1
454
+ },
455
+ {
456
+ "type": "page_number",
457
+ "text": "2",
458
+ "bbox": [
459
+ 491,
460
+ 935,
461
+ 504,
462
+ 946
463
+ ],
464
+ "page_idx": 1
465
+ },
466
+ {
467
+ "type": "text",
468
+ "text": "3 Methodology",
469
+ "text_level": 1,
470
+ "bbox": [
471
+ 112,
472
+ 89,
473
+ 256,
474
+ 107
475
+ ],
476
+ "page_idx": 2
477
+ },
478
+ {
479
+ "type": "text",
480
+ "text": "In our pursuit of compressing SDXL models, we adopt a nuanced approach that centers on the removal of transformer layers within attention blocks. Our observation reveals a redundancy in numerous blocks, and our strategy involves judicious elimination without compromising the model's generative prowess. We draw inspiration from the architectural compression techniques applied to Stable Diffusion v1.5's $^2$ U-Net and extend the methodology to SDXL, yielding two scaled-down variants: Segmind Stable Diffusion (SSD-1B) and Segmind-Vega.",
481
+ "bbox": [
482
+ 109,
483
+ 121,
484
+ 883,
485
+ 191
486
+ ],
487
+ "page_idx": 2
488
+ },
489
+ {
490
+ "type": "text",
491
+ "text": "3.1 Architecture",
492
+ "text_level": 1,
493
+ "bbox": [
494
+ 112,
495
+ 209,
496
+ 243,
497
+ 223
498
+ ],
499
+ "page_idx": 2
500
+ },
501
+ {
502
+ "type": "text",
503
+ "text": "Our compression strategy is motivated by the recognition that certain layers are dispensable without significantly affecting the model's performance. We leverage insights from various teacher models, including SDXL-base-1.0 and the fine-tuned Zavy Chroma-XL $^{3}$ and Juggernaut-XL $^{4}$ , during the compression process.",
504
+ "bbox": [
505
+ 109,
506
+ 234,
507
+ 883,
508
+ 277
509
+ ],
510
+ "page_idx": 2
511
+ },
512
+ {
513
+ "type": "text",
514
+ "text": "We report similar findings as BK-SDM (Kim et al., 2023), in that the middle block of the U-Net can be removed without significantly affecting image quality. To add, we observe that removal of only the attention layers and the second residual network (He et al., 2015) block preserves image quality to a higher degree, as opposed to removal of the whole mid-block.",
515
+ "bbox": [
516
+ 109,
517
+ 282,
518
+ 883,
519
+ 339
520
+ ],
521
+ "page_idx": 2
522
+ },
523
+ {
524
+ "type": "image",
525
+ "img_path": "images/1d019a5f0399f5089149b9443b2d1fbfc16705d888ad8cf8a06dc452ba6db52c.jpg",
526
+ "image_caption": [
527
+ "Figure 1: SDXL U-Net structure"
528
+ ],
529
+ "image_footnote": [],
530
+ "bbox": [
531
+ 124,
532
+ 358,
533
+ 401,
534
+ 435
535
+ ],
536
+ "page_idx": 2
537
+ },
538
+ {
539
+ "type": "image",
540
+ "img_path": "images/6d91e8cad379269874e14ae21ec129796abd3d3c0945f92940cb0da083a1319d.jpg",
541
+ "image_caption": [],
542
+ "image_footnote": [],
543
+ "bbox": [
544
+ 405,
545
+ 358,
546
+ 872,
547
+ 436
548
+ ],
549
+ "page_idx": 2
550
+ },
551
+ {
552
+ "type": "image",
553
+ "img_path": "images/3f4c9aa6d87cad14f2f5d8ece3dc0dfa88df8c470e3bec012253a0551d1cf9e5.jpg",
554
+ "image_caption": [
555
+ "Figure 2: SSD-1B U-Net structure"
556
+ ],
557
+ "image_footnote": [],
558
+ "bbox": [
559
+ 124,
560
+ 494,
561
+ 413,
562
+ 575
563
+ ],
564
+ "page_idx": 2
565
+ },
566
+ {
567
+ "type": "image",
568
+ "img_path": "images/81e599b06fce4ab63093d0146f17691a6d6109f0e9dca899876eef001c8ac47b.jpg",
569
+ "image_caption": [],
570
+ "image_footnote": [],
571
+ "bbox": [
572
+ 418,
573
+ 494,
574
+ 872,
575
+ 575
576
+ ],
577
+ "page_idx": 2
578
+ },
579
+ {
580
+ "type": "image",
581
+ "img_path": "images/ff7a671b9cea95295b3d0700a218550c1a020684ace21132d5ef6f2ced770fbe.jpg",
582
+ "image_caption": [
583
+ "Figure 3: Vega U-Net structure"
584
+ ],
585
+ "image_footnote": [],
586
+ "bbox": [
587
+ 124,
588
+ 635,
589
+ 413,
590
+ 715
591
+ ],
592
+ "page_idx": 2
593
+ },
594
+ {
595
+ "type": "image",
596
+ "img_path": "images/3829428b791322c0c1adcaf0a5b2fe2a13930039411f467c76cb9fd99f97ffc2.jpg",
597
+ "image_caption": [],
598
+ "image_footnote": [],
599
+ "bbox": [
600
+ 418,
601
+ 635,
602
+ 872,
603
+ 715
604
+ ],
605
+ "page_idx": 2
606
+ },
607
+ {
608
+ "type": "text",
609
+ "text": "3.2 Loss",
610
+ "text_level": 1,
611
+ "bbox": [
612
+ 112,
613
+ 773,
614
+ 187,
615
+ 787
616
+ ],
617
+ "page_idx": 2
618
+ },
619
+ {
620
+ "type": "text",
621
+ "text": "In contrast to the block-level losses employed in prior work, we introduce layer-level losses specific to each attention and ResNet layer. This refined approach allows for a more granular assessment of the model's internal representations, enabling us to identify and retain essential features while discarding redundant elements. Our choice of layer-level losses is influenced by their efficacy in capturing the nuanced interactions within the model's architecture.",
622
+ "bbox": [
623
+ 109,
624
+ 800,
625
+ 883,
626
+ 857
627
+ ],
628
+ "page_idx": 2
629
+ },
630
+ {
631
+ "type": "header",
632
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
633
+ "bbox": [
634
+ 225,
635
+ 42,
636
+ 767,
637
+ 56
638
+ ],
639
+ "page_idx": 2
640
+ },
641
+ {
642
+ "type": "page_footnote",
643
+ "text": "$^{2}$ https://huggingface.co/runwayml/stable-diffusion-v1-5",
644
+ "bbox": [
645
+ 132,
646
+ 869,
647
+ 553,
648
+ 883
649
+ ],
650
+ "page_idx": 2
651
+ },
652
+ {
653
+ "type": "page_footnote",
654
+ "text": "<sup>3</sup>https://civitai.com/models/119229/zavychromaxl",
655
+ "bbox": [
656
+ 132,
657
+ 883,
658
+ 496,
659
+ 897
660
+ ],
661
+ "page_idx": 2
662
+ },
663
+ {
664
+ "type": "page_footnote",
665
+ "text": "4https://civitai.com/models/133005?modelVersionId=240840",
666
+ "bbox": [
667
+ 132,
668
+ 897,
669
+ 566,
670
+ 911
671
+ ],
672
+ "page_idx": 2
673
+ },
674
+ {
675
+ "type": "page_number",
676
+ "text": "3",
677
+ "bbox": [
678
+ 493,
679
+ 935,
680
+ 503,
681
+ 946
682
+ ],
683
+ "page_idx": 2
684
+ },
685
+ {
686
+ "type": "text",
687
+ "text": "3.2.1 Input Generation",
688
+ "text_level": 1,
689
+ "bbox": [
690
+ 112,
691
+ 90,
692
+ 289,
693
+ 104
694
+ ],
695
+ "page_idx": 3
696
+ },
697
+ {
698
+ "type": "text",
699
+ "text": "To obtain the input for the U-Net, we employ pretrained text encoders for the to obtain the text embeddings. The latent representations of the image are obtained by the pretrained VAE. Both text encoders and the VAE are kept frozen during training and only the UNet is trained. The latent representation $z$ of an image and its paired text embedding $y$ form the basis for our training process.",
700
+ "bbox": [
701
+ 109,
702
+ 114,
703
+ 883,
704
+ 172
705
+ ],
706
+ "page_idx": 3
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "3.2.2 Task Loss",
711
+ "text_level": 1,
712
+ "bbox": [
713
+ 112,
714
+ 186,
715
+ 235,
716
+ 200
717
+ ],
718
+ "page_idx": 3
719
+ },
720
+ {
721
+ "type": "text",
722
+ "text": "We formulate the task loss, denoted as $L_{Task}$ , which is computed through the reverse denoising process. The task loss measures the disparity between the sampled noise $\\epsilon$ from the diffusion process and the estimated noise $\\epsilon_S(z_t,y,t)$ generated by our compact UNet student. The objective is to align the noise distribution of the student with that of the teacher.",
723
+ "bbox": [
724
+ 109,
725
+ 210,
726
+ 883,
727
+ 266
728
+ ],
729
+ "page_idx": 3
730
+ },
731
+ {
732
+ "type": "equation",
733
+ "text": "\n$$\nL _ {T a s k} = \\mathbb {E} _ {z, \\epsilon , y, t, t _ {h}} | | \\epsilon - \\epsilon_ {S} (z _ {t}, y, t) | | _ {2} ^ {2}\n$$\n",
734
+ "text_format": "latex",
735
+ "bbox": [
736
+ 369,
737
+ 277,
738
+ 625,
739
+ 296
740
+ ],
741
+ "page_idx": 3
742
+ },
743
+ {
744
+ "type": "text",
745
+ "text": "3.2.3 Output-Level Knowledge Distillation (KD)",
746
+ "text_level": 1,
747
+ "bbox": [
748
+ 112,
749
+ 309,
750
+ 464,
751
+ 324
752
+ ],
753
+ "page_idx": 3
754
+ },
755
+ {
756
+ "type": "text",
757
+ "text": "The compact student is trained to imitate the outputs of the original U-Net teacher, denoted as $\\epsilon_{T}$ , using an output-level KD objective. This objective ensures that the overall output distribution of the student aligns with that of the teacher.",
758
+ "bbox": [
759
+ 109,
760
+ 333,
761
+ 883,
762
+ 362
763
+ ],
764
+ "page_idx": 3
765
+ },
766
+ {
767
+ "type": "equation",
768
+ "text": "\n$$\nL _ {O u t K D} = \\mathbb {E} _ {z, \\epsilon , y, t, t _ {h}} | | \\epsilon_ {T} - \\epsilon_ {S} (z _ {t}, y, t) | | _ {2} ^ {2}\n$$\n",
769
+ "text_format": "latex",
770
+ "bbox": [
771
+ 356,
772
+ 375,
773
+ 638,
774
+ 391
775
+ ],
776
+ "page_idx": 3
777
+ },
778
+ {
779
+ "type": "text",
780
+ "text": "3.2.4 Feature-Level Knowledge Distillation (KD)",
781
+ "text_level": 1,
782
+ "bbox": [
783
+ 112,
784
+ 404,
785
+ 465,
786
+ 420
787
+ ],
788
+ "page_idx": 3
789
+ },
790
+ {
791
+ "type": "text",
792
+ "text": "A pivotal component of our approach is feature-level KD, providing rich guidance for the student's training. The feature-level KD objective, denoted as $L_{FeatKD}$ , measures the difference between the feature maps of corresponding layers in both the teacher and student models. Importantly, our approach eliminates the need for additional regressors by ensuring that the dimensionality of feature maps already matches at the end of each layer in both models.",
793
+ "bbox": [
794
+ 109,
795
+ 429,
796
+ 883,
797
+ 486
798
+ ],
799
+ "page_idx": 3
800
+ },
801
+ {
802
+ "type": "equation",
803
+ "text": "\n$$\nL _ {F e a t K D} = \\mathbb {E} _ {h, X _ {l}} | | f _ {l} ^ {T} (z _ {t}, y, t) - f _ {l} ^ {S} (z _ {t}, y, t) | | _ {2} ^ {2}\n$$\n",
804
+ "text_format": "latex",
805
+ "bbox": [
806
+ 334,
807
+ 496,
808
+ 658,
809
+ 513
810
+ ],
811
+ "page_idx": 3
812
+ },
813
+ {
814
+ "type": "text",
815
+ "text": "3.2.5 Overall Objective",
816
+ "text_level": 1,
817
+ "bbox": [
818
+ 112,
819
+ 527,
820
+ 290,
821
+ 542
822
+ ],
823
+ "page_idx": 3
824
+ },
825
+ {
826
+ "type": "text",
827
+ "text": "The final objective encompasses the task loss, output-level KD, and feature-level KD, weighted by coefficients $\\lambda_{OutKD}$ and $\\lambda_{FeatKD}$ . Without loss-weight tuning, our approach demonstrates effectiveness in empirical validation.",
828
+ "bbox": [
829
+ 109,
830
+ 551,
831
+ 883,
832
+ 582
833
+ ],
834
+ "page_idx": 3
835
+ },
836
+ {
837
+ "type": "equation",
838
+ "text": "\n$$\n\\mathrm {L} = L _ {\\text {T a s k}} + \\lambda_ {\\text {O u t K D}} * L _ {\\text {O u t K D}} + \\lambda_ {\\text {F e a t K D}} * L _ {\\text {F e a t K D}}\n$$\n",
839
+ "text_format": "latex",
840
+ "bbox": [
841
+ 305,
842
+ 593,
843
+ 687,
844
+ 609
845
+ ],
846
+ "page_idx": 3
847
+ },
848
+ {
849
+ "type": "text",
850
+ "text": "Another advantage of this method of distillation is that LoRA weights created for the parent model tend to produce close results without retraining. This may reduce the number of training steps required to migrate models.",
851
+ "bbox": [
852
+ 109,
853
+ 621,
854
+ 883,
855
+ 650
856
+ ],
857
+ "page_idx": 3
858
+ },
859
+ {
860
+ "type": "text",
861
+ "text": "To expound on our compression strategy, we consider the analogy to DistilBERT (Sanh et al., 2020), which reduces the number of layers while initializing the compact model with original weights. Our compression methodology involves targeted removal strategies in both down and up stages.",
862
+ "bbox": [
863
+ 109,
864
+ 655,
865
+ 883,
866
+ 699
867
+ ],
868
+ "page_idx": 3
869
+ },
870
+ {
871
+ "type": "text",
872
+ "text": "3.3 Teacher Models",
873
+ "text_level": 1,
874
+ "bbox": [
875
+ 112,
876
+ 714,
877
+ 264,
878
+ 728
879
+ ],
880
+ "page_idx": 3
881
+ },
882
+ {
883
+ "type": "text",
884
+ "text": "We initially take SDXL Base $^{5}$ as the teacher, but later swap it for a finetuned model, ZavychromaXL $^{6}$ and finally use JuggernautXL $^{7}$ . We find that swapping the teacher boosts the quality significantly even if the same dataset is used again. This showcases that using multiple expert models can aid in instilling new concepts as well as improving quality of the student.",
885
+ "bbox": [
886
+ 109,
887
+ 739,
888
+ 883,
889
+ 796
890
+ ],
891
+ "page_idx": 3
892
+ },
893
+ {
894
+ "type": "text",
895
+ "text": "Our compression methodology, inspired by proven techniques (Kim et al., 2023), not only reduces model size but also ensures that essential features are retained through the careful removal of redundant blocks. The introduction of layer-level losses further refines this process, contributing to the overall efficiency and efficacy of our compressed models—SSD-1B and Segmind-Vega.",
896
+ "bbox": [
897
+ 109,
898
+ 801,
899
+ 883,
900
+ 859
901
+ ],
902
+ "page_idx": 3
903
+ },
904
+ {
905
+ "type": "header",
906
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
907
+ "bbox": [
908
+ 225,
909
+ 42,
910
+ 767,
911
+ 56
912
+ ],
913
+ "page_idx": 3
914
+ },
915
+ {
916
+ "type": "page_footnote",
917
+ "text": "<sup>5</sup>https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0",
918
+ "bbox": [
919
+ 132,
920
+ 869,
921
+ 630,
922
+ 883
923
+ ],
924
+ "page_idx": 3
925
+ },
926
+ {
927
+ "type": "page_footnote",
928
+ "text": "$^{6}$ https://civitai.com/models/119229/zavychromaxl",
929
+ "bbox": [
930
+ 132,
931
+ 883,
932
+ 496,
933
+ 897
934
+ ],
935
+ "page_idx": 3
936
+ },
937
+ {
938
+ "type": "page_footnote",
939
+ "text": "<sup>7</sup>https://civitai.com/models/133005?modelVersionId=240840",
940
+ "bbox": [
941
+ 132,
942
+ 897,
943
+ 566,
944
+ 911
945
+ ],
946
+ "page_idx": 3
947
+ },
948
+ {
949
+ "type": "page_number",
950
+ "text": "4",
951
+ "bbox": [
952
+ 493,
953
+ 935,
954
+ 503,
955
+ 946
956
+ ],
957
+ "page_idx": 3
958
+ },
959
+ {
960
+ "type": "text",
961
+ "text": "3.4 Pruning",
962
+ "text_level": 1,
963
+ "bbox": [
964
+ 112,
965
+ 90,
966
+ 212,
967
+ 106
968
+ ],
969
+ "page_idx": 4
970
+ },
971
+ {
972
+ "type": "text",
973
+ "text": "We employ human evaluation of outputs along with heuristics to identify potential attention layers to remove.",
974
+ "bbox": [
975
+ 109,
976
+ 116,
977
+ 830,
978
+ 132
979
+ ],
980
+ "page_idx": 4
981
+ },
982
+ {
983
+ "type": "text",
984
+ "text": "To create SSD-1B, along with removal of the mid-block's attention layers and the second Residual Network, we remove the following layers of SDXL:",
985
+ "bbox": [
986
+ 109,
987
+ 137,
988
+ 883,
989
+ 166
990
+ ],
991
+ "page_idx": 4
992
+ },
993
+ {
994
+ "type": "list",
995
+ "sub_type": "text",
996
+ "list_items": [
997
+ "- 4th, 5th, 7th, 8th, 9th and 10th transformer blocks of all attention layers in the 3rd downsampling stage and the first two attention layers of the first upsampling stage of the U-Net.",
998
+ "- The second transformer block of the second and third attention layers of the second upsampling stage."
999
+ ],
1000
+ "bbox": [
1001
+ 156,
1002
+ 176,
1003
+ 879,
1004
+ 223
1005
+ ],
1006
+ "page_idx": 4
1007
+ },
1008
+ {
1009
+ "type": "text",
1010
+ "text": "To create Segmind Vega, we remove the following layers:",
1011
+ "bbox": [
1012
+ 109,
1013
+ 234,
1014
+ 493,
1015
+ 250
1016
+ ],
1017
+ "page_idx": 4
1018
+ },
1019
+ {
1020
+ "type": "list",
1021
+ "sub_type": "text",
1022
+ "list_items": [
1023
+ "- 3rd, 4th, 5th, 6th, 7th, 8th, 9th and 10th transformer blocks of the first attention layer of the third downsampling stage and all attention layers in the first upsampling stage of the U-Net.",
1024
+ "- 2nd, 4th, 5th, 6th, 7th, 8th, 9th and 10th transformer blocks of the second attention layer of the third down sampling stage.",
1025
+ "- The second transformer block of all attention layers of the second downsampling and upsampling stages."
1026
+ ],
1027
+ "bbox": [
1028
+ 156,
1029
+ 260,
1030
+ 880,
1031
+ 339
1032
+ ],
1033
+ "page_idx": 4
1034
+ },
1035
+ {
1036
+ "type": "text",
1037
+ "text": "4 Training",
1038
+ "text_level": 1,
1039
+ "bbox": [
1040
+ 109,
1041
+ 359,
1042
+ 220,
1043
+ 378
1044
+ ],
1045
+ "page_idx": 4
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "text": "In our training methodology, we adopt a distillation-based retraining approach. We use a layer-level loss in an attempt to mimic the features at each stage of the teacher U-Net. This process is crucial for achieving efficient knowledge transfer and preserving the generative quality of SDXL even in significantly compressed models.",
1050
+ "bbox": [
1051
+ 109,
1052
+ 391,
1053
+ 883,
1054
+ 434
1055
+ ],
1056
+ "page_idx": 4
1057
+ },
1058
+ {
1059
+ "type": "text",
1060
+ "text": "Our training strategy, inspired by distillation-based retraining, ensures that our compressed models inherit the essential knowledge from the teacher model, enabling them to efficiently mimic the behavior of the original U-Net across various layers, including attention and residual network (ResNet) layers.",
1061
+ "bbox": [
1062
+ 109,
1063
+ 439,
1064
+ 883,
1065
+ 482
1066
+ ],
1067
+ "page_idx": 4
1068
+ },
1069
+ {
1070
+ "type": "text",
1071
+ "text": "We trained SSD-1B at fp16 mixed-precision for a total of 251,000 steps with a constant learning rate of 1e-5, using Adam Optimizer (Kingma & Ba, 2017), at $1024^{*}1024$ image resolutions, on four 80GB A100 GPUs at an effective batch size of 32. We trained Vega at fp16 mixed-precision for a total of 540,000 steps with a learning rate of 1e-5, at $1024^{*}1024$ image resolutions, on four 80GB A100 GPUs, at an effective batch size of 128. The datasets used for training and evaluation include GRIT (Peng et al., 2023) and images generated by Midjourney<sup>8</sup>.",
1072
+ "bbox": [
1073
+ 109,
1074
+ 487,
1075
+ 883,
1076
+ 556
1077
+ ],
1078
+ "page_idx": 4
1079
+ },
1080
+ {
1081
+ "type": "text",
1082
+ "text": "5 Results",
1083
+ "text_level": 1,
1084
+ "bbox": [
1085
+ 112,
1086
+ 575,
1087
+ 209,
1088
+ 590
1089
+ ],
1090
+ "page_idx": 4
1091
+ },
1092
+ {
1093
+ "type": "text",
1094
+ "text": "We present two distilled versions of Stable Diffusion XL, Segmind Stable Diffusion(SSD-1B) and Segmind Vega, which closely mimic the outputs of the base model as shown in the Figure 4, 5, 6, 7, 8 and 9. All images are generated with the DDPM Scheduler, 25 inference steps and Guidance Scale set to 9.",
1095
+ "bbox": [
1096
+ 109,
1097
+ 607,
1098
+ 883,
1099
+ 650
1100
+ ],
1101
+ "page_idx": 4
1102
+ },
1103
+ {
1104
+ "type": "text",
1105
+ "text": "We report up to $60\\%$ speedup with SSD-1B and up to $100\\%$ speedup with Segmind-Vega. The detailed metrics taken on an A100 at 25 steps with DDPM Scheduler at guidance scale 9 and batch size 1, are reported in Table 1.",
1106
+ "bbox": [
1107
+ 109,
1108
+ 656,
1109
+ 883,
1110
+ 685
1111
+ ],
1112
+ "page_idx": 4
1113
+ },
1114
+ {
1115
+ "type": "table",
1116
+ "img_path": "images/2418a2e65733226f0123d6b9453128297785f22883f7470bb1e91db91c7d5b7e.jpg",
1117
+ "table_caption": [],
1118
+ "table_footnote": [],
1119
+ "table_body": "<table><tr><td>Model</td><td>Inference Time (s) (↓)</td><td>Iteration/s (↑)</td></tr><tr><td>SD1.59</td><td>1.699</td><td>16.79</td></tr><tr><td>SDXL</td><td>3.135</td><td>8.80</td></tr><tr><td>SSD-1B</td><td>2.169</td><td>13.37</td></tr><tr><td>Vega</td><td>1.616</td><td>18.95</td></tr></table>",
1120
+ "bbox": [
1121
+ 313,
1122
+ 696,
1123
+ 679,
1124
+ 772
1125
+ ],
1126
+ "page_idx": 4
1127
+ },
1128
+ {
1129
+ "type": "text",
1130
+ "text": "Table 1: Benchmarking inference latency",
1131
+ "bbox": [
1132
+ 359,
1133
+ 787,
1134
+ 633,
1135
+ 803
1136
+ ],
1137
+ "page_idx": 4
1138
+ },
1139
+ {
1140
+ "type": "header",
1141
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
1142
+ "bbox": [
1143
+ 225,
1144
+ 42,
1145
+ 767,
1146
+ 56
1147
+ ],
1148
+ "page_idx": 4
1149
+ },
1150
+ {
1151
+ "type": "page_footnote",
1152
+ "text": "<sup>8</sup>https://www.midjourney.com/ \n<sup>9</sup>Inference Times reported at resolution 768 * 768",
1153
+ "bbox": [
1154
+ 132,
1155
+ 883,
1156
+ 431,
1157
+ 911
1158
+ ],
1159
+ "page_idx": 4
1160
+ },
1161
+ {
1162
+ "type": "page_number",
1163
+ "text": "5",
1164
+ "bbox": [
1165
+ 493,
1166
+ 935,
1167
+ 503,
1168
+ 946
1169
+ ],
1170
+ "page_idx": 4
1171
+ },
1172
+ {
1173
+ "type": "image",
1174
+ "img_path": "images/07a59664e8d6b0cfca1ff3eb496f8baa5ed2886be03d7fa1841075459e4cc863.jpg",
1175
+ "image_caption": [
1176
+ "(a) SDXL"
1177
+ ],
1178
+ "image_footnote": [],
1179
+ "bbox": [
1180
+ 114,
1181
+ 114,
1182
+ 346,
1183
+ 262
1184
+ ],
1185
+ "page_idx": 5
1186
+ },
1187
+ {
1188
+ "type": "image",
1189
+ "img_path": "images/a0cd48cb3c3742c5ee15e9acf36f7fee1deb7d1d816ae89c8963403abdf16ddb.jpg",
1190
+ "image_caption": [
1191
+ "(b) SSD-1B",
1192
+ "Figure 4: \"A royal flaming wolf emerging from a magical big forest, blue flames, front facing, portrait, closeup, dark, bokeh, dawn, god rays, highly detailed, highres, Cinematic, Cinemascope, astonishing, epic, gorgeous, ral-fluff\""
1193
+ ],
1194
+ "image_footnote": [],
1195
+ "bbox": [
1196
+ 369,
1197
+ 114,
1198
+ 604,
1199
+ 263
1200
+ ],
1201
+ "page_idx": 5
1202
+ },
1203
+ {
1204
+ "type": "image",
1205
+ "img_path": "images/b6020be88cb29f929393888097eb1c0e539d0d55ce61c54c68638e3b61b6427b.jpg",
1206
+ "image_caption": [
1207
+ "(c) Vega"
1208
+ ],
1209
+ "image_footnote": [],
1210
+ "bbox": [
1211
+ 625,
1212
+ 114,
1213
+ 859,
1214
+ 263
1215
+ ],
1216
+ "page_idx": 5
1217
+ },
1218
+ {
1219
+ "type": "image",
1220
+ "img_path": "images/147d5f50c62fd6cdd292daf3c7ca3dcafeb294b7c2cc4248ce0363ae3b90162d.jpg",
1221
+ "image_caption": [
1222
+ "(a) SDXL",
1223
+ "Figure 5: \"raw photo, close-up, punk band cover, red brick wall, red theme, a brutal man, 40 years old, mohawk, (manly, wide jaw:1.2), leather jacket, red shirt, (vibrant colors:0.9), film grain, bokeh, fashion magazine, hdr, highly detailed photography, (muted colors, cinematic, dim colors, soothing tones:1.2), vibrant, insanely detailed, hyperdetailed, (dark shot:1.2), (vsco:0.3), (intricate details:0.9), (hdr, hyperdetailed:1.2)\""
1224
+ ],
1225
+ "image_footnote": [],
1226
+ "bbox": [
1227
+ 114,
1228
+ 383,
1229
+ 346,
1230
+ 530
1231
+ ],
1232
+ "page_idx": 5
1233
+ },
1234
+ {
1235
+ "type": "image",
1236
+ "img_path": "images/cda4fe33f6fe5abbcdb226295606e3231f166fb7569c7f1fddba8e6a2246be79.jpg",
1237
+ "image_caption": [
1238
+ "(b) SSD-1B"
1239
+ ],
1240
+ "image_footnote": [],
1241
+ "bbox": [
1242
+ 369,
1243
+ 383,
1244
+ 604,
1245
+ 530
1246
+ ],
1247
+ "page_idx": 5
1248
+ },
1249
+ {
1250
+ "type": "image",
1251
+ "img_path": "images/05e3a954da7505caa497e085efb656241ed3a19dbb0b7b6fdec6b067beb84d19.jpg",
1252
+ "image_caption": [
1253
+ "(c) Vega"
1254
+ ],
1255
+ "image_footnote": [],
1256
+ "bbox": [
1257
+ 625,
1258
+ 383,
1259
+ 859,
1260
+ 530
1261
+ ],
1262
+ "page_idx": 5
1263
+ },
1264
+ {
1265
+ "type": "image",
1266
+ "img_path": "images/5131f9726bb6909778cc437ed16f5972dae906c70a81ab5434722a010ebccd76.jpg",
1267
+ "image_caption": [
1268
+ "(a) SDXL",
1269
+ "Figure 6: \"(best quality:1.5), (intricate emotional details:1.5), (sharpen details), (ultra detailed), (cinematic lighting), pink Cadillac, car, driving through the country, sunset, relaxing vibes. cartoon style, line art, sticker style\""
1270
+ ],
1271
+ "image_footnote": [],
1272
+ "bbox": [
1273
+ 114,
1274
+ 676,
1275
+ 346,
1276
+ 825
1277
+ ],
1278
+ "page_idx": 5
1279
+ },
1280
+ {
1281
+ "type": "image",
1282
+ "img_path": "images/48cd02cecf92136ee610268891a2972cfab524a0e9d18d8271bf4e4542353d2e.jpg",
1283
+ "image_caption": [
1284
+ "(b) SSD-1B"
1285
+ ],
1286
+ "image_footnote": [],
1287
+ "bbox": [
1288
+ 369,
1289
+ 676,
1290
+ 604,
1291
+ 825
1292
+ ],
1293
+ "page_idx": 5
1294
+ },
1295
+ {
1296
+ "type": "image",
1297
+ "img_path": "images/d09a20c171fea95b894670f3da797b0968a2e85fdec86eca1c55f41eaab50c09.jpg",
1298
+ "image_caption": [
1299
+ "(c) Vega"
1300
+ ],
1301
+ "image_footnote": [],
1302
+ "bbox": [
1303
+ 625,
1304
+ 676,
1305
+ 859,
1306
+ 825
1307
+ ],
1308
+ "page_idx": 5
1309
+ },
1310
+ {
1311
+ "type": "header",
1312
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
1313
+ "bbox": [
1314
+ 225,
1315
+ 42,
1316
+ 767,
1317
+ 56
1318
+ ],
1319
+ "page_idx": 5
1320
+ },
1321
+ {
1322
+ "type": "page_number",
1323
+ "text": "6",
1324
+ "bbox": [
1325
+ 493,
1326
+ 936,
1327
+ 503,
1328
+ 946
1329
+ ],
1330
+ "page_idx": 5
1331
+ },
1332
+ {
1333
+ "type": "image",
1334
+ "img_path": "images/ad79135e13607d7737cba03e508c5214c5138bfabc9cb78b1434e77f3c23b7cd.jpg",
1335
+ "image_caption": [
1336
+ "(a) SDXL"
1337
+ ],
1338
+ "image_footnote": [],
1339
+ "bbox": [
1340
+ 114,
1341
+ 116,
1342
+ 346,
1343
+ 262
1344
+ ],
1345
+ "page_idx": 6
1346
+ },
1347
+ {
1348
+ "type": "image",
1349
+ "img_path": "images/594450319e3beb0c8f4868d752c288b8004f4151d3c5c261e712f6c94c080cce.jpg",
1350
+ "image_caption": [
1351
+ "(b) SSD-1B",
1352
+ "Figure 7: \"Swamp marsh Poison green red Soft watercolors digital watercolors painting illustration masterpiece raining shooting stars twinkling stars glistening stars glittery stars full moon stars full moon intricate motifs perfect composition masterpiece insanely-detailed extreme-detailed hyper-detailed beautiful volumetric deep rich colors volumetric lighting shadows Ray tracing, Mark Brooks and Dan Mumford, comic book art, perfect\""
1353
+ ],
1354
+ "image_footnote": [],
1355
+ "bbox": [
1356
+ 370,
1357
+ 116,
1358
+ 602,
1359
+ 262
1360
+ ],
1361
+ "page_idx": 6
1362
+ },
1363
+ {
1364
+ "type": "image",
1365
+ "img_path": "images/1c26eca1e033343b135988c79571bb7f87627f70b7d1bc3f6e46a47f206c00a6.jpg",
1366
+ "image_caption": [
1367
+ "(c) Vega"
1368
+ ],
1369
+ "image_footnote": [],
1370
+ "bbox": [
1371
+ 625,
1372
+ 116,
1373
+ 857,
1374
+ 262
1375
+ ],
1376
+ "page_idx": 6
1377
+ },
1378
+ {
1379
+ "type": "image",
1380
+ "img_path": "images/ed7d848c764093464e63fe5bb933c825a97d58de0317ff8ebda9eeda62d0a37a.jpg",
1381
+ "image_caption": [
1382
+ "(a) SDXL",
1383
+ "Figure 8: (\"best quality:1.5), (intricate emotional details:1.5), (sharpen details), (ultra detailed), (cinematic lighting), magical woods, unexplained lights, fantasy, otherworldly, mist, atmospheric, flowers, plants\""
1384
+ ],
1385
+ "image_footnote": [],
1386
+ "bbox": [
1387
+ 114,
1388
+ 411,
1389
+ 344,
1390
+ 556
1391
+ ],
1392
+ "page_idx": 6
1393
+ },
1394
+ {
1395
+ "type": "image",
1396
+ "img_path": "images/1c434151cfd430ea2fa14c658bfc05cd6cca2e4303286be9af7563b21c143ac4.jpg",
1397
+ "image_caption": [
1398
+ "(b) SSD-1B"
1399
+ ],
1400
+ "image_footnote": [],
1401
+ "bbox": [
1402
+ 370,
1403
+ 411,
1404
+ 602,
1405
+ 556
1406
+ ],
1407
+ "page_idx": 6
1408
+ },
1409
+ {
1410
+ "type": "image",
1411
+ "img_path": "images/97cae3e8af3bd8774be0b5216f5b28cdd44f4126cbe291b4ae81fb32bf157158.jpg",
1412
+ "image_caption": [
1413
+ "(c) Vega"
1414
+ ],
1415
+ "image_footnote": [],
1416
+ "bbox": [
1417
+ 625,
1418
+ 411,
1419
+ 857,
1420
+ 556
1421
+ ],
1422
+ "page_idx": 6
1423
+ },
1424
+ {
1425
+ "type": "image",
1426
+ "img_path": "images/1d1f458071946d6d3abea83cd29ad5bd0ab6474749c631d74286b8f1f0ca2aa0.jpg",
1427
+ "image_caption": [
1428
+ "(a) SDXL",
1429
+ "Figure 9: \"((fatty cat)) dracula, Victorian style, dracula-inspired, long red-black cloak, fangs, castle, in motion, furry paws, action-packed background, dark theme, glow\""
1430
+ ],
1431
+ "image_footnote": [],
1432
+ "bbox": [
1433
+ 114,
1434
+ 678,
1435
+ 344,
1436
+ 824
1437
+ ],
1438
+ "page_idx": 6
1439
+ },
1440
+ {
1441
+ "type": "image",
1442
+ "img_path": "images/944af38ab0d81c636c16806e7cbfb2375fa68af3536c08acd83136f4fb514ed8.jpg",
1443
+ "image_caption": [
1444
+ "(b) SSD-1B"
1445
+ ],
1446
+ "image_footnote": [],
1447
+ "bbox": [
1448
+ 370,
1449
+ 678,
1450
+ 602,
1451
+ 824
1452
+ ],
1453
+ "page_idx": 6
1454
+ },
1455
+ {
1456
+ "type": "image",
1457
+ "img_path": "images/e711b5be98f41bfeb23349cdf7b803ff0c419b10e97acdf7e9e2c11f6976d5b3.jpg",
1458
+ "image_caption": [
1459
+ "(c) Vega"
1460
+ ],
1461
+ "image_footnote": [],
1462
+ "bbox": [
1463
+ 625,
1464
+ 678,
1465
+ 857,
1466
+ 824
1467
+ ],
1468
+ "page_idx": 6
1469
+ },
1470
+ {
1471
+ "type": "header",
1472
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
1473
+ "bbox": [
1474
+ 225,
1475
+ 42,
1476
+ 767,
1477
+ 56
1478
+ ],
1479
+ "page_idx": 6
1480
+ },
1481
+ {
1482
+ "type": "page_number",
1483
+ "text": "7",
1484
+ "bbox": [
1485
+ 493,
1486
+ 935,
1487
+ 503,
1488
+ 946
1489
+ ],
1490
+ "page_idx": 6
1491
+ },
1492
+ {
1493
+ "type": "text",
1494
+ "text": "5.1 Quality Study",
1495
+ "text_level": 1,
1496
+ "bbox": [
1497
+ 112,
1498
+ 90,
1499
+ 251,
1500
+ 107
1501
+ ],
1502
+ "page_idx": 7
1503
+ },
1504
+ {
1505
+ "type": "text",
1506
+ "text": "PlaygroundAI $^{10}$ , a generative AI startup, conducted an extensive blind human preference study encompassing 1000 images and involving 1540 unique users to assess the comparative performance of SSD-1B and SDXL. Remarkably, the findings revealed that not only did SSD-1B maintain image quality, but it was also marginally preferred over the larger SDXL model. The comprehensive details of this study are presented in Table 2.",
1507
+ "bbox": [
1508
+ 109,
1509
+ 116,
1510
+ 883,
1511
+ 174
1512
+ ],
1513
+ "page_idx": 7
1514
+ },
1515
+ {
1516
+ "type": "table",
1517
+ "img_path": "images/00d77f20188694e465b2a28b885dd466992f85b50593f7b0ba401eaa5ad75a26.jpg",
1518
+ "table_caption": [],
1519
+ "table_footnote": [],
1520
+ "table_body": "<table><tr><td>Model</td><td>Pairs Won (↑)</td><td>Percentage Pairs Won (↑)</td></tr><tr><td>SSD-1B</td><td>528</td><td>52.8</td></tr><tr><td>SDXL</td><td>472</td><td>47.2</td></tr></table>",
1521
+ "bbox": [
1522
+ 302,
1523
+ 186,
1524
+ 691,
1525
+ 233
1526
+ ],
1527
+ "page_idx": 7
1528
+ },
1529
+ {
1530
+ "type": "text",
1531
+ "text": "Table 2: Human preference study",
1532
+ "bbox": [
1533
+ 385,
1534
+ 250,
1535
+ 609,
1536
+ 263
1537
+ ],
1538
+ "page_idx": 7
1539
+ },
1540
+ {
1541
+ "type": "text",
1542
+ "text": "The table illustrates the outcomes of the study, with SSD-1B securing victory in $52.8\\%$ of the image pairs, whereas SDXL, although commendable, trailed slightly with $47.2\\%$ . These results not only underscore the noteworthy quality preservation of SSD-1B but also highlight its perceptible preference among the diverse user cohort involved in the blind study.",
1543
+ "bbox": [
1544
+ 109,
1545
+ 270,
1546
+ 883,
1547
+ 325
1548
+ ],
1549
+ "page_idx": 7
1550
+ },
1551
+ {
1552
+ "type": "text",
1553
+ "text": "6 Conclusion",
1554
+ "text_level": 1,
1555
+ "bbox": [
1556
+ 112,
1557
+ 347,
1558
+ 240,
1559
+ 363
1560
+ ],
1561
+ "page_idx": 7
1562
+ },
1563
+ {
1564
+ "type": "text",
1565
+ "text": "We show that distillation of large models like SDXL via using knowledge distillation using multiple models as teachers and using feature losses can quickly converge to give similar quality outputs as the base model despite having a significantly smaller student model. Some of its limitations include but are not limited to Text, Hands and Full Body shots.",
1566
+ "bbox": [
1567
+ 109,
1568
+ 378,
1569
+ 883,
1570
+ 434
1571
+ ],
1572
+ "page_idx": 7
1573
+ },
1574
+ {
1575
+ "type": "text",
1576
+ "text": "Our work also highlights the importance of choice of dataset and teacher model as it can tremendously help boost the final model's quality. We show that progressively distilling can reduce total training time significantly. In the future this technique cnn be further explored on other large models such as LLMs, MLMs etc,",
1577
+ "bbox": [
1578
+ 109,
1579
+ 441,
1580
+ 883,
1581
+ 484
1582
+ ],
1583
+ "page_idx": 7
1584
+ },
1585
+ {
1586
+ "type": "text",
1587
+ "text": "References",
1588
+ "text_level": 1,
1589
+ "bbox": [
1590
+ 112,
1591
+ 503,
1592
+ 209,
1593
+ 520
1594
+ ],
1595
+ "page_idx": 7
1596
+ },
1597
+ {
1598
+ "type": "list",
1599
+ "sub_type": "ref_text",
1600
+ "list_items": [
1601
+ "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015.",
1602
+ "Bo-Kyeong Kim, Hyoung-Kyu Song, Thibault Castells, and Shinkook Choi. Bk-sdm: A lightweight, fast, and cheap version of stable diffusion, 2023.",
1603
+ "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017.",
1604
+ "Yanyu Li, Huan Wang, Qing Jin, Ju Hu, Pavlo Chemerys, Yun Fu, Yanzhi Wang, Sergey Tulyakov, and Jian Ren. \nSnapfusion: Text-to-image diffusion model on mobile devices within two seconds, 2023.",
1605
+ "Simian Luo, Yiqin Tan, Longbo Huang, Jian Li, and Hang Zhao. Latent consistency models: Synthesizing high-resolution images with few-step inference, 2023a.",
1606
+ "Simian Luo, Yiqin Tan, Suraj Patil, Daniel Gu, Patrick von Platen, Apolinário Passos, Longbo Huang, Jian Li, and Hang Zhao. Lcm-lora: A universal stable-diffusion acceleration module, 2023b.",
1607
+ "Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. Kosmos-2: Grounding multimodal large language models to the world. ArXiv, abs/2306.14824, 2023.",
1608
+ "Pablo Pernias, Dominic Rampas, Mats L. Richter, Christopher J. Pal, and Marc Aubreville. Wuerstchen: An efficient architecture for large-scale text-to-image diffusion models, 2023.",
1609
+ "Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis, 2023.",
1610
+ "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation, 2021.",
1611
+ "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10684-10695, June 2022."
1612
+ ],
1613
+ "bbox": [
1614
+ 112,
1615
+ 529,
1616
+ 883,
1617
+ 885
1618
+ ],
1619
+ "page_idx": 7
1620
+ },
1621
+ {
1622
+ "type": "header",
1623
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
1624
+ "bbox": [
1625
+ 225,
1626
+ 42,
1627
+ 767,
1628
+ 56
1629
+ ],
1630
+ "page_idx": 7
1631
+ },
1632
+ {
1633
+ "type": "page_footnote",
1634
+ "text": "10https://playgroundai.com/",
1635
+ "bbox": [
1636
+ 129,
1637
+ 896,
1638
+ 336,
1639
+ 912
1640
+ ],
1641
+ "page_idx": 7
1642
+ },
1643
+ {
1644
+ "type": "page_number",
1645
+ "text": "8",
1646
+ "bbox": [
1647
+ 493,
1648
+ 935,
1649
+ 503,
1650
+ 946
1651
+ ],
1652
+ "page_idx": 7
1653
+ },
1654
+ {
1655
+ "type": "list",
1656
+ "sub_type": "ref_text",
1657
+ "list_items": [
1658
+ "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding, 2022.",
1659
+ "Victor Sanh, Lysandre Debut, Julien Chaumont, and Thomas Wolf. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter, 2020.",
1660
+ "Alex Shonenkov, Misha Konstantinov, Daria Bakshandaeva, Christoph Schuhmann, Ksenia Ivanova, and Nadija Klokova. If by deepfloyd lab at stabilityai, 2023."
1661
+ ],
1662
+ "bbox": [
1663
+ 114,
1664
+ 90,
1665
+ 883,
1666
+ 213
1667
+ ],
1668
+ "page_idx": 8
1669
+ },
1670
+ {
1671
+ "type": "header",
1672
+ "text": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss",
1673
+ "bbox": [
1674
+ 225,
1675
+ 42,
1676
+ 767,
1677
+ 56
1678
+ ],
1679
+ "page_idx": 8
1680
+ },
1681
+ {
1682
+ "type": "page_number",
1683
+ "text": "9",
1684
+ "bbox": [
1685
+ 493,
1686
+ 935,
1687
+ 504,
1688
+ 946
1689
+ ],
1690
+ "page_idx": 8
1691
+ }
1692
+ ]
2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_model.json ADDED
@@ -0,0 +1,2110 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.275,
8
+ 0.061,
9
+ 0.701
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2401.02677v1 [cs.CV] 5 Jan 2024"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.126,
18
+ 0.121,
19
+ 0.873,
20
+ 0.166
21
+ ],
22
+ "angle": 0,
23
+ "content": "PROGRESSIVE KNOWLEDGE DISTILLATION OF Stable Diffusion XL USING LAYER LEVEL LOSS"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.425,
29
+ 0.199,
30
+ 0.572,
31
+ 0.213
32
+ ],
33
+ "angle": 0,
34
+ "content": "TECHNICAL REPORT"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.291,
40
+ 0.24,
41
+ 0.412,
42
+ 0.254
43
+ ],
44
+ "angle": 0,
45
+ "content": "Yatharth Gupta*"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.314,
51
+ 0.256,
52
+ 0.374,
53
+ 0.269
54
+ ],
55
+ "angle": 0,
56
+ "content": "Segmind"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.253,
62
+ 0.27,
63
+ 0.435,
64
+ 0.282
65
+ ],
66
+ "angle": 0,
67
+ "content": "yatharthg@segmind.com"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.594,
73
+ 0.24,
74
+ 0.736,
75
+ 0.254
76
+ ],
77
+ "angle": 0,
78
+ "content": "Vishnu V. Jaddipal*"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.633,
84
+ 0.256,
85
+ 0.693,
86
+ 0.268
87
+ ],
88
+ "angle": 0,
89
+ "content": "Segmind"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.58,
95
+ 0.269,
96
+ 0.745,
97
+ 0.282
98
+ ],
99
+ "angle": 0,
100
+ "content": "vishnuj@segmind.com"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.192,
106
+ 0.303,
107
+ 0.312,
108
+ 0.316
109
+ ],
110
+ "angle": 0,
111
+ "content": "Harish Prabhala"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.222,
117
+ 0.318,
118
+ 0.282,
119
+ 0.331
120
+ ],
121
+ "angle": 0,
122
+ "content": "Segmind"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.174,
128
+ 0.333,
129
+ 0.33,
130
+ 0.345
131
+ ],
132
+ "angle": 0,
133
+ "content": "harish@segmind.com"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.442,
139
+ 0.303,
140
+ 0.523,
141
+ 0.317
142
+ ],
143
+ "angle": 0,
144
+ "content": "Sayak Paul"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.435,
150
+ 0.318,
151
+ 0.53,
152
+ 0.331
153
+ ],
154
+ "angle": 0,
155
+ "content": "Hugging Face"
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.396,
161
+ 0.333,
162
+ 0.568,
163
+ 0.345
164
+ ],
165
+ "angle": 0,
166
+ "content": "sayak@huggingface.co"
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.663,
172
+ 0.303,
173
+ 0.796,
174
+ 0.316
175
+ ],
176
+ "angle": 0,
177
+ "content": "Patrick Von Platen"
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.683,
183
+ 0.318,
184
+ 0.776,
185
+ 0.332
186
+ ],
187
+ "angle": 0,
188
+ "content": "Hugging Face"
189
+ },
190
+ {
191
+ "type": "text",
192
+ "bbox": [
193
+ 0.635,
194
+ 0.333,
195
+ 0.824,
196
+ 0.345
197
+ ],
198
+ "angle": 0,
199
+ "content": "patrick@huggingface.co"
200
+ },
201
+ {
202
+ "type": "title",
203
+ "bbox": [
204
+ 0.45,
205
+ 0.395,
206
+ 0.548,
207
+ 0.41
208
+ ],
209
+ "angle": 0,
210
+ "content": "ABSTRACT"
211
+ },
212
+ {
213
+ "type": "text",
214
+ "bbox": [
215
+ 0.17,
216
+ 0.417,
217
+ 0.825,
218
+ 0.514
219
+ ],
220
+ "angle": 0,
221
+ "content": "Stable Diffusion XL (SDXL) has become the best open source text-to-image model (T2I) for its versatility and top-notch image quality. Efficiently addressing the computational demands of SDXL models is crucial for wider reach and applicability. In this work, we introduce two scaled-down variants, Segmind Stable Diffusion (SSD-1B) and Segmind-Vega, with 1.3B and 0.74B parameter UNets, respectively, achieved through progressive removal using layer-level losses focusing on reducing the model size while preserving generative quality. We release these models weights at https://hf.co/Segmind."
222
+ },
223
+ {
224
+ "type": "text",
225
+ "bbox": [
226
+ 0.17,
227
+ 0.516,
228
+ 0.825,
229
+ 0.573
230
+ ],
231
+ "angle": 0,
232
+ "content": "Our methodology involves the elimination of residual networks and transformer blocks from the U-Net structure of SDXL, resulting in significant reductions in parameters, and latency. Our compact models effectively emulate the original SDXL by capitalizing on transferred knowledge, achieving competitive results against larger multi-billion parameter SDXL."
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.17,
238
+ 0.575,
239
+ 0.825,
240
+ 0.618
241
+ ],
242
+ "angle": 0,
243
+ "content": "Our work underscores the efficacy of knowledge distillation coupled with layer-level losses in reducing model size while preserving the high-quality generative capabilities of SDXL, thus facilitating more accessible deployment in resource-constrained environments."
244
+ },
245
+ {
246
+ "type": "title",
247
+ "bbox": [
248
+ 0.114,
249
+ 0.639,
250
+ 0.254,
251
+ 0.654
252
+ ],
253
+ "angle": 0,
254
+ "content": "1 Introduction"
255
+ },
256
+ {
257
+ "type": "text",
258
+ "bbox": [
259
+ 0.111,
260
+ 0.671,
261
+ 0.885,
262
+ 0.783
263
+ ],
264
+ "angle": 0,
265
+ "content": "Stable Diffusion (Rombach et al., 2022) has emerged as highly influential in the realm of text-to-image (T2I) synthesis, playing a pivotal role as an open-source framework. Its remarkable capabilities has spurred its integration as a backbone in various text-guided vision applications. Stable Diffusion, characterized as T2I-specialized latent diffusion models (LDMs), leverages diffusion operations within a semantically compressed space, enhancing computational efficiency. Central to the architecture of Stable Diffusion is a U-Net that employs iterative sampling to progressively denoise a random latent code. This process is further supported by a text encoder and an image decoder, orchestrating the generation of text-aligned images. SDXL (Podell et al., 2023) is the largest variant with a 2.6B Parameter UNet and two text encoders, providing the best quality among open source models."
266
+ },
267
+ {
268
+ "type": "text",
269
+ "bbox": [
270
+ 0.111,
271
+ 0.788,
272
+ 0.885,
273
+ 0.887
274
+ ],
275
+ "angle": 0,
276
+ "content": "Notably, distillation techniques have been applied to pretrained diffusion models to curtail the number of denoising steps, resulting in identically structured models with reduced sampling requirements. Additionally, methods such as post-training quantization and implementation optimizations have been explored. The exploration of removing architectural elements in large diffusion models has also been investigated for the base U-Net models (Kim et al., 2023). In this context, our work endeavors to apply knowledge distillation methods to the SDXL model (Podell et al., 2023), resulting in the creation of two streamlined variants, namely Segmind Stable Diffusion (SSD-1B) and Segmind-Vega. We use the base model as well as finetuned versions in the distillation process. These models, with 1.3B and 0.74B"
277
+ },
278
+ {
279
+ "type": "footer",
280
+ "bbox": [
281
+ 0.136,
282
+ 0.898,
283
+ 0.258,
284
+ 0.912
285
+ ],
286
+ "angle": 0,
287
+ "content": "*Equal Contribution"
288
+ }
289
+ ],
290
+ [
291
+ {
292
+ "type": "header",
293
+ "bbox": [
294
+ 0.227,
295
+ 0.044,
296
+ 0.769,
297
+ 0.058
298
+ ],
299
+ "angle": 0,
300
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.111,
306
+ 0.092,
307
+ 0.885,
308
+ 0.135
309
+ ],
310
+ "angle": 0,
311
+ "content": "parameter UNets respectively, employ layer level losses to progressively reduce the model size to \\(20\\%\\), \\(40\\%\\), \\(50\\%\\), \\(60\\%\\), and ultimately \\(70\\%\\). This reduction in model size aims to strike a balance between computational efficiency and the preservation of generative capabilities, making SDXL more accessible for diverse applications."
312
+ },
313
+ {
314
+ "type": "title",
315
+ "bbox": [
316
+ 0.113,
317
+ 0.154,
318
+ 0.265,
319
+ 0.169
320
+ ],
321
+ "angle": 0,
322
+ "content": "2 Related Work"
323
+ },
324
+ {
325
+ "type": "title",
326
+ "bbox": [
327
+ 0.113,
328
+ 0.184,
329
+ 0.37,
330
+ 0.198
331
+ ],
332
+ "angle": 0,
333
+ "content": "2.1 Large Latent Diffusion Models"
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.111,
339
+ 0.21,
340
+ 0.885,
341
+ 0.321
342
+ ],
343
+ "angle": 0,
344
+ "content": "The exploration of diffusion-based generative models has been instrumental in achieving high-fidelity synthesis with broad mode coverage by gradually removing noise from corrupted data. The integration of these models with pretrained language models has notably enhanced the quality of text-to-image (T2I) synthesis. In models such asImagen (Saharia et al., 2022) and Deepfloyd IF (Shonenkov et al., 2023), text-conditional diffusion models generate small images, subsequently upsampled through super-resolution modules. DALL-E (Ramesh et al., 2021) style models, on the other hand, employ a text-conditional prior network to produce an image embedding, transformed via a diffusion decoder and further upscaled into higher resolutions. LDMs perform diffusion modeling in a low-dimensional latent space constructed through a pixel-space autoencoder."
345
+ },
346
+ {
347
+ "type": "title",
348
+ "bbox": [
349
+ 0.113,
350
+ 0.337,
351
+ 0.337,
352
+ 0.351
353
+ ],
354
+ "angle": 0,
355
+ "content": "2.2 Efficient Diffusion Models"
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.111,
361
+ 0.362,
362
+ 0.885,
363
+ 0.473
364
+ ],
365
+ "angle": 0,
366
+ "content": "Efforts to address the slow sampling process in diffusion models have been widespread. Diffusion-tailored distillation progressively transfers knowledge from a pretrained diffusion model to a model with fewer sampling steps while maintaining the same architecture. Latent Consistency Models (Luo et al., 2023a) also allow the models to generate images in very few steps. Combining this with Low Rank Adapters (LoRAs) (Luo et al., 2023b) provides a very easy way of enabling fast generation with large models. Fast high-order solvers for diffusion ordinary differential equations aim to boost sampling speed. In complement to these approaches, our network compression method reduces per-step computation and seamlessly integrates with models employing fewer sampling steps. Leveraging quantization and implementation optimizations designed for SDXL can further enhance the efficiency of our compact models."
367
+ },
368
+ {
369
+ "type": "title",
370
+ "bbox": [
371
+ 0.113,
372
+ 0.489,
373
+ 0.375,
374
+ 0.505
375
+ ],
376
+ "angle": 0,
377
+ "content": "2.3 Distillation-Based Compression"
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.111,
383
+ 0.515,
384
+ 0.885,
385
+ 0.572
386
+ ],
387
+ "angle": 0,
388
+ "content": "Knowledge Distillation (KD) has been successful in improving the performance of small-size models by exploiting output-level and feature-level information from larger source models. While classical KD has found applications in efficient GANs, and Stable Diffusion Base model. Our work demonstrates the extension of distillation pretraining techniques, proven successful in small yet capable general-purpose language models and vision transformers, to SDXL."
389
+ },
390
+ {
391
+ "type": "title",
392
+ "bbox": [
393
+ 0.113,
394
+ 0.585,
395
+ 0.304,
396
+ 0.599
397
+ ],
398
+ "angle": 0,
399
+ "content": "2.3.1 Concurrent Studies"
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.111,
405
+ 0.609,
406
+ 0.885,
407
+ 0.68
408
+ ],
409
+ "angle": 0,
410
+ "content": "Studies such as SnapFusion (Li et al., 2023) achieve an efficient U-Net for Stable Diffusion through architecture evolution and step distillation. Wuerstchen (Pernias et al., 2023) introduces two diffusion processes on low- and high-resolution latent spaces for economic training. While these works are valuable, it is essential to note that they often require significantly larger computational resources than our proposed approach. Additionally, As demonstrated on Stable Diffusion, BK-SDM proposes pruning the UNet via removal of blocks, showcasing promising compression."
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.111,
416
+ 0.685,
417
+ 0.885,
418
+ 0.727
419
+ ],
420
+ "angle": 0,
421
+ "content": "This work uses the technique of classical architectural compression in achieving smaller and faster diffusion models. The approach involves the removal of multiple transformer layers from the U-Net of SDXL, followed by retraining with feature-level knowledge distillation for general-purpose T2I. The contributions of this study are summarized as follows:"
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.158,
427
+ 0.738,
428
+ 0.88,
429
+ 0.779
430
+ ],
431
+ "angle": 0,
432
+ "content": "- Architectural Compression: We compress SDXL by strategically removing architectural blocks from the U-Net, resulting in a notable reduction in model size (up to \\(70\\%\\)) and increased inference speeds (up to \\(100\\%\\) speedup)."
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.157,
438
+ 0.784,
439
+ 0.881,
440
+ 0.837
441
+ ],
442
+ "angle": 0,
443
+ "content": "- Feature Distillation: We use feature distillation for training diffusion models, demonstrating its remarkable benefits in achieving competitive T2I performance with significantly fewer resources. The cost-effectiveness of network compression is emphasized, particularly when compared to the substantial expense of training diffusion models from scratch."
444
+ },
445
+ {
446
+ "type": "text",
447
+ "bbox": [
448
+ 0.157,
449
+ 0.845,
450
+ 0.881,
451
+ 0.87
452
+ ],
453
+ "angle": 0,
454
+ "content": "- Downstream benefits: The method, to an extent preserves fidelity of generation with different LoRA and Controlnet networks, thus requiring less training to be used on the distilled model."
455
+ },
456
+ {
457
+ "type": "list",
458
+ "bbox": [
459
+ 0.157,
460
+ 0.738,
461
+ 0.881,
462
+ 0.87
463
+ ],
464
+ "angle": 0,
465
+ "content": null
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.111,
471
+ 0.884,
472
+ 0.885,
473
+ 0.913
474
+ ],
475
+ "angle": 0,
476
+ "content": "In summary, this research explores classical architectural compression for SDXL, providing a cost-effective strategy for building compact general-purpose diffusion models with compelling performance."
477
+ },
478
+ {
479
+ "type": "page_number",
480
+ "bbox": [
481
+ 0.493,
482
+ 0.936,
483
+ 0.505,
484
+ 0.948
485
+ ],
486
+ "angle": 0,
487
+ "content": "2"
488
+ }
489
+ ],
490
+ [
491
+ {
492
+ "type": "header",
493
+ "bbox": [
494
+ 0.227,
495
+ 0.044,
496
+ 0.769,
497
+ 0.058
498
+ ],
499
+ "angle": 0,
500
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
501
+ },
502
+ {
503
+ "type": "title",
504
+ "bbox": [
505
+ 0.113,
506
+ 0.09,
507
+ 0.258,
508
+ 0.108
509
+ ],
510
+ "angle": 0,
511
+ "content": "3 Methodology"
512
+ },
513
+ {
514
+ "type": "text",
515
+ "bbox": [
516
+ 0.111,
517
+ 0.122,
518
+ 0.885,
519
+ 0.193
520
+ ],
521
+ "angle": 0,
522
+ "content": "In our pursuit of compressing SDXL models, we adopt a nuanced approach that centers on the removal of transformer layers within attention blocks. Our observation reveals a redundancy in numerous blocks, and our strategy involves judicious elimination without compromising the model's generative prowess. We draw inspiration from the architectural compression techniques applied to Stable Diffusion v1.5's \\(^2\\) U-Net and extend the methodology to SDXL, yielding two scaled-down variants: Segmind Stable Diffusion (SSD-1B) and Segmind-Vega."
523
+ },
524
+ {
525
+ "type": "title",
526
+ "bbox": [
527
+ 0.113,
528
+ 0.21,
529
+ 0.245,
530
+ 0.224
531
+ ],
532
+ "angle": 0,
533
+ "content": "3.1 Architecture"
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.111,
539
+ 0.236,
540
+ 0.884,
541
+ 0.279
542
+ ],
543
+ "angle": 0,
544
+ "content": "Our compression strategy is motivated by the recognition that certain layers are dispensable without significantly affecting the model's performance. We leverage insights from various teacher models, including SDXL-base-1.0 and the fine-tuned Zavy Chroma-XL \\(^{3}\\) and Juggernaut-XL \\(^{4}\\), during the compression process."
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.111,
550
+ 0.284,
551
+ 0.885,
552
+ 0.34
553
+ ],
554
+ "angle": 0,
555
+ "content": "We report similar findings as BK-SDM (Kim et al., 2023), in that the middle block of the U-Net can be removed without significantly affecting image quality. To add, we observe that removal of only the attention layers and the second residual network (He et al., 2015) block preserves image quality to a higher degree, as opposed to removal of the whole mid-block."
556
+ },
557
+ {
558
+ "type": "image",
559
+ "bbox": [
560
+ 0.125,
561
+ 0.359,
562
+ 0.403,
563
+ 0.436
564
+ ],
565
+ "angle": 0,
566
+ "content": null
567
+ },
568
+ {
569
+ "type": "image",
570
+ "bbox": [
571
+ 0.406,
572
+ 0.359,
573
+ 0.874,
574
+ 0.437
575
+ ],
576
+ "angle": 0,
577
+ "content": null
578
+ },
579
+ {
580
+ "type": "image_caption",
581
+ "bbox": [
582
+ 0.389,
583
+ 0.448,
584
+ 0.608,
585
+ 0.464
586
+ ],
587
+ "angle": 0,
588
+ "content": "Figure 1: SDXL U-Net structure"
589
+ },
590
+ {
591
+ "type": "image",
592
+ "bbox": [
593
+ 0.125,
594
+ 0.496,
595
+ 0.415,
596
+ 0.576
597
+ ],
598
+ "angle": 0,
599
+ "content": null
600
+ },
601
+ {
602
+ "type": "image",
603
+ "bbox": [
604
+ 0.419,
605
+ 0.496,
606
+ 0.874,
607
+ 0.576
608
+ ],
609
+ "angle": 0,
610
+ "content": null
611
+ },
612
+ {
613
+ "type": "image_caption",
614
+ "bbox": [
615
+ 0.383,
616
+ 0.588,
617
+ 0.614,
618
+ 0.603
619
+ ],
620
+ "angle": 0,
621
+ "content": "Figure 2: SSD-1B U-Net structure"
622
+ },
623
+ {
624
+ "type": "image",
625
+ "bbox": [
626
+ 0.125,
627
+ 0.636,
628
+ 0.415,
629
+ 0.716
630
+ ],
631
+ "angle": 0,
632
+ "content": null
633
+ },
634
+ {
635
+ "type": "image",
636
+ "bbox": [
637
+ 0.419,
638
+ 0.636,
639
+ 0.874,
640
+ 0.716
641
+ ],
642
+ "angle": 0,
643
+ "content": null
644
+ },
645
+ {
646
+ "type": "image_caption",
647
+ "bbox": [
648
+ 0.394,
649
+ 0.728,
650
+ 0.602,
651
+ 0.743
652
+ ],
653
+ "angle": 0,
654
+ "content": "Figure 3: Vega U-Net structure"
655
+ },
656
+ {
657
+ "type": "title",
658
+ "bbox": [
659
+ 0.114,
660
+ 0.775,
661
+ 0.188,
662
+ 0.789
663
+ ],
664
+ "angle": 0,
665
+ "content": "3.2 Loss"
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.111,
671
+ 0.801,
672
+ 0.885,
673
+ 0.858
674
+ ],
675
+ "angle": 0,
676
+ "content": "In contrast to the block-level losses employed in prior work, we introduce layer-level losses specific to each attention and ResNet layer. This refined approach allows for a more granular assessment of the model's internal representations, enabling us to identify and retain essential features while discarding redundant elements. Our choice of layer-level losses is influenced by their efficacy in capturing the nuanced interactions within the model's architecture."
677
+ },
678
+ {
679
+ "type": "page_footnote",
680
+ "bbox": [
681
+ 0.133,
682
+ 0.871,
683
+ 0.555,
684
+ 0.884
685
+ ],
686
+ "angle": 0,
687
+ "content": "\\(^{2}\\)https://huggingface.co/runwayml/stable-diffusion-v1-5"
688
+ },
689
+ {
690
+ "type": "page_footnote",
691
+ "bbox": [
692
+ 0.133,
693
+ 0.884,
694
+ 0.498,
695
+ 0.898
696
+ ],
697
+ "angle": 0,
698
+ "content": "<sup>3</sup>https://civitai.com/models/119229/zavychromaxl"
699
+ },
700
+ {
701
+ "type": "page_footnote",
702
+ "bbox": [
703
+ 0.133,
704
+ 0.898,
705
+ 0.568,
706
+ 0.912
707
+ ],
708
+ "angle": 0,
709
+ "content": "4https://civitai.com/models/133005?modelVersionId=240840"
710
+ },
711
+ {
712
+ "type": "list",
713
+ "bbox": [
714
+ 0.133,
715
+ 0.871,
716
+ 0.568,
717
+ 0.912
718
+ ],
719
+ "angle": 0,
720
+ "content": null
721
+ },
722
+ {
723
+ "type": "page_number",
724
+ "bbox": [
725
+ 0.494,
726
+ 0.936,
727
+ 0.504,
728
+ 0.948
729
+ ],
730
+ "angle": 0,
731
+ "content": "3"
732
+ }
733
+ ],
734
+ [
735
+ {
736
+ "type": "header",
737
+ "bbox": [
738
+ 0.227,
739
+ 0.044,
740
+ 0.769,
741
+ 0.058
742
+ ],
743
+ "angle": 0,
744
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
745
+ },
746
+ {
747
+ "type": "title",
748
+ "bbox": [
749
+ 0.113,
750
+ 0.092,
751
+ 0.29,
752
+ 0.106
753
+ ],
754
+ "angle": 0,
755
+ "content": "3.2.1 Input Generation"
756
+ },
757
+ {
758
+ "type": "text",
759
+ "bbox": [
760
+ 0.111,
761
+ 0.115,
762
+ 0.885,
763
+ 0.173
764
+ ],
765
+ "angle": 0,
766
+ "content": "To obtain the input for the U-Net, we employ pretrained text encoders for the to obtain the text embeddings. The latent representations of the image are obtained by the pretrained VAE. Both text encoders and the VAE are kept frozen during training and only the UNet is trained. The latent representation \\( z \\) of an image and its paired text embedding \\( y \\) form the basis for our training process."
767
+ },
768
+ {
769
+ "type": "title",
770
+ "bbox": [
771
+ 0.113,
772
+ 0.187,
773
+ 0.236,
774
+ 0.202
775
+ ],
776
+ "angle": 0,
777
+ "content": "3.2.2 Task Loss"
778
+ },
779
+ {
780
+ "type": "text",
781
+ "bbox": [
782
+ 0.111,
783
+ 0.211,
784
+ 0.885,
785
+ 0.267
786
+ ],
787
+ "angle": 0,
788
+ "content": "We formulate the task loss, denoted as \\( L_{Task} \\), which is computed through the reverse denoising process. The task loss measures the disparity between the sampled noise \\( \\epsilon \\) from the diffusion process and the estimated noise \\( \\epsilon_S(z_t,y,t) \\) generated by our compact UNet student. The objective is to align the noise distribution of the student with that of the teacher."
789
+ },
790
+ {
791
+ "type": "equation",
792
+ "bbox": [
793
+ 0.37,
794
+ 0.279,
795
+ 0.627,
796
+ 0.297
797
+ ],
798
+ "angle": 0,
799
+ "content": "\\[\nL _ {T a s k} = \\mathbb {E} _ {z, \\epsilon , y, t, t _ {h}} | | \\epsilon - \\epsilon_ {S} (z _ {t}, y, t) | | _ {2} ^ {2}\n\\]"
800
+ },
801
+ {
802
+ "type": "title",
803
+ "bbox": [
804
+ 0.113,
805
+ 0.31,
806
+ 0.465,
807
+ 0.325
808
+ ],
809
+ "angle": 0,
810
+ "content": "3.2.3 Output-Level Knowledge Distillation (KD)"
811
+ },
812
+ {
813
+ "type": "text",
814
+ "bbox": [
815
+ 0.111,
816
+ 0.334,
817
+ 0.884,
818
+ 0.363
819
+ ],
820
+ "angle": 0,
821
+ "content": "The compact student is trained to imitate the outputs of the original U-Net teacher, denoted as \\(\\epsilon_{T}\\), using an output-level KD objective. This objective ensures that the overall output distribution of the student aligns with that of the teacher."
822
+ },
823
+ {
824
+ "type": "equation",
825
+ "bbox": [
826
+ 0.357,
827
+ 0.375,
828
+ 0.64,
829
+ 0.392
830
+ ],
831
+ "angle": 0,
832
+ "content": "\\[\nL _ {O u t K D} = \\mathbb {E} _ {z, \\epsilon , y, t, t _ {h}} | | \\epsilon_ {T} - \\epsilon_ {S} (z _ {t}, y, t) | | _ {2} ^ {2}\n\\]"
833
+ },
834
+ {
835
+ "type": "title",
836
+ "bbox": [
837
+ 0.113,
838
+ 0.405,
839
+ 0.467,
840
+ 0.421
841
+ ],
842
+ "angle": 0,
843
+ "content": "3.2.4 Feature-Level Knowledge Distillation (KD)"
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.111,
849
+ 0.43,
850
+ 0.884,
851
+ 0.487
852
+ ],
853
+ "angle": 0,
854
+ "content": "A pivotal component of our approach is feature-level KD, providing rich guidance for the student's training. The feature-level KD objective, denoted as \\( L_{FeatKD} \\), measures the difference between the feature maps of corresponding layers in both the teacher and student models. Importantly, our approach eliminates the need for additional regressors by ensuring that the dimensionality of feature maps already matches at the end of each layer in both models."
855
+ },
856
+ {
857
+ "type": "equation",
858
+ "bbox": [
859
+ 0.336,
860
+ 0.497,
861
+ 0.66,
862
+ 0.515
863
+ ],
864
+ "angle": 0,
865
+ "content": "\\[\nL _ {F e a t K D} = \\mathbb {E} _ {h, X _ {l}} | | f _ {l} ^ {T} (z _ {t}, y, t) - f _ {l} ^ {S} (z _ {t}, y, t) | | _ {2} ^ {2}\n\\]"
866
+ },
867
+ {
868
+ "type": "title",
869
+ "bbox": [
870
+ 0.113,
871
+ 0.528,
872
+ 0.291,
873
+ 0.544
874
+ ],
875
+ "angle": 0,
876
+ "content": "3.2.5 Overall Objective"
877
+ },
878
+ {
879
+ "type": "text",
880
+ "bbox": [
881
+ 0.111,
882
+ 0.553,
883
+ 0.884,
884
+ 0.583
885
+ ],
886
+ "angle": 0,
887
+ "content": "The final objective encompasses the task loss, output-level KD, and feature-level KD, weighted by coefficients \\(\\lambda_{OutKD}\\) and \\(\\lambda_{FeatKD}\\). Without loss-weight tuning, our approach demonstrates effectiveness in empirical validation."
888
+ },
889
+ {
890
+ "type": "equation",
891
+ "bbox": [
892
+ 0.306,
893
+ 0.594,
894
+ 0.689,
895
+ 0.61
896
+ ],
897
+ "angle": 0,
898
+ "content": "\\[\n\\mathrm {L} = L _ {\\text {T a s k}} + \\lambda_ {\\text {O u t K D}} * L _ {\\text {O u t K D}} + \\lambda_ {\\text {F e a t K D}} * L _ {\\text {F e a t K D}}\n\\]"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.111,
904
+ 0.622,
905
+ 0.884,
906
+ 0.651
907
+ ],
908
+ "angle": 0,
909
+ "content": "Another advantage of this method of distillation is that LoRA weights created for the parent model tend to produce close results without retraining. This may reduce the number of training steps required to migrate models."
910
+ },
911
+ {
912
+ "type": "text",
913
+ "bbox": [
914
+ 0.111,
915
+ 0.656,
916
+ 0.884,
917
+ 0.7
918
+ ],
919
+ "angle": 0,
920
+ "content": "To expound on our compression strategy, we consider the analogy to DistilBERT (Sanh et al., 2020), which reduces the number of layers while initializing the compact model with original weights. Our compression methodology involves targeted removal strategies in both down and up stages."
921
+ },
922
+ {
923
+ "type": "title",
924
+ "bbox": [
925
+ 0.113,
926
+ 0.715,
927
+ 0.266,
928
+ 0.729
929
+ ],
930
+ "angle": 0,
931
+ "content": "3.3 Teacher Models"
932
+ },
933
+ {
934
+ "type": "text",
935
+ "bbox": [
936
+ 0.111,
937
+ 0.74,
938
+ 0.885,
939
+ 0.797
940
+ ],
941
+ "angle": 0,
942
+ "content": "We initially take SDXL Base \\(^{5}\\) as the teacher, but later swap it for a finetuned model, ZavychromaXL \\(^{6}\\) and finally use JuggernautXL \\(^{7}\\). We find that swapping the teacher boosts the quality significantly even if the same dataset is used again. This showcases that using multiple expert models can aid in instilling new concepts as well as improving quality of the student."
943
+ },
944
+ {
945
+ "type": "text",
946
+ "bbox": [
947
+ 0.111,
948
+ 0.803,
949
+ 0.884,
950
+ 0.86
951
+ ],
952
+ "angle": 0,
953
+ "content": "Our compression methodology, inspired by proven techniques (Kim et al., 2023), not only reduces model size but also ensures that essential features are retained through the careful removal of redundant blocks. The introduction of layer-level losses further refines this process, contributing to the overall efficiency and efficacy of our compressed models—SSD-1B and Segmind-Vega."
954
+ },
955
+ {
956
+ "type": "page_footnote",
957
+ "bbox": [
958
+ 0.133,
959
+ 0.87,
960
+ 0.632,
961
+ 0.884
962
+ ],
963
+ "angle": 0,
964
+ "content": "<sup>5</sup>https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0"
965
+ },
966
+ {
967
+ "type": "page_footnote",
968
+ "bbox": [
969
+ 0.133,
970
+ 0.884,
971
+ 0.498,
972
+ 0.898
973
+ ],
974
+ "angle": 0,
975
+ "content": "\\(^{6}\\)https://civitai.com/models/119229/zavychromaxl"
976
+ },
977
+ {
978
+ "type": "page_footnote",
979
+ "bbox": [
980
+ 0.133,
981
+ 0.898,
982
+ 0.568,
983
+ 0.912
984
+ ],
985
+ "angle": 0,
986
+ "content": "<sup>7</sup>https://civitai.com/models/133005?modelVersionId=240840"
987
+ },
988
+ {
989
+ "type": "list",
990
+ "bbox": [
991
+ 0.133,
992
+ 0.87,
993
+ 0.632,
994
+ 0.912
995
+ ],
996
+ "angle": 0,
997
+ "content": null
998
+ },
999
+ {
1000
+ "type": "page_number",
1001
+ "bbox": [
1002
+ 0.494,
1003
+ 0.936,
1004
+ 0.504,
1005
+ 0.947
1006
+ ],
1007
+ "angle": 0,
1008
+ "content": "4"
1009
+ }
1010
+ ],
1011
+ [
1012
+ {
1013
+ "type": "header",
1014
+ "bbox": [
1015
+ 0.227,
1016
+ 0.044,
1017
+ 0.769,
1018
+ 0.058
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
1022
+ },
1023
+ {
1024
+ "type": "title",
1025
+ "bbox": [
1026
+ 0.113,
1027
+ 0.092,
1028
+ 0.214,
1029
+ 0.107
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "3.4 Pruning"
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "bbox": [
1037
+ 0.111,
1038
+ 0.117,
1039
+ 0.831,
1040
+ 0.133
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "We employ human evaluation of outputs along with heuristics to identify potential attention layers to remove."
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "bbox": [
1048
+ 0.111,
1049
+ 0.138,
1050
+ 0.884,
1051
+ 0.167
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "To create SSD-1B, along with removal of the mid-block's attention layers and the second Residual Network, we remove the following layers of SDXL:"
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "bbox": [
1059
+ 0.157,
1060
+ 0.177,
1061
+ 0.88,
1062
+ 0.205
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "- 4th, 5th, 7th, 8th, 9th and 10th transformer blocks of all attention layers in the 3rd downsampling stage and the first two attention layers of the first upsampling stage of the U-Net."
1066
+ },
1067
+ {
1068
+ "type": "text",
1069
+ "bbox": [
1070
+ 0.157,
1071
+ 0.21,
1072
+ 0.838,
1073
+ 0.224
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "- The second transformer block of the second and third attention layers of the second upsampling stage."
1077
+ },
1078
+ {
1079
+ "type": "list",
1080
+ "bbox": [
1081
+ 0.157,
1082
+ 0.177,
1083
+ 0.88,
1084
+ 0.224
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": null
1088
+ },
1089
+ {
1090
+ "type": "text",
1091
+ "bbox": [
1092
+ 0.111,
1093
+ 0.235,
1094
+ 0.495,
1095
+ 0.251
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "To create Segmind Vega, we remove the following layers:"
1099
+ },
1100
+ {
1101
+ "type": "text",
1102
+ "bbox": [
1103
+ 0.157,
1104
+ 0.261,
1105
+ 0.881,
1106
+ 0.289
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "- 3rd, 4th, 5th, 6th, 7th, 8th, 9th and 10th transformer blocks of the first attention layer of the third downsampling stage and all attention layers in the first upsampling stage of the U-Net."
1110
+ },
1111
+ {
1112
+ "type": "text",
1113
+ "bbox": [
1114
+ 0.157,
1115
+ 0.294,
1116
+ 0.882,
1117
+ 0.321
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "- 2nd, 4th, 5th, 6th, 7th, 8th, 9th and 10th transformer blocks of the second attention layer of the third down sampling stage."
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "bbox": [
1125
+ 0.157,
1126
+ 0.327,
1127
+ 0.857,
1128
+ 0.34
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": "- The second transformer block of all attention layers of the second downsampling and upsampling stages."
1132
+ },
1133
+ {
1134
+ "type": "list",
1135
+ "bbox": [
1136
+ 0.157,
1137
+ 0.261,
1138
+ 0.882,
1139
+ 0.34
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": null
1143
+ },
1144
+ {
1145
+ "type": "title",
1146
+ "bbox": [
1147
+ 0.111,
1148
+ 0.361,
1149
+ 0.222,
1150
+ 0.379
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "4 Training"
1154
+ },
1155
+ {
1156
+ "type": "text",
1157
+ "bbox": [
1158
+ 0.111,
1159
+ 0.392,
1160
+ 0.884,
1161
+ 0.435
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": "In our training methodology, we adopt a distillation-based retraining approach. We use a layer-level loss in an attempt to mimic the features at each stage of the teacher U-Net. This process is crucial for achieving efficient knowledge transfer and preserving the generative quality of SDXL even in significantly compressed models."
1165
+ },
1166
+ {
1167
+ "type": "text",
1168
+ "bbox": [
1169
+ 0.111,
1170
+ 0.44,
1171
+ 0.884,
1172
+ 0.483
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "Our training strategy, inspired by distillation-based retraining, ensures that our compressed models inherit the essential knowledge from the teacher model, enabling them to efficiently mimic the behavior of the original U-Net across various layers, including attention and residual network (ResNet) layers."
1176
+ },
1177
+ {
1178
+ "type": "text",
1179
+ "bbox": [
1180
+ 0.111,
1181
+ 0.488,
1182
+ 0.884,
1183
+ 0.558
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": "We trained SSD-1B at fp16 mixed-precision for a total of 251,000 steps with a constant learning rate of 1e-5, using Adam Optimizer (Kingma & Ba, 2017), at \\(1024^{*}1024\\) image resolutions, on four 80GB A100 GPUs at an effective batch size of 32. We trained Vega at fp16 mixed-precision for a total of 540,000 steps with a learning rate of 1e-5, at \\(1024^{*}1024\\) image resolutions, on four 80GB A100 GPUs, at an effective batch size of 128. The datasets used for training and evaluation include GRIT (Peng et al., 2023) and images generated by Midjourney<sup>8</sup>."
1187
+ },
1188
+ {
1189
+ "type": "title",
1190
+ "bbox": [
1191
+ 0.113,
1192
+ 0.577,
1193
+ 0.21,
1194
+ 0.592
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "5 Results"
1198
+ },
1199
+ {
1200
+ "type": "text",
1201
+ "bbox": [
1202
+ 0.111,
1203
+ 0.608,
1204
+ 0.884,
1205
+ 0.651
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": "We present two distilled versions of Stable Diffusion XL, Segmind Stable Diffusion(SSD-1B) and Segmind Vega, which closely mimic the outputs of the base model as shown in the Figure 4, 5, 6, 7, 8 and 9. All images are generated with the DDPM Scheduler, 25 inference steps and Guidance Scale set to 9."
1209
+ },
1210
+ {
1211
+ "type": "text",
1212
+ "bbox": [
1213
+ 0.111,
1214
+ 0.657,
1215
+ 0.884,
1216
+ 0.686
1217
+ ],
1218
+ "angle": 0,
1219
+ "content": "We report up to \\(60\\%\\) speedup with SSD-1B and up to \\(100\\%\\) speedup with Segmind-Vega. The detailed metrics taken on an A100 at 25 steps with DDPM Scheduler at guidance scale 9 and batch size 1, are reported in Table 1."
1220
+ },
1221
+ {
1222
+ "type": "table",
1223
+ "bbox": [
1224
+ 0.314,
1225
+ 0.697,
1226
+ 0.681,
1227
+ 0.773
1228
+ ],
1229
+ "angle": 0,
1230
+ "content": "<table><tr><td>Model</td><td>Inference Time (s) (↓)</td><td>Iteration/s (↑)</td></tr><tr><td>SD1.59</td><td>1.699</td><td>16.79</td></tr><tr><td>SDXL</td><td>3.135</td><td>8.80</td></tr><tr><td>SSD-1B</td><td>2.169</td><td>13.37</td></tr><tr><td>Vega</td><td>1.616</td><td>18.95</td></tr></table>"
1231
+ },
1232
+ {
1233
+ "type": "table_caption",
1234
+ "bbox": [
1235
+ 0.361,
1236
+ 0.789,
1237
+ 0.634,
1238
+ 0.804
1239
+ ],
1240
+ "angle": 0,
1241
+ "content": "Table 1: Benchmarking inference latency"
1242
+ },
1243
+ {
1244
+ "type": "page_footnote",
1245
+ "bbox": [
1246
+ 0.133,
1247
+ 0.884,
1248
+ 0.432,
1249
+ 0.912
1250
+ ],
1251
+ "angle": 0,
1252
+ "content": "<sup>8</sup>https://www.midjourney.com/ \n<sup>9</sup>Inference Times reported at resolution 768 * 768"
1253
+ },
1254
+ {
1255
+ "type": "page_number",
1256
+ "bbox": [
1257
+ 0.494,
1258
+ 0.936,
1259
+ 0.504,
1260
+ 0.948
1261
+ ],
1262
+ "angle": 0,
1263
+ "content": "5"
1264
+ }
1265
+ ],
1266
+ [
1267
+ {
1268
+ "type": "header",
1269
+ "bbox": [
1270
+ 0.227,
1271
+ 0.043,
1272
+ 0.769,
1273
+ 0.058
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
1277
+ },
1278
+ {
1279
+ "type": "image",
1280
+ "bbox": [
1281
+ 0.115,
1282
+ 0.116,
1283
+ 0.348,
1284
+ 0.263
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": null
1288
+ },
1289
+ {
1290
+ "type": "image_caption",
1291
+ "bbox": [
1292
+ 0.212,
1293
+ 0.268,
1294
+ 0.274,
1295
+ 0.282
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": "(a) SDXL"
1299
+ },
1300
+ {
1301
+ "type": "image",
1302
+ "bbox": [
1303
+ 0.37,
1304
+ 0.116,
1305
+ 0.605,
1306
+ 0.264
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": null
1310
+ },
1311
+ {
1312
+ "type": "image_caption",
1313
+ "bbox": [
1314
+ 0.463,
1315
+ 0.268,
1316
+ 0.535,
1317
+ 0.282
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": "(b) SSD-1B"
1321
+ },
1322
+ {
1323
+ "type": "image",
1324
+ "bbox": [
1325
+ 0.626,
1326
+ 0.116,
1327
+ 0.861,
1328
+ 0.264
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": null
1332
+ },
1333
+ {
1334
+ "type": "image_caption",
1335
+ "bbox": [
1336
+ 0.729,
1337
+ 0.268,
1338
+ 0.781,
1339
+ 0.282
1340
+ ],
1341
+ "angle": 0,
1342
+ "content": "(c) Vega"
1343
+ },
1344
+ {
1345
+ "type": "image_caption",
1346
+ "bbox": [
1347
+ 0.112,
1348
+ 0.289,
1349
+ 0.886,
1350
+ 0.319
1351
+ ],
1352
+ "angle": 0,
1353
+ "content": "Figure 4: \"A royal flaming wolf emerging from a magical big forest, blue flames, front facing, portrait, closeup, dark, bokeh, dawn, god rays, highly detailed, highres, Cinematic, Cinemascope, astonishing, epic, gorgeous, ral-fluff\""
1354
+ },
1355
+ {
1356
+ "type": "image",
1357
+ "bbox": [
1358
+ 0.115,
1359
+ 0.384,
1360
+ 0.348,
1361
+ 0.531
1362
+ ],
1363
+ "angle": 0,
1364
+ "content": null
1365
+ },
1366
+ {
1367
+ "type": "image_caption",
1368
+ "bbox": [
1369
+ 0.212,
1370
+ 0.535,
1371
+ 0.274,
1372
+ 0.548
1373
+ ],
1374
+ "angle": 0,
1375
+ "content": "(a) SDXL"
1376
+ },
1377
+ {
1378
+ "type": "image",
1379
+ "bbox": [
1380
+ 0.37,
1381
+ 0.384,
1382
+ 0.605,
1383
+ 0.531
1384
+ ],
1385
+ "angle": 0,
1386
+ "content": null
1387
+ },
1388
+ {
1389
+ "type": "image_caption",
1390
+ "bbox": [
1391
+ 0.463,
1392
+ 0.535,
1393
+ 0.535,
1394
+ 0.548
1395
+ ],
1396
+ "angle": 0,
1397
+ "content": "(b) SSD-1B"
1398
+ },
1399
+ {
1400
+ "type": "image",
1401
+ "bbox": [
1402
+ 0.626,
1403
+ 0.384,
1404
+ 0.861,
1405
+ 0.531
1406
+ ],
1407
+ "angle": 0,
1408
+ "content": null
1409
+ },
1410
+ {
1411
+ "type": "image_caption",
1412
+ "bbox": [
1413
+ 0.729,
1414
+ 0.535,
1415
+ 0.781,
1416
+ 0.549
1417
+ ],
1418
+ "angle": 0,
1419
+ "content": "(c) Vega"
1420
+ },
1421
+ {
1422
+ "type": "image_caption",
1423
+ "bbox": [
1424
+ 0.112,
1425
+ 0.556,
1426
+ 0.886,
1427
+ 0.613
1428
+ ],
1429
+ "angle": 0,
1430
+ "content": "Figure 5: \"raw photo, close-up, punk band cover, red brick wall, red theme, a brutal man, 40 years old, mohawk, (manly, wide jaw:1.2), leather jacket, red shirt, (vibrant colors:0.9), film grain, bokeh, fashion magazine, hdr, highly detailed photography, (muted colors, cinematic, dim colors, soothing tones:1.2), vibrant, insanely detailed, hyperdetailed, (dark shot:1.2), (vsco:0.3), (intricate details:0.9), (hdr, hyperdetailed:1.2)\""
1431
+ },
1432
+ {
1433
+ "type": "image",
1434
+ "bbox": [
1435
+ 0.115,
1436
+ 0.678,
1437
+ 0.348,
1438
+ 0.826
1439
+ ],
1440
+ "angle": 0,
1441
+ "content": null
1442
+ },
1443
+ {
1444
+ "type": "image_caption",
1445
+ "bbox": [
1446
+ 0.212,
1447
+ 0.83,
1448
+ 0.273,
1449
+ 0.843
1450
+ ],
1451
+ "angle": 0,
1452
+ "content": "(a) SDXL"
1453
+ },
1454
+ {
1455
+ "type": "image",
1456
+ "bbox": [
1457
+ 0.37,
1458
+ 0.678,
1459
+ 0.605,
1460
+ 0.826
1461
+ ],
1462
+ "angle": 0,
1463
+ "content": null
1464
+ },
1465
+ {
1466
+ "type": "image_caption",
1467
+ "bbox": [
1468
+ 0.463,
1469
+ 0.83,
1470
+ 0.534,
1471
+ 0.843
1472
+ ],
1473
+ "angle": 0,
1474
+ "content": "(b) SSD-1B"
1475
+ },
1476
+ {
1477
+ "type": "image",
1478
+ "bbox": [
1479
+ 0.626,
1480
+ 0.678,
1481
+ 0.861,
1482
+ 0.826
1483
+ ],
1484
+ "angle": 0,
1485
+ "content": null
1486
+ },
1487
+ {
1488
+ "type": "image_caption",
1489
+ "bbox": [
1490
+ 0.729,
1491
+ 0.83,
1492
+ 0.781,
1493
+ 0.844
1494
+ ],
1495
+ "angle": 0,
1496
+ "content": "(c) Vega"
1497
+ },
1498
+ {
1499
+ "type": "image_caption",
1500
+ "bbox": [
1501
+ 0.112,
1502
+ 0.851,
1503
+ 0.885,
1504
+ 0.88
1505
+ ],
1506
+ "angle": 0,
1507
+ "content": "Figure 6: \"(best quality:1.5), (intricate emotional details:1.5), (sharpen details), (ultra detailed), (cinematic lighting), pink Cadillac, car, driving through the country, sunset, relaxing vibes. cartoon style, line art, sticker style\""
1508
+ },
1509
+ {
1510
+ "type": "page_number",
1511
+ "bbox": [
1512
+ 0.494,
1513
+ 0.937,
1514
+ 0.504,
1515
+ 0.948
1516
+ ],
1517
+ "angle": 0,
1518
+ "content": "6"
1519
+ }
1520
+ ],
1521
+ [
1522
+ {
1523
+ "type": "header",
1524
+ "bbox": [
1525
+ 0.227,
1526
+ 0.043,
1527
+ 0.769,
1528
+ 0.058
1529
+ ],
1530
+ "angle": 0,
1531
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
1532
+ },
1533
+ {
1534
+ "type": "image",
1535
+ "bbox": [
1536
+ 0.116,
1537
+ 0.117,
1538
+ 0.348,
1539
+ 0.263
1540
+ ],
1541
+ "angle": 0,
1542
+ "content": null
1543
+ },
1544
+ {
1545
+ "type": "image_caption",
1546
+ "bbox": [
1547
+ 0.212,
1548
+ 0.268,
1549
+ 0.274,
1550
+ 0.282
1551
+ ],
1552
+ "angle": 0,
1553
+ "content": "(a) SDXL"
1554
+ },
1555
+ {
1556
+ "type": "image",
1557
+ "bbox": [
1558
+ 0.372,
1559
+ 0.117,
1560
+ 0.603,
1561
+ 0.263
1562
+ ],
1563
+ "angle": 0,
1564
+ "content": null
1565
+ },
1566
+ {
1567
+ "type": "image_caption",
1568
+ "bbox": [
1569
+ 0.463,
1570
+ 0.268,
1571
+ 0.535,
1572
+ 0.282
1573
+ ],
1574
+ "angle": 0,
1575
+ "content": "(b) SSD-1B"
1576
+ },
1577
+ {
1578
+ "type": "image",
1579
+ "bbox": [
1580
+ 0.627,
1581
+ 0.117,
1582
+ 0.859,
1583
+ 0.263
1584
+ ],
1585
+ "angle": 0,
1586
+ "content": null
1587
+ },
1588
+ {
1589
+ "type": "image_caption",
1590
+ "bbox": [
1591
+ 0.729,
1592
+ 0.269,
1593
+ 0.781,
1594
+ 0.282
1595
+ ],
1596
+ "angle": 0,
1597
+ "content": "(c) Vega"
1598
+ },
1599
+ {
1600
+ "type": "image_caption",
1601
+ "bbox": [
1602
+ 0.112,
1603
+ 0.29,
1604
+ 0.884,
1605
+ 0.348
1606
+ ],
1607
+ "angle": 0,
1608
+ "content": "Figure 7: \"Swamp marsh Poison green red Soft watercolors digital watercolors painting illustration masterpiece raining shooting stars twinkling stars glistening stars glittery stars full moon stars full moon intricate motifs perfect composition masterpiece insanely-detailed extreme-detailed hyper-detailed beautiful volumetric deep rich colors volumetric lighting shadows Ray tracing, Mark Brooks and Dan Mumford, comic book art, perfect\""
1609
+ },
1610
+ {
1611
+ "type": "image",
1612
+ "bbox": [
1613
+ 0.116,
1614
+ 0.412,
1615
+ 0.346,
1616
+ 0.557
1617
+ ],
1618
+ "angle": 0,
1619
+ "content": null
1620
+ },
1621
+ {
1622
+ "type": "image_caption",
1623
+ "bbox": [
1624
+ 0.212,
1625
+ 0.563,
1626
+ 0.273,
1627
+ 0.576
1628
+ ],
1629
+ "angle": 0,
1630
+ "content": "(a) SDXL"
1631
+ },
1632
+ {
1633
+ "type": "image",
1634
+ "bbox": [
1635
+ 0.372,
1636
+ 0.412,
1637
+ 0.603,
1638
+ 0.558
1639
+ ],
1640
+ "angle": 0,
1641
+ "content": null
1642
+ },
1643
+ {
1644
+ "type": "image_caption",
1645
+ "bbox": [
1646
+ 0.463,
1647
+ 0.563,
1648
+ 0.534,
1649
+ 0.576
1650
+ ],
1651
+ "angle": 0,
1652
+ "content": "(b) SSD-1B"
1653
+ },
1654
+ {
1655
+ "type": "image",
1656
+ "bbox": [
1657
+ 0.627,
1658
+ 0.412,
1659
+ 0.859,
1660
+ 0.558
1661
+ ],
1662
+ "angle": 0,
1663
+ "content": null
1664
+ },
1665
+ {
1666
+ "type": "image_caption",
1667
+ "bbox": [
1668
+ 0.729,
1669
+ 0.563,
1670
+ 0.781,
1671
+ 0.577
1672
+ ],
1673
+ "angle": 0,
1674
+ "content": "(c) Vega"
1675
+ },
1676
+ {
1677
+ "type": "image_caption",
1678
+ "bbox": [
1679
+ 0.112,
1680
+ 0.584,
1681
+ 0.884,
1682
+ 0.614
1683
+ ],
1684
+ "angle": 0,
1685
+ "content": "Figure 8: (\"best quality:1.5), (intricate emotional details:1.5), (sharpen details), (ultra detailed), (cinematic lighting), magical woods, unexplained lights, fantasy, otherworldly, mist, atmospheric, flowers, plants\""
1686
+ },
1687
+ {
1688
+ "type": "image",
1689
+ "bbox": [
1690
+ 0.116,
1691
+ 0.679,
1692
+ 0.346,
1693
+ 0.825
1694
+ ],
1695
+ "angle": 0,
1696
+ "content": null
1697
+ },
1698
+ {
1699
+ "type": "image_caption",
1700
+ "bbox": [
1701
+ 0.212,
1702
+ 0.83,
1703
+ 0.273,
1704
+ 0.843
1705
+ ],
1706
+ "angle": 0,
1707
+ "content": "(a) SDXL"
1708
+ },
1709
+ {
1710
+ "type": "image",
1711
+ "bbox": [
1712
+ 0.372,
1713
+ 0.679,
1714
+ 0.603,
1715
+ 0.825
1716
+ ],
1717
+ "angle": 0,
1718
+ "content": null
1719
+ },
1720
+ {
1721
+ "type": "image_caption",
1722
+ "bbox": [
1723
+ 0.463,
1724
+ 0.83,
1725
+ 0.534,
1726
+ 0.843
1727
+ ],
1728
+ "angle": 0,
1729
+ "content": "(b) SSD-1B"
1730
+ },
1731
+ {
1732
+ "type": "image",
1733
+ "bbox": [
1734
+ 0.627,
1735
+ 0.679,
1736
+ 0.859,
1737
+ 0.825
1738
+ ],
1739
+ "angle": 0,
1740
+ "content": null
1741
+ },
1742
+ {
1743
+ "type": "image_caption",
1744
+ "bbox": [
1745
+ 0.729,
1746
+ 0.83,
1747
+ 0.781,
1748
+ 0.844
1749
+ ],
1750
+ "angle": 0,
1751
+ "content": "(c) Vega"
1752
+ },
1753
+ {
1754
+ "type": "image_caption",
1755
+ "bbox": [
1756
+ 0.112,
1757
+ 0.851,
1758
+ 0.884,
1759
+ 0.881
1760
+ ],
1761
+ "angle": 0,
1762
+ "content": "Figure 9: \"((fatty cat)) dracula, Victorian style, dracula-inspired, long red-black cloak, fangs, castle, in motion, furry paws, action-packed background, dark theme, glow\""
1763
+ },
1764
+ {
1765
+ "type": "page_number",
1766
+ "bbox": [
1767
+ 0.494,
1768
+ 0.936,
1769
+ 0.504,
1770
+ 0.947
1771
+ ],
1772
+ "angle": 0,
1773
+ "content": "7"
1774
+ }
1775
+ ],
1776
+ [
1777
+ {
1778
+ "type": "header",
1779
+ "bbox": [
1780
+ 0.227,
1781
+ 0.044,
1782
+ 0.769,
1783
+ 0.058
1784
+ ],
1785
+ "angle": 0,
1786
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
1787
+ },
1788
+ {
1789
+ "type": "title",
1790
+ "bbox": [
1791
+ 0.113,
1792
+ 0.092,
1793
+ 0.253,
1794
+ 0.108
1795
+ ],
1796
+ "angle": 0,
1797
+ "content": "5.1 Quality Study"
1798
+ },
1799
+ {
1800
+ "type": "text",
1801
+ "bbox": [
1802
+ 0.111,
1803
+ 0.117,
1804
+ 0.885,
1805
+ 0.175
1806
+ ],
1807
+ "angle": 0,
1808
+ "content": "PlaygroundAI\\(^{10}\\), a generative AI startup, conducted an extensive blind human preference study encompassing 1000 images and involving 1540 unique users to assess the comparative performance of SSD-1B and SDXL. Remarkably, the findings revealed that not only did SSD-1B maintain image quality, but it was also marginally preferred over the larger SDXL model. The comprehensive details of this study are presented in Table 2."
1809
+ },
1810
+ {
1811
+ "type": "table",
1812
+ "bbox": [
1813
+ 0.303,
1814
+ 0.187,
1815
+ 0.692,
1816
+ 0.234
1817
+ ],
1818
+ "angle": 0,
1819
+ "content": "<table><tr><td>Model</td><td>Pairs Won (↑)</td><td>Percentage Pairs Won (↑)</td></tr><tr><td>SSD-1B</td><td>528</td><td>52.8</td></tr><tr><td>SDXL</td><td>472</td><td>47.2</td></tr></table>"
1820
+ },
1821
+ {
1822
+ "type": "table_caption",
1823
+ "bbox": [
1824
+ 0.386,
1825
+ 0.25,
1826
+ 0.61,
1827
+ 0.264
1828
+ ],
1829
+ "angle": 0,
1830
+ "content": "Table 2: Human preference study"
1831
+ },
1832
+ {
1833
+ "type": "text",
1834
+ "bbox": [
1835
+ 0.111,
1836
+ 0.271,
1837
+ 0.885,
1838
+ 0.327
1839
+ ],
1840
+ "angle": 0,
1841
+ "content": "The table illustrates the outcomes of the study, with SSD-1B securing victory in \\(52.8\\%\\) of the image pairs, whereas SDXL, although commendable, trailed slightly with \\(47.2\\%\\). These results not only underscore the noteworthy quality preservation of SSD-1B but also highlight its perceptible preference among the diverse user cohort involved in the blind study."
1842
+ },
1843
+ {
1844
+ "type": "title",
1845
+ "bbox": [
1846
+ 0.113,
1847
+ 0.348,
1848
+ 0.241,
1849
+ 0.364
1850
+ ],
1851
+ "angle": 0,
1852
+ "content": "6 Conclusion"
1853
+ },
1854
+ {
1855
+ "type": "text",
1856
+ "bbox": [
1857
+ 0.111,
1858
+ 0.38,
1859
+ 0.884,
1860
+ 0.435
1861
+ ],
1862
+ "angle": 0,
1863
+ "content": "We show that distillation of large models like SDXL via using knowledge distillation using multiple models as teachers and using feature losses can quickly converge to give similar quality outputs as the base model despite having a significantly smaller student model. Some of its limitations include but are not limited to Text, Hands and Full Body shots."
1864
+ },
1865
+ {
1866
+ "type": "text",
1867
+ "bbox": [
1868
+ 0.111,
1869
+ 0.442,
1870
+ 0.884,
1871
+ 0.486
1872
+ ],
1873
+ "angle": 0,
1874
+ "content": "Our work also highlights the importance of choice of dataset and teacher model as it can tremendously help boost the final model's quality. We show that progressively distilling can reduce total training time significantly. In the future this technique cnn be further explored on other large models such as LLMs, MLMs etc,"
1875
+ },
1876
+ {
1877
+ "type": "title",
1878
+ "bbox": [
1879
+ 0.114,
1880
+ 0.505,
1881
+ 0.21,
1882
+ 0.521
1883
+ ],
1884
+ "angle": 0,
1885
+ "content": "References"
1886
+ },
1887
+ {
1888
+ "type": "ref_text",
1889
+ "bbox": [
1890
+ 0.115,
1891
+ 0.53,
1892
+ 0.848,
1893
+ 0.546
1894
+ ],
1895
+ "angle": 0,
1896
+ "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015."
1897
+ },
1898
+ {
1899
+ "type": "ref_text",
1900
+ "bbox": [
1901
+ 0.115,
1902
+ 0.55,
1903
+ 0.884,
1904
+ 0.579
1905
+ ],
1906
+ "angle": 0,
1907
+ "content": "Bo-Kyeong Kim, Hyoung-Kyu Song, Thibault Castells, and Shinkook Choi. Bk-sdm: A lightweight, fast, and cheap version of stable diffusion, 2023."
1908
+ },
1909
+ {
1910
+ "type": "ref_text",
1911
+ "bbox": [
1912
+ 0.115,
1913
+ 0.584,
1914
+ 0.692,
1915
+ 0.6
1916
+ ],
1917
+ "angle": 0,
1918
+ "content": "Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017."
1919
+ },
1920
+ {
1921
+ "type": "ref_text",
1922
+ "bbox": [
1923
+ 0.116,
1924
+ 0.605,
1925
+ 0.885,
1926
+ 0.634
1927
+ ],
1928
+ "angle": 0,
1929
+ "content": "Yanyu Li, Huan Wang, Qing Jin, Ju Hu, Pavlo Chemerys, Yun Fu, Yanzhi Wang, Sergey Tulyakov, and Jian Ren. \nSnapfusion: Text-to-image diffusion model on mobile devices within two seconds, 2023."
1930
+ },
1931
+ {
1932
+ "type": "ref_text",
1933
+ "bbox": [
1934
+ 0.115,
1935
+ 0.639,
1936
+ 0.885,
1937
+ 0.668
1938
+ ],
1939
+ "angle": 0,
1940
+ "content": "Simian Luo, Yiqin Tan, Longbo Huang, Jian Li, and Hang Zhao. Latent consistency models: Synthesizing high-resolution images with few-step inference, 2023a."
1941
+ },
1942
+ {
1943
+ "type": "ref_text",
1944
+ "bbox": [
1945
+ 0.115,
1946
+ 0.673,
1947
+ 0.885,
1948
+ 0.702
1949
+ ],
1950
+ "angle": 0,
1951
+ "content": "Simian Luo, Yiqin Tan, Suraj Patil, Daniel Gu, Patrick von Platen, Apolinário Passos, Longbo Huang, Jian Li, and Hang Zhao. Lcm-lora: A universal stable-diffusion acceleration module, 2023b."
1952
+ },
1953
+ {
1954
+ "type": "ref_text",
1955
+ "bbox": [
1956
+ 0.115,
1957
+ 0.707,
1958
+ 0.885,
1959
+ 0.736
1960
+ ],
1961
+ "angle": 0,
1962
+ "content": "Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. Kosmos-2: Grounding multimodal large language models to the world. ArXiv, abs/2306.14824, 2023."
1963
+ },
1964
+ {
1965
+ "type": "ref_text",
1966
+ "bbox": [
1967
+ 0.114,
1968
+ 0.741,
1969
+ 0.885,
1970
+ 0.77
1971
+ ],
1972
+ "angle": 0,
1973
+ "content": "Pablo Pernias, Dominic Rampas, Mats L. Richter, Christopher J. Pal, and Marc Aubreville. Wuerstchen: An efficient architecture for large-scale text-to-image diffusion models, 2023."
1974
+ },
1975
+ {
1976
+ "type": "ref_text",
1977
+ "bbox": [
1978
+ 0.114,
1979
+ 0.775,
1980
+ 0.885,
1981
+ 0.804
1982
+ ],
1983
+ "angle": 0,
1984
+ "content": "Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis, 2023."
1985
+ },
1986
+ {
1987
+ "type": "ref_text",
1988
+ "bbox": [
1989
+ 0.114,
1990
+ 0.809,
1991
+ 0.885,
1992
+ 0.838
1993
+ ],
1994
+ "angle": 0,
1995
+ "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation, 2021."
1996
+ },
1997
+ {
1998
+ "type": "ref_text",
1999
+ "bbox": [
2000
+ 0.114,
2001
+ 0.843,
2002
+ 0.885,
2003
+ 0.886
2004
+ ],
2005
+ "angle": 0,
2006
+ "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10684-10695, June 2022."
2007
+ },
2008
+ {
2009
+ "type": "list",
2010
+ "bbox": [
2011
+ 0.114,
2012
+ 0.53,
2013
+ 0.885,
2014
+ 0.886
2015
+ ],
2016
+ "angle": 0,
2017
+ "content": null
2018
+ },
2019
+ {
2020
+ "type": "page_footnote",
2021
+ "bbox": [
2022
+ 0.13,
2023
+ 0.897,
2024
+ 0.337,
2025
+ 0.913
2026
+ ],
2027
+ "angle": 0,
2028
+ "content": "10https://playgroundai.com/"
2029
+ },
2030
+ {
2031
+ "type": "page_number",
2032
+ "bbox": [
2033
+ 0.494,
2034
+ 0.936,
2035
+ 0.504,
2036
+ 0.948
2037
+ ],
2038
+ "angle": 0,
2039
+ "content": "8"
2040
+ }
2041
+ ],
2042
+ [
2043
+ {
2044
+ "type": "header",
2045
+ "bbox": [
2046
+ 0.227,
2047
+ 0.044,
2048
+ 0.769,
2049
+ 0.058
2050
+ ],
2051
+ "angle": 0,
2052
+ "content": "Progressive Knowledge Distillation of Stable Diffusion XL using Layer Level Loss"
2053
+ },
2054
+ {
2055
+ "type": "ref_text",
2056
+ "bbox": [
2057
+ 0.115,
2058
+ 0.091,
2059
+ 0.885,
2060
+ 0.147
2061
+ ],
2062
+ "angle": 0,
2063
+ "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding, 2022."
2064
+ },
2065
+ {
2066
+ "type": "ref_text",
2067
+ "bbox": [
2068
+ 0.115,
2069
+ 0.151,
2070
+ 0.885,
2071
+ 0.18
2072
+ ],
2073
+ "angle": 0,
2074
+ "content": "Victor Sanh, Lysandre Debut, Julien Chaumont, and Thomas Wolf. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter, 2020."
2075
+ },
2076
+ {
2077
+ "type": "ref_text",
2078
+ "bbox": [
2079
+ 0.115,
2080
+ 0.184,
2081
+ 0.885,
2082
+ 0.214
2083
+ ],
2084
+ "angle": 0,
2085
+ "content": "Alex Shonenkov, Misha Konstantinov, Daria Bakshandaeva, Christoph Schuhmann, Ksenia Ivanova, and Nadija Klokova. If by deepfloyd lab at stabilityai, 2023."
2086
+ },
2087
+ {
2088
+ "type": "list",
2089
+ "bbox": [
2090
+ 0.115,
2091
+ 0.091,
2092
+ 0.885,
2093
+ 0.214
2094
+ ],
2095
+ "angle": 0,
2096
+ "content": null
2097
+ },
2098
+ {
2099
+ "type": "page_number",
2100
+ "bbox": [
2101
+ 0.494,
2102
+ 0.936,
2103
+ 0.505,
2104
+ 0.948
2105
+ ],
2106
+ "angle": 0,
2107
+ "content": "9"
2108
+ }
2109
+ ]
2110
+ ]
2401.02xxx/2401.02677/73a719b8-38a5-40a7-9b27-cce232a8c45f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:391d567d93278a1b7b8bf8e03f00e6c915b8eb019e708504d0812b06a214a883
3
+ size 30423437
2401.02xxx/2401.02677/full.md ADDED
@@ -0,0 +1,278 @@
1
+ # PROGRESSIVE KNOWLEDGE DISTILLATION OF Stable Diffusion XL USING LAYER LEVEL LOSS
2
+
3
+ TECHNICAL REPORT
4
+
5
+ Yatharth Gupta*
6
+
7
+ Segmind
8
+
9
+ yatharthg@segmind.com
10
+
11
+ Vishnu V. Jaddipal*
12
+
13
+ Segmind
14
+
15
+ vishnuj@segmind.com
16
+
17
+ Harish Prabhala
18
+
19
+ Segmind
20
+
21
+ harish@segmind.com
22
+
23
+ Sayak Paul
24
+
25
+ Hugging Face
26
+
27
+ sayak@huggingface.co
28
+
29
+ Patrick Von Platen
30
+
31
+ Hugging Face
32
+
33
+ patrick@huggingface.co
34
+
35
+ # ABSTRACT
36
+
37
+ Stable Diffusion XL (SDXL) has become the best open source text-to-image (T2I) model for its versatility and top-notch image quality. Efficiently addressing the computational demands of SDXL models is crucial for wider reach and applicability. In this work, we introduce two scaled-down variants, Segmind Stable Diffusion (SSD-1B) and Segmind-Vega, with 1.3B and 0.74B parameter UNets, respectively, achieved through progressive removal using layer-level losses, focusing on reducing the model size while preserving generative quality. We release these model weights at https://hf.co/Segmind.
38
+
39
+ Our methodology involves the elimination of residual networks and transformer blocks from the U-Net structure of SDXL, resulting in significant reductions in parameters and latency. Our compact models effectively emulate the original SDXL by capitalizing on transferred knowledge, achieving competitive results against the larger multi-billion-parameter SDXL.
40
+
41
+ Our work underscores the efficacy of knowledge distillation coupled with layer-level losses in reducing model size while preserving the high-quality generative capabilities of SDXL, thus facilitating more accessible deployment in resource-constrained environments.
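+
+ As a quick illustration of how the released checkpoints can be used, the following is a minimal sketch (not part of the original report); it assumes the weights are published under repository names such as `segmind/SSD-1B` and `segmind/Segmind-Vega` and load as drop-in replacements in the standard SDXL pipeline of the `diffusers` library:
+
+ ```python
+ import torch
+ from diffusers import StableDiffusionXLPipeline
+
+ # Hypothetical quick-start; the repository id and sampling settings are assumptions,
+ # chosen to mirror the inference configuration reported later (25 steps, guidance 9).
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+     "segmind/SSD-1B",  # or "segmind/Segmind-Vega"
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ image = pipe(
+     "a royal flaming wolf emerging from a magical forest",
+     num_inference_steps=25,
+     guidance_scale=9.0,
+ ).images[0]
+ image.save("ssd_1b_sample.png")
+ ```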
42
+
43
+ # 1 Introduction
44
+
45
+ Stable Diffusion (Rombach et al., 2022) has emerged as highly influential in the realm of text-to-image (T2I) synthesis, playing a pivotal role as an open-source framework. Its remarkable capabilities have spurred its integration as a backbone in various text-guided vision applications. Stable Diffusion, characterized as a T2I-specialized latent diffusion model (LDM), leverages diffusion operations within a semantically compressed space, enhancing computational efficiency. Central to the architecture of Stable Diffusion is a U-Net that employs iterative sampling to progressively denoise a random latent code. This process is further supported by a text encoder and an image decoder, orchestrating the generation of text-aligned images. SDXL (Podell et al., 2023) is the largest variant, with a 2.6B-parameter UNet and two text encoders, providing the best quality among open source models.
46
+
47
+ Notably, distillation techniques have been applied to pretrained diffusion models to curtail the number of denoising steps, resulting in identically structured models with reduced sampling requirements. Additionally, methods such as post-training quantization and implementation optimizations have been explored. The exploration of removing architectural elements in large diffusion models has also been investigated for the base U-Net models (Kim et al., 2023). In this context, our work endeavors to apply knowledge distillation methods to the SDXL model (Podell et al., 2023), resulting in the creation of two streamlined variants, namely Segmind Stable Diffusion (SSD-1B) and Segmind-Vega. We use the base model as well as finetuned versions in the distillation process. These models, with 1.3B and 0.74B
48
+
49
+ parameter UNets respectively, employ layer level losses to progressively reduce the model size to $20\%$ , $40\%$ , $50\%$ , $60\%$ , and ultimately $70\%$ . This reduction in model size aims to strike a balance between computational efficiency and the preservation of generative capabilities, making SDXL more accessible for diverse applications.
50
+
51
+ # 2 Related Work
52
+
53
+ # 2.1 Large Latent Diffusion Models
54
+
55
+ The exploration of diffusion-based generative models has been instrumental in achieving high-fidelity synthesis with broad mode coverage by gradually removing noise from corrupted data. The integration of these models with pretrained language models has notably enhanced the quality of text-to-image (T2I) synthesis. In models such as Imagen (Saharia et al., 2022) and DeepFloyd IF (Shonenkov et al., 2023), text-conditional diffusion models generate small images, subsequently upsampled through super-resolution modules. DALL-E (Ramesh et al., 2021) style models, on the other hand, employ a text-conditional prior network to produce an image embedding, transformed via a diffusion decoder and further upscaled into higher resolutions. LDMs perform diffusion modeling in a low-dimensional latent space constructed through a pixel-space autoencoder.
56
+
57
+ # 2.2 Efficient Diffusion Models
58
+
59
+ Efforts to address the slow sampling process in diffusion models have been widespread. Diffusion-tailored distillation progressively transfers knowledge from a pretrained diffusion model to a model with fewer sampling steps while maintaining the same architecture. Latent Consistency Models (Luo et al., 2023a) also allow the models to generate images in very few steps. Combining this with Low Rank Adapters (LoRAs) (Luo et al., 2023b) provides a very easy way of enabling fast generation with large models. Fast high-order solvers for diffusion ordinary differential equations aim to boost sampling speed. In complement to these approaches, our network compression method reduces per-step computation and seamlessly integrates with models employing fewer sampling steps. Leveraging quantization and implementation optimizations designed for SDXL can further enhance the efficiency of our compact models.
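+
+ To make this integration concrete, the sketch below (an illustrative example, not from the original report) pairs a compressed checkpoint with an LCM-LoRA adapter for few-step sampling, assuming the publicly released `latent-consistency/lcm-lora-ssd-1b` adapter and the `LCMScheduler` from `diffusers`:
+
+ ```python
+ import torch
+ from diffusers import StableDiffusionXLPipeline, LCMScheduler
+
+ # Illustrative sketch: per-step savings from compression stack with the step reduction
+ # from consistency distillation. Repository ids are assumptions.
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+     "segmind/SSD-1B", torch_dtype=torch.float16
+ ).to("cuda")
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+ pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
+
+ image = pipe(
+     "a pink cadillac driving through the countryside at sunset",
+     num_inference_steps=4,
+     guidance_scale=1.0,
+ ).images[0]
+ ```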
60
+
61
+ # 2.3 Distillation-Based Compression
62
+
63
+ Knowledge Distillation (KD) has been successful in improving the performance of small-size models by exploiting output-level and feature-level information from larger source models. Classical KD has also found applications in efficient GANs and in the Stable Diffusion base model. Our work demonstrates the extension of distillation-pretraining techniques, proven successful in small yet capable general-purpose language models and vision transformers, to SDXL.
64
+
65
+ # 2.3.1 Concurrent Studies
66
+
67
+ Studies such as SnapFusion (Li et al., 2023) achieve an efficient U-Net for Stable Diffusion through architecture evolution and step distillation. Wuerstchen (Pernias et al., 2023) introduces two diffusion processes on low- and high-resolution latent spaces for economic training. While these works are valuable, it is essential to note that they often require significantly larger computational resources than our proposed approach. Additionally, as demonstrated on Stable Diffusion, BK-SDM (Kim et al., 2023) proposes pruning the U-Net via removal of blocks, showcasing promising compression.
68
+
69
+ This work uses the technique of classical architectural compression in achieving smaller and faster diffusion models. The approach involves the removal of multiple transformer layers from the U-Net of SDXL, followed by retraining with feature-level knowledge distillation for general-purpose T2I. The contributions of this study are summarized as follows:
70
+
71
+ - Architectural Compression: We compress SDXL by strategically removing architectural blocks from the U-Net, resulting in a notable reduction in model size (up to $70\%$ ) and increased inference speeds (up to $100\%$ speedup).
72
+ - Feature Distillation: We use feature distillation for training diffusion models, demonstrating its remarkable benefits in achieving competitive T2I performance with significantly fewer resources. The cost-effectiveness of network compression is emphasized, particularly when compared to the substantial expense of training diffusion models from scratch.
73
+ - Downstream benefits: The method, to an extent, preserves the fidelity of generation with different LoRA and ControlNet networks, so they require less retraining before they can be used with the distilled model.
74
+
75
+ In summary, this research explores classical architectural compression for SDXL, providing a cost-effective strategy for building compact general-purpose diffusion models with compelling performance.
76
+
77
+ # 3 Methodology
78
+
79
+ In our pursuit of compressing SDXL models, we adopt a nuanced approach that centers on the removal of transformer layers within attention blocks. Our observation reveals a redundancy in numerous blocks, and our strategy involves judicious elimination without compromising the model's generative prowess. We draw inspiration from the architectural compression techniques applied to Stable Diffusion v1.5's $^2$ U-Net and extend the methodology to SDXL, yielding two scaled-down variants: Segmind Stable Diffusion (SSD-1B) and Segmind-Vega.
80
+
81
+ # 3.1 Architecture
82
+
83
+ Our compression strategy is motivated by the recognition that certain layers are dispensable without significantly affecting the model's performance. We leverage insights from various teacher models, including SDXL-base-1.0 and the fine-tuned Zavy Chroma-XL $^{3}$ and Juggernaut-XL $^{4}$ , during the compression process.
84
+
85
+ We report similar findings as BK-SDM (Kim et al., 2023), in that the middle block of the U-Net can be removed without significantly affecting image quality. In addition, we observe that removing only the attention layers and the second residual network (He et al., 2015) block preserves image quality to a higher degree than removing the whole mid-block.
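As a rough illustration of this mid-block surgery, the sketch below performs the corresponding edit on a `diffusers` `UNet2DConditionModel`; the attribute names follow the `diffusers` implementation, and the snippet reflects our reading of the procedure rather than released training code.

```python
# Sketch of the mid-block edit described above, assuming the diffusers
# UNet2DConditionModel layout (mid_block.attentions / mid_block.resnets).
import torch.nn as nn
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
)

# Drop every cross-attention transformer in the mid block and keep only the
# first ResNet; the mid block's forward pass then reduces to that single ResNet.
unet.mid_block.attentions = nn.ModuleList([])
unet.mid_block.resnets = nn.ModuleList([unet.mid_block.resnets[0]])

print(sum(p.numel() for p in unet.parameters()) / 1e9, "B params after the edit")
```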
86
+
87
+ ![](images/1d019a5f0399f5089149b9443b2d1fbfc16705d888ad8cf8a06dc452ba6db52c.jpg)
88
+ Figure 1: SDXL U-Net structure
89
+
90
+ ![](images/6d91e8cad379269874e14ae21ec129796abd3d3c0945f92940cb0da083a1319d.jpg)
91
+
92
+ ![](images/3f4c9aa6d87cad14f2f5d8ece3dc0dfa88df8c470e3bec012253a0551d1cf9e5.jpg)
93
+ Figure 2: SSD-1B U-Net structure
94
+
95
+ ![](images/81e599b06fce4ab63093d0146f17691a6d6109f0e9dca899876eef001c8ac47b.jpg)
96
+
97
+ ![](images/ff7a671b9cea95295b3d0700a218550c1a020684ace21132d5ef6f2ced770fbe.jpg)
98
+ Figure 3: Vega U-Net structure
99
+
100
+ ![](images/3829428b791322c0c1adcaf0a5b2fe2a13930039411f467c76cb9fd99f97ffc2.jpg)
101
+
102
+ # 3.2 Loss
103
+
104
+ In contrast to the block-level losses employed in prior work, we introduce layer-level losses specific to each attention and ResNet layer. This refined approach allows for a more granular assessment of the model's internal representations, enabling us to identify and retain essential features while discarding redundant elements. Our choice of layer-level losses is influenced by their efficacy in capturing the nuanced interactions within the model's architecture.
105
+
106
+ # 3.2.1 Input Generation
107
+
108
+ To obtain the inputs for the U-Net, we employ the pretrained text encoders to obtain the text embeddings and the pretrained VAE to obtain the latent representations of the images. Both the text encoders and the VAE are kept frozen during training; only the U-Net is trained. The latent representation $z$ of an image and its paired text embedding $y$ form the basis for our training process.
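A simplified sketch of this input preparation, assuming the `diffusers` SDXL pipeline, is shown below; the `encode_prompt` helper and its return signature depend on the library version, and the pixel tensor is only a placeholder for a real image batch.

```python
# Simplified input preparation, assuming the diffusers SDXL pipeline; the
# encode_prompt helper and its return signature depend on the library version.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32
)
pipe.vae.requires_grad_(False)            # VAE and both text encoders stay frozen
pipe.text_encoder.requires_grad_(False)
pipe.text_encoder_2.requires_grad_(False)

# y: per-token and pooled text embeddings from the frozen encoders
prompt_embeds, _, pooled_embeds, _ = pipe.encode_prompt(
    prompt="a castle at dawn", do_classifier_free_guidance=False
)

# z: latent representation of an image batch (placeholder pixels in [-1, 1])
pixels = torch.randn(1, 3, 1024, 1024)
with torch.no_grad():
    z = pipe.vae.encode(pixels).latent_dist.sample() * pipe.vae.config.scaling_factor
```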
109
+
110
+ # 3.2.2 Task Loss
111
+
112
+ We formulate the task loss, denoted as $L_{Task}$ , which is computed through the reverse denoising process. The task loss measures the disparity between the sampled noise $\epsilon$ from the diffusion process and the estimated noise $\epsilon_S(z_t,y,t)$ generated by our compact UNet student. The objective is to align the noise distribution of the student with that of the teacher.
113
+
114
+ $$
115
+ L_{Task} = \mathbb{E}_{z,\epsilon,y,t}\,\big\| \epsilon - \epsilon_{S}(z_{t}, y, t) \big\|_{2}^{2}
116
+ $$
117
+
118
+ # 3.2.3 Output-Level Knowledge Distillation (KD)
119
+
120
+ The compact student is trained to imitate the outputs of the original U-Net teacher, denoted as $\epsilon_{T}$ , using an output-level KD objective. This objective ensures that the overall output distribution of the student aligns with that of the teacher.
121
+
122
+ $$
123
+ L_{OutKD} = \mathbb{E}_{z,\epsilon,y,t}\,\big\| \epsilon_{T} - \epsilon_{S}(z_{t}, y, t) \big\|_{2}^{2}
124
+ $$
125
+
126
+ # 3.2.4 Feature-Level Knowledge Distillation (KD)
127
+
128
+ A pivotal component of our approach is feature-level KD, providing rich guidance for the student's training. The feature-level KD objective, denoted as $L_{FeatKD}$ , measures the difference between the feature maps of corresponding layers in both the teacher and student models. Importantly, our approach eliminates the need for additional regressors by ensuring that the dimensionality of feature maps already matches at the end of each layer in both models.
129
+
130
+ $$
131
+ L_{FeatKD} = \mathbb{E}_{z,\epsilon,y,t} \sum_{l} \big\| f_{l}^{T}(z_{t}, y, t) - f_{l}^{S}(z_{t}, y, t) \big\|_{2}^{2}
132
+ $$
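The paper does not release training code; one generic way to collect matched feature maps for such a loss is with forward hooks, as in the hedged sketch below. Selecting layers by class name is illustrative, and it assumes the teacher and student expose the same module names for the retained layers.

```python
# Generic feature-capture sketch using forward hooks. The class-name filter is
# illustrative; the actual layer pairing used for SSD-1B / Vega is not released.
import torch

def register_feature_hooks(unet, store):
    """Record the output feature map of every attention and ResNet layer."""
    handles = []
    for name, module in unet.named_modules():
        if module.__class__.__name__ in ("Transformer2DModel", "ResnetBlock2D"):
            def hook(_module, _inputs, output, key=name):
                # Inside the U-Net these modules may return tuples/dataclasses;
                # keep only the hidden-state tensor.
                if isinstance(output, (tuple, list)):
                    output = output[0]
                store[key] = getattr(output, "sample", output)
            handles.append(module.register_forward_hook(hook))
    return handles

teacher_feats, student_feats = {}, {}
# handles_t = register_feature_hooks(teacher_unet, teacher_feats)
# handles_s = register_feature_hooks(student_unet, student_feats)
```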
133
+
134
+ # 3.2.5 Overall Objective
135
+
136
+ The final objective encompasses the task loss, output-level KD, and feature-level KD, weighted by coefficients $\lambda_{OutKD}$ and $\lambda_{FeatKD}$ . Without loss-weight tuning, our approach demonstrates effectiveness in empirical validation.
137
+
138
+ $$
139
+ L = L_{Task} + \lambda_{OutKD} \cdot L_{OutKD} + \lambda_{FeatKD} \cdot L_{FeatKD}
140
+ $$
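Putting the three terms together, a single distillation step might look like the sketch below. The noise scheduler, the λ values, and the `added_cond_kwargs` plumbing (SDXL's pooled text embeddings and size/time ids) are assumptions for illustration; `teacher_feats` and `student_feats` are the dictionaries filled by the hook sketch above.

```python
# One illustrative distillation step combining L_Task, L_OutKD and L_FeatKD.
# The lambda values are placeholders (the paper reports no loss-weight tuning).
import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler

noise_scheduler = DDPMScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
)
lambda_out, lambda_feat = 1.0, 1.0

def distillation_loss(student_unet, teacher_unet, z, prompt_embeds,
                      added_cond_kwargs, teacher_feats, student_feats):
    noise = torch.randn_like(z)
    t = torch.randint(0, noise_scheduler.config.num_train_timesteps,
                      (z.shape[0],), device=z.device)
    z_t = noise_scheduler.add_noise(z, noise, t)

    with torch.no_grad():                                  # frozen teacher
        eps_teacher = teacher_unet(z_t, t, encoder_hidden_states=prompt_embeds,
                                   added_cond_kwargs=added_cond_kwargs).sample
    eps_student = student_unet(z_t, t, encoder_hidden_states=prompt_embeds,
                               added_cond_kwargs=added_cond_kwargs).sample

    loss_task = F.mse_loss(eps_student, noise)             # L_Task
    loss_out = F.mse_loss(eps_student, eps_teacher)        # L_OutKD
    loss_feat = sum(                                       # L_FeatKD (matched layers)
        F.mse_loss(student_feats[k], teacher_feats[k])
        for k in student_feats if k in teacher_feats
    )
    return loss_task + lambda_out * loss_out + lambda_feat * loss_feat
```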
141
+
142
+ Another advantage of this method of distillation is that LoRA weights created for the parent model tend to produce close results on the student without retraining, which may reduce the number of training steps required to migrate such models.
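In practice this means an SDXL LoRA can often be loaded onto the distilled pipeline as-is, e.g. with the standard `diffusers` call below; the LoRA repository id is a hypothetical placeholder, and how cleanly a given LoRA loads depends on which of its target layers survive pruning.

```python
# Loading a parent-model (SDXL) LoRA onto the distilled student without retraining;
# the LoRA repository id below is a hypothetical placeholder.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "segmind/SSD-1B", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("some-user/sdxl-style-lora")   # hypothetical SDXL LoRA
image = pipe("a watercolor lighthouse at dusk", num_inference_steps=25).images[0]
```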
143
+
144
+ To expound on our compression strategy, we consider the analogy to DistilBERT (Sanh et al., 2020), which reduces the number of layers while initializing the compact model with original weights. Our compression methodology involves targeted removal strategies in both down and up stages.
145
+
146
+ # 3.3 Teacher Models
147
+
148
+ We initially take SDXL Base $^{5}$ as the teacher, later swap it for a finetuned model, ZavyChromaXL $^{6}$ , and finally use JuggernautXL $^{7}$ . We find that swapping the teacher boosts the quality significantly even if the same dataset is used again. This showcases that using multiple expert models can aid in instilling new concepts as well as improving the quality of the student.
149
+
150
+ Our compression methodology, inspired by proven techniques (Kim et al., 2023), not only reduces model size but also ensures that essential features are retained through the careful removal of redundant blocks. The introduction of layer-level losses further refines this process, contributing to the overall efficiency and efficacy of our compressed models—SSD-1B and Segmind-Vega.
151
+
152
+ # 3.4 Pruning
153
+
154
+ We employ human evaluation of outputs along with heuristics to identify potential attention layers to remove.
155
+
156
+ To create SSD-1B, along with removing the mid-block's attention layers and the second residual block, we remove the following layers of SDXL (a layer-removal sketch follows the two lists below):
157
+
158
+ - 4th, 5th, 7th, 8th, 9th and 10th transformer blocks of all attention layers in the 3rd downsampling stage and the first two attention layers of the first upsampling stage of the U-Net.
159
+ - The second transformer block of the second and third attention layers of the second upsampling stage.
160
+
161
+ To create Segmind Vega, we remove the following layers:
162
+
163
+ - 3rd, 4th, 5th, 6th, 7th, 8th, 9th and 10th transformer blocks of the first attention layer of the third downsampling stage and all attention layers in the first upsampling stage of the U-Net.
164
+ - 2nd, 4th, 5th, 6th, 7th, 8th, 9th and 10th transformer blocks of the second attention layer of the third downsampling stage.
165
+ - The second transformer block of all attention layers of the second downsampling and upsampling stages.
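For concreteness, the sketch below shows how such transformer blocks can be dropped from one attention layer of a `diffusers` U-Net stage, using the first bullet of the Vega list as the example (indices are 0-based here); it illustrates the mechanism and is not the full pruning script.

```python
# Sketch: retain only a subset of BasicTransformerBlocks inside one attention
# layer (a Transformer2DModel) of the SDXL U-Net. Example: keep the first two
# blocks of the first attention layer in the third downsampling stage, i.e.
# drop the 3rd-10th blocks as in the Vega recipe above.
import torch.nn as nn
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
)

def keep_transformer_blocks(attention, keep_idx):
    """Drop every transformer block whose index is not in keep_idx."""
    attention.transformer_blocks = nn.ModuleList(
        [blk for i, blk in enumerate(attention.transformer_blocks) if i in keep_idx]
    )

third_down_stage = unet.down_blocks[2]      # 10 transformer blocks per attention layer
keep_transformer_blocks(third_down_stage.attentions[0], keep_idx={0, 1})
print(sum(p.numel() for p in unet.parameters()) / 1e9, "B params after this edit")
```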
166
+
167
+ # 4 Training
168
+
169
+ In our training methodology, we adopt a distillation-based retraining approach. We use a layer-level loss in an attempt to mimic the features at each stage of the teacher U-Net. This process is crucial for achieving efficient knowledge transfer and preserving the generative quality of SDXL even in significantly compressed models.
170
+
171
+ Our training strategy, inspired by distillation-based retraining, ensures that our compressed models inherit the essential knowledge from the teacher model, enabling them to efficiently mimic the behavior of the original U-Net across various layers, including attention and residual network (ResNet) layers.
172
+
173
+ We trained SSD-1B at fp16 mixed precision for a total of 251,000 steps with a constant learning rate of 1e-5, using the Adam optimizer (Kingma & Ba, 2017), at $1024 \times 1024$ image resolution, on four 80GB A100 GPUs at an effective batch size of 32. We trained Vega at fp16 mixed precision for a total of 540,000 steps with a learning rate of 1e-5, at $1024 \times 1024$ image resolution, on four 80GB A100 GPUs, at an effective batch size of 128. The datasets used for training and evaluation include GRIT (Peng et al., 2023) and images generated by Midjourney<sup>8</sup>.
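A minimal optimizer and mixed-precision setup reflecting these reported hyperparameters might look as follows; the per-GPU batch size and gradient-accumulation factor are assumptions chosen only to reach the stated effective batch size of 32.

```python
# Optimizer / precision setup reflecting the reported settings (Adam, lr 1e-5,
# fp16 mixed precision). The per-GPU batch size and accumulation factor are
# assumptions chosen only to reach the stated effective batch size.
import torch
from accelerate import Accelerator
from diffusers import UNet2DConditionModel

student_unet = UNet2DConditionModel.from_pretrained("segmind/SSD-1B", subfolder="unet")

accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=2)
optimizer = torch.optim.Adam(student_unet.parameters(), lr=1e-5)
student_unet, optimizer = accelerator.prepare(student_unet, optimizer)
# e.g. 4 GPUs x per-GPU batch 4 x gradient accumulation 2 = effective batch size 32
```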
174
+
175
+ # 5 Results
176
+
177
+ We present two distilled versions of Stable Diffusion XL, Segmind Stable Diffusion (SSD-1B) and Segmind-Vega, which closely mimic the outputs of the base model, as shown in Figures 4, 5, 6, 7, 8 and 9. All images are generated with the DDPM scheduler, 25 inference steps, and a guidance scale of 9.
178
+
179
+ We report up to $60\%$ speedup with SSD-1B and up to $100\%$ speedup with Segmind-Vega. The detailed metrics, measured on an A100 at 25 steps with the DDPM scheduler, guidance scale 9, and batch size 1, are reported in Table 1.
180
+
181
+ <table><tr><td>Model</td><td>Inference Time (s) (↓)</td><td>Iterations/s (↑)</td></tr><tr><td>SD 1.5<sup>9</sup></td><td>1.699</td><td>16.79</td></tr><tr><td>SDXL</td><td>3.135</td><td>8.80</td></tr><tr><td>SSD-1B</td><td>2.169</td><td>13.37</td></tr><tr><td>Vega</td><td>1.616</td><td>18.95</td></tr></table>
182
+
183
+ Table 1: Benchmarking inference latency
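A rough way to reproduce such latency numbers with `diffusers` is sketched below, mirroring the reported setting (DDPM scheduler, 25 steps, guidance scale 9, batch size 1); absolute timings will of course vary with hardware and library versions.

```python
# Rough latency measurement mirroring the reported setting: DDPM scheduler,
# 25 steps, guidance scale 9, batch size 1, single GPU.
import time
import torch
from diffusers import StableDiffusionXLPipeline, DDPMScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "segmind/SSD-1B", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)

prompt = "A royal flaming wolf emerging from a magical big forest"
pipe(prompt, num_inference_steps=2, guidance_scale=9)      # warm-up

torch.cuda.synchronize()
start = time.perf_counter()
pipe(prompt, num_inference_steps=25, guidance_scale=9)
torch.cuda.synchronize()
print(f"latency: {time.perf_counter() - start:.3f} s")
```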
184
+
185
+ ![](images/07a59664e8d6b0cfca1ff3eb496f8baa5ed2886be03d7fa1841075459e4cc863.jpg)
186
+ (a) SDXL
187
+
188
+ ![](images/a0cd48cb3c3742c5ee15e9acf36f7fee1deb7d1d816ae89c8963403abdf16ddb.jpg)
189
+ (b) SSD-1B
190
+ Figure 4: "A royal flaming wolf emerging from a magical big forest, blue flames, front facing, portrait, closeup, dark, bokeh, dawn, god rays, highly detailed, highres, Cinematic, Cinemascope, astonishing, epic, gorgeous, ral-fluff"
191
+
192
+ ![](images/b6020be88cb29f929393888097eb1c0e539d0d55ce61c54c68638e3b61b6427b.jpg)
193
+ (c) Vega
194
+
195
+ ![](images/147d5f50c62fd6cdd292daf3c7ca3dcafeb294b7c2cc4248ce0363ae3b90162d.jpg)
196
+ (a) SDXL
197
+ Figure 5: "raw photo, close-up, punk band cover, red brick wall, red theme, a brutal man, 40 years old, mohawk, (manly, wide jaw:1.2), leather jacket, red shirt, (vibrant colors:0.9), film grain, bokeh, fashion magazine, hdr, highly detailed photography, (muted colors, cinematic, dim colors, soothing tones:1.2), vibrant, insanely detailed, hyperdetailed, (dark shot:1.2), (vsco:0.3), (intricate details:0.9), (hdr, hyperdetailed:1.2)"
198
+
199
+ ![](images/cda4fe33f6fe5abbcdb226295606e3231f166fb7569c7f1fddba8e6a2246be79.jpg)
200
+ (b) SSD-1B
201
+
202
+ ![](images/05e3a954da7505caa497e085efb656241ed3a19dbb0b7b6fdec6b067beb84d19.jpg)
203
+ (c) Vega
204
+
205
+ ![](images/5131f9726bb6909778cc437ed16f5972dae906c70a81ab5434722a010ebccd76.jpg)
206
+ (a) SDXL
207
+ Figure 6: "(best quality:1.5), (intricate emotional details:1.5), (sharpen details), (ultra detailed), (cinematic lighting), pink Cadillac, car, driving through the country, sunset, relaxing vibes. cartoon style, line art, sticker style"
208
+
209
+ ![](images/48cd02cecf92136ee610268891a2972cfab524a0e9d18d8271bf4e4542353d2e.jpg)
210
+ (b) SSD-1B
211
+
212
+ ![](images/d09a20c171fea95b894670f3da797b0968a2e85fdec86eca1c55f41eaab50c09.jpg)
213
+ (c) Vega
214
+
215
+ ![](images/ad79135e13607d7737cba03e508c5214c5138bfabc9cb78b1434e77f3c23b7cd.jpg)
216
+ (a) SDXL
217
+
218
+ ![](images/594450319e3beb0c8f4868d752c288b8004f4151d3c5c261e712f6c94c080cce.jpg)
219
+ (b) SSD-1B
220
+ Figure 7: "Swamp marsh Poison green red Soft watercolors digital watercolors painting illustration masterpiece raining shooting stars twinkling stars glistening stars glittery stars full moon stars full moon intricate motifs perfect composition masterpiece insanely-detailed extreme-detailed hyper-detailed beautiful volumetric deep rich colors volumetric lighting shadows Ray tracing, Mark Brooks and Dan Mumford, comic book art, perfect"
221
+
222
+ ![](images/1c26eca1e033343b135988c79571bb7f87627f70b7d1bc3f6e46a47f206c00a6.jpg)
223
+ (c) Vega
224
+
225
+ ![](images/ed7d848c764093464e63fe5bb933c825a97d58de0317ff8ebda9eeda62d0a37a.jpg)
226
+ (a) SDXL
227
+ Figure 8: ("best quality:1.5), (intricate emotional details:1.5), (sharpen details), (ultra detailed), (cinematic lighting), magical woods, unexplained lights, fantasy, otherworldly, mist, atmospheric, flowers, plants"
228
+
229
+ ![](images/1c434151cfd430ea2fa14c658bfc05cd6cca2e4303286be9af7563b21c143ac4.jpg)
230
+ (b) SSD-1B
231
+
232
+ ![](images/97cae3e8af3bd8774be0b5216f5b28cdd44f4126cbe291b4ae81fb32bf157158.jpg)
233
+ (c) Vega
234
+
235
+ ![](images/1d1f458071946d6d3abea83cd29ad5bd0ab6474749c631d74286b8f1f0ca2aa0.jpg)
236
+ (a) SDXL
237
+ Figure 9: "((fatty cat)) dracula, Victorian style, dracula-inspired, long red-black cloak, fangs, castle, in motion, furry paws, action-packed background, dark theme, glow"
238
+
239
+ ![](images/944af38ab0d81c636c16806e7cbfb2375fa68af3536c08acd83136f4fb514ed8.jpg)
240
+ (b) SSD-1B
241
+
242
+ ![](images/e711b5be98f41bfeb23349cdf7b803ff0c419b10e97acdf7e9e2c11f6976d5b3.jpg)
243
+ (c) Vega
244
+
245
+ # 5.1 Quality Study
246
+
247
+ PlaygroundAI $^{10}$ , a generative AI startup, conducted an extensive blind human preference study encompassing 1000 images and involving 1540 unique users to assess the comparative performance of SSD-1B and SDXL. Remarkably, the findings revealed that not only did SSD-1B maintain image quality, but it was also marginally preferred over the larger SDXL model. The comprehensive details of this study are presented in Table 2.
248
+
249
+ <table><tr><td>Model</td><td>Pairs Won (↑)</td><td>Percentage Pairs Won (↑)</td></tr><tr><td>SSD-1B</td><td>528</td><td>52.8</td></tr><tr><td>SDXL</td><td>472</td><td>47.2</td></tr></table>
250
+
251
+ Table 2: Human preference study
252
+
253
+ The table illustrates the outcomes of the study, with SSD-1B securing victory in $52.8\%$ of the image pairs, whereas SDXL, although commendable, trailed slightly with $47.2\%$ . These results not only underscore the noteworthy quality preservation of SSD-1B but also highlight its perceptible preference among the diverse user cohort involved in the blind study.
254
+
255
+ # 6 Conclusion
256
+
257
+ We show that distilling large models like SDXL with knowledge distillation, using multiple models as teachers and feature-level losses, can quickly converge to outputs of similar quality to the base model despite the student model being significantly smaller. Its limitations include, but are not limited to, text, hands, and full-body shots.
258
+
259
+ Our work also highlights the importance of the choice of dataset and teacher model, as it can tremendously help boost the final model's quality. We show that progressive distillation can reduce total training time significantly. In the future, this technique can be further explored on other large models such as LLMs, MLMs, etc.
260
+
261
+ # References
262
+
263
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015.
264
+ Bo-Kyeong Kim, Hyoung-Kyu Song, Thibault Castells, and Shinkook Choi. Bk-sdm: A lightweight, fast, and cheap version of stable diffusion, 2023.
265
+ Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization, 2017.
266
+ Yanyu Li, Huan Wang, Qing Jin, Ju Hu, Pavlo Chemerys, Yun Fu, Yanzhi Wang, Sergey Tulyakov, and Jian Ren.
267
+ Snapfusion: Text-to-image diffusion model on mobile devices within two seconds, 2023.
268
+ Simian Luo, Yiqin Tan, Longbo Huang, Jian Li, and Hang Zhao. Latent consistency models: Synthesizing high-resolution images with few-step inference, 2023a.
269
+ Simian Luo, Yiqin Tan, Suraj Patil, Daniel Gu, Patrick von Platen, Apolinário Passos, Longbo Huang, Jian Li, and Hang Zhao. Lcm-lora: A universal stable-diffusion acceleration module, 2023b.
270
+ Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. Kosmos-2: Grounding multimodal large language models to the world. ArXiv, abs/2306.14824, 2023.
271
+ Pablo Pernias, Dominic Rampas, Mats L. Richter, Christopher J. Pal, and Marc Aubreville. Wuerstchen: An efficient architecture for large-scale text-to-image diffusion models, 2023.
272
+ Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis, 2023.
273
+ Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation, 2021.
274
+ Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10684-10695, June 2022.
275
+
276
+ Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Raphael Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding, 2022.
277
+ Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter, 2020.
278
+ Alex Shonenkov, Misha Konstantinov, Daria Bakshandaeva, Christoph Schuhmann, Ksenia Ivanova, and Nadija Klokova. If by deepfloyd lab at stabilityai, 2023.
2401.02xxx/2401.02677/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ebd73bcef1ac84efef852f97ca0a94c99e6723f2353c43f0a8de110d79e5c7e
3
+ size 529577
2401.02xxx/2401.02677/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.02xxx/2401.02695/0b4460a7-9d5c-4b40-9964-41bc630a0668_content_list.json ADDED
The diff for this file is too large to render. See raw diff