SlowGuess committed
Commit 2a01e64 · verified · 1 Parent(s): 888a130

Add Batch 84bfb9ea-0d81-4dd7-ad4a-aadfdbb139b3

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_content_list.json +0 -0
  3. 2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_model.json +0 -0
  4. 2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_origin.pdf +3 -0
  5. 2502.16xxx/2502.16652/full.md +582 -0
  6. 2502.16xxx/2502.16652/images.zip +3 -0
  7. 2502.16xxx/2502.16652/layout.json +0 -0
  8. 2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_content_list.json +0 -0
  9. 2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_model.json +0 -0
  10. 2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_origin.pdf +3 -0
  11. 2502.16xxx/2502.16681/full.md +0 -0
  12. 2502.16xxx/2502.16681/images.zip +3 -0
  13. 2502.16xxx/2502.16681/layout.json +0 -0
  14. 2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_content_list.json +0 -0
  15. 2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_model.json +0 -0
  16. 2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_origin.pdf +3 -0
  17. 2502.16xxx/2502.16707/full.md +915 -0
  18. 2502.16xxx/2502.16707/images.zip +3 -0
  19. 2502.16xxx/2502.16707/layout.json +0 -0
  20. 2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_content_list.json +0 -0
  21. 2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_model.json +0 -0
  22. 2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_origin.pdf +3 -0
  23. 2502.16xxx/2502.16761/full.md +0 -0
  24. 2502.16xxx/2502.16761/images.zip +3 -0
  25. 2502.16xxx/2502.16761/layout.json +0 -0
  26. 2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_content_list.json +0 -0
  27. 2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_model.json +0 -0
  28. 2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_origin.pdf +3 -0
  29. 2502.16xxx/2502.16804/full.md +404 -0
  30. 2502.16xxx/2502.16804/images.zip +3 -0
  31. 2502.16xxx/2502.16804/layout.json +0 -0
  32. 2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_content_list.json +436 -0
  33. 2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_model.json +454 -0
  34. 2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_origin.pdf +3 -0
  35. 2502.16xxx/2502.16848/full.md +76 -0
  36. 2502.16xxx/2502.16848/images.zip +3 -0
  37. 2502.16xxx/2502.16848/layout.json +1842 -0
  38. 2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_content_list.json +888 -0
  39. 2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_model.json +1182 -0
  40. 2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_origin.pdf +3 -0
  41. 2502.16xxx/2502.16866/full.md +158 -0
  42. 2502.16xxx/2502.16866/images.zip +3 -0
  43. 2502.16xxx/2502.16866/layout.json +0 -0
  44. 2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_content_list.json +0 -0
  45. 2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_model.json +0 -0
  46. 2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_origin.pdf +3 -0
  47. 2502.16xxx/2502.16923/full.md +0 -0
  48. 2502.16xxx/2502.16923/images.zip +3 -0
  49. 2502.16xxx/2502.16923/layout.json +0 -0
  50. 2502.16xxx/2502.16932/d7ef2337-f096-458d-8785-a2073415facb_content_list.json +0 -0
.gitattributes CHANGED
@@ -4208,3 +4208,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
4208
  2503.02xxx/2503.02891/f74559d8-8ed9-4f5e-809b-4cc8a008ba0a_origin.pdf filter=lfs diff=lfs merge=lfs -text
4209
  2503.04xxx/2503.04783/29da5167-82fb-4920-85ac-b65a6c1862eb_origin.pdf filter=lfs diff=lfs merge=lfs -text
4210
  2503.05xxx/2503.05777/33eb3aef-2105-4565-86f6-3440f369b359_origin.pdf filter=lfs diff=lfs merge=lfs -text
4211
+ 2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_origin.pdf filter=lfs diff=lfs merge=lfs -text
4212
+ 2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_origin.pdf filter=lfs diff=lfs merge=lfs -text
4213
+ 2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_origin.pdf filter=lfs diff=lfs merge=lfs -text
4214
+ 2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4215
+ 2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_origin.pdf filter=lfs diff=lfs merge=lfs -text
4216
+ 2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_origin.pdf filter=lfs diff=lfs merge=lfs -text
4217
+ 2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
4218
+ 2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_origin.pdf filter=lfs diff=lfs merge=lfs -text
4219
+ 2502.16xxx/2502.16932/d7ef2337-f096-458d-8785-a2073415facb_origin.pdf filter=lfs diff=lfs merge=lfs -text
4220
+ 2502.16xxx/2502.16982/e009a105-3d58-498a-a49d-98de9c8a6813_origin.pdf filter=lfs diff=lfs merge=lfs -text
4221
+ 2502.16xxx/2502.16983/2ddf626d-5012-447b-ab9f-0aec88dd2459_origin.pdf filter=lfs diff=lfs merge=lfs -text
4222
+ 2502.17xxx/2502.17019/be354b0a-fd88-4421-bb44-024d28ec96a0_origin.pdf filter=lfs diff=lfs merge=lfs -text
4223
+ 2502.17xxx/2502.17041/331d0cf6-9e80-4289-9704-1cbfa80db09a_origin.pdf filter=lfs diff=lfs merge=lfs -text
4224
+ 2502.17xxx/2502.17125/2eff8f3d-9e2b-4fb2-89e0-dea5f2007fed_origin.pdf filter=lfs diff=lfs merge=lfs -text
4225
+ 2502.17xxx/2502.17157/6ce82203-8c1e-4150-b225-1c8f1b4d3fe0_origin.pdf filter=lfs diff=lfs merge=lfs -text
4226
+ 2502.17xxx/2502.17237/4e6c77c1-358e-41d5-8d29-df919fd87c32_origin.pdf filter=lfs diff=lfs merge=lfs -text
4227
+ 2502.17xxx/2502.17239/9d4eb8d4-febe-4ce3-aca7-7beb9be3952b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4228
+ 2502.17xxx/2502.17248/caddf4fe-8640-46f6-b3c2-80d41f4e1d90_origin.pdf filter=lfs diff=lfs merge=lfs -text
4229
+ 2502.17xxx/2502.17258/99bc752b-22c7-4f12-bea3-7a9e1bccf803_origin.pdf filter=lfs diff=lfs merge=lfs -text
4230
+ 2502.17xxx/2502.17288/56f3ad02-d85b-47ca-bcb3-4f887f76d1b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
4231
+ 2502.17xxx/2502.17298/9adaae46-d9b6-462c-8909-4cfae0e19216_origin.pdf filter=lfs diff=lfs merge=lfs -text
4232
+ 2502.17xxx/2502.17363/a94407c8-bea8-45a8-88e4-7cbd9218d7ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
4233
+ 2502.17xxx/2502.17387/7642b4f6-9089-4597-a4bc-861b22ab0849_origin.pdf filter=lfs diff=lfs merge=lfs -text
4234
+ 2502.17xxx/2502.17407/006f9e4d-cf6d-4d23-afbe-f316c9d5ee3e_origin.pdf filter=lfs diff=lfs merge=lfs -text
4235
+ 2502.17xxx/2502.17410/464f5c60-5831-4e8a-9170-e9c644f78ef2_origin.pdf filter=lfs diff=lfs merge=lfs -text
4236
+ 2502.17xxx/2502.17416/9e715c87-e42f-418e-9a34-20e3cb729c66_origin.pdf filter=lfs diff=lfs merge=lfs -text
4237
+ 2502.17xxx/2502.17419/09f2e9dc-93c2-44f3-ad16-db098847f3d2_origin.pdf filter=lfs diff=lfs merge=lfs -text
4238
+ 2502.17xxx/2502.17420/ece7c71b-ecd2-4d05-8c15-a3100621e0e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
4239
+ 2502.17xxx/2502.17422/8a0b0e7d-0c0b-47a4-98c9-4c9e5af2b3de_origin.pdf filter=lfs diff=lfs merge=lfs -text
4240
+ 2502.17xxx/2502.17424/be0fd235-8cce-43a2-b0ae-f8ae6cace4cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
4241
+ 2502.17xxx/2502.17432/3d934424-6e15-4b8a-83ea-11396da40f20_origin.pdf filter=lfs diff=lfs merge=lfs -text
4242
+ 2502.17xxx/2502.17437/484caff3-1b70-452a-a80a-55dbabf36df7_origin.pdf filter=lfs diff=lfs merge=lfs -text
4243
+ 2502.17xxx/2502.17578/3ae57f84-b163-401a-ac5e-d7a3468662e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
4244
+ 2502.17xxx/2502.17599/952a9239-36bc-411d-9fa2-0ac8af629afa_origin.pdf filter=lfs diff=lfs merge=lfs -text
4245
+ 2502.17xxx/2502.17764/c62c35b2-74b6-45f5-912d-0b903652e10c_origin.pdf filter=lfs diff=lfs merge=lfs -text
4246
+ 2502.17xxx/2502.17814/479765c0-7c07-4912-af28-7670f2fac17b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4247
+ 2502.17xxx/2502.17898/49f80135-a940-4f17-b9aa-178b7e58021f_origin.pdf filter=lfs diff=lfs merge=lfs -text
4248
+ 2502.17xxx/2502.17905/2d54889e-789d-4e96-86b1-169e76c82264_origin.pdf filter=lfs diff=lfs merge=lfs -text
4249
+ 2502.17xxx/2502.17947/664f27db-3ea7-421f-b897-bad05e284081_origin.pdf filter=lfs diff=lfs merge=lfs -text
4250
+ 2502.18xxx/2502.18001/18e2a553-8a6b-4fcb-a39a-b9f17f9af9d3_origin.pdf filter=lfs diff=lfs merge=lfs -text
4251
+ 2502.18xxx/2502.18008/c1569526-3cba-448f-9d1c-703c28c3a7d3_origin.pdf filter=lfs diff=lfs merge=lfs -text
4252
+ 2502.18xxx/2502.18017/f1220509-bc2f-4ad2-bc01-68246719b5a0_origin.pdf filter=lfs diff=lfs merge=lfs -text
4253
+ 2502.18xxx/2502.18036/51bbf89d-0f71-4aab-958c-14d3bf0cb4a3_origin.pdf filter=lfs diff=lfs merge=lfs -text
4254
+ 2502.18xxx/2502.18041/7e9cfa0c-d246-48cf-bea0-aadf3505939f_origin.pdf filter=lfs diff=lfs merge=lfs -text
4255
+ 2502.18xxx/2502.18042/417adc71-6a10-43bf-9c17-f55b9a86ad4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4256
+ 2502.18xxx/2502.18064/2a877696-875a-4f6f-9c52-dae6affc33ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
4257
+ 2502.18xxx/2502.18080/e28f94c0-4afc-45ff-ac74-7a1eb8577967_origin.pdf filter=lfs diff=lfs merge=lfs -text
4258
+ 2502.18xxx/2502.18120/731e1047-b0e6-41d1-9cd1-16bd48a8bac9_origin.pdf filter=lfs diff=lfs merge=lfs -text
4259
+ 2502.18xxx/2502.18137/7708c3e9-1c83-49a0-b8e2-eeea141ef316_origin.pdf filter=lfs diff=lfs merge=lfs -text
4260
+ 2502.18xxx/2502.18297/8f8a8d93-b0bd-49c0-a09c-41d9581b5dfd_origin.pdf filter=lfs diff=lfs merge=lfs -text
4261
+ 2502.18xxx/2502.18357/0849530e-b3d6-40e0-a268-76ac41ca694f_origin.pdf filter=lfs diff=lfs merge=lfs -text
4262
+ 2502.18xxx/2502.18364/ff2b4faa-b615-4bc3-a98e-da7c7caee41a_origin.pdf filter=lfs diff=lfs merge=lfs -text
4263
+ 2502.18xxx/2502.18411/956ffde5-4c47-4c0d-bc6b-15a3a1dcb372_origin.pdf filter=lfs diff=lfs merge=lfs -text
4264
+ 2502.18xxx/2502.18418/3b425ff6-bde2-4f46-a51c-3b1ba27d59f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
4265
+ 2502.18xxx/2502.18439/fd6647ca-98ab-410d-9b45-64928821beb4_origin.pdf filter=lfs diff=lfs merge=lfs -text
4266
+ 2502.18xxx/2502.18443/c49b4b94-bb3b-4ae8-8f40-678f9b8dcb60_origin.pdf filter=lfs diff=lfs merge=lfs -text
4267
+ 2502.18xxx/2502.18449/bf10b139-4403-4125-b0da-04807e245608_origin.pdf filter=lfs diff=lfs merge=lfs -text
4268
+ 2502.18xxx/2502.18460/78bb7b61-75c5-4fe8-8ef2-2a417ef5391c_origin.pdf filter=lfs diff=lfs merge=lfs -text
4269
+ 2502.18xxx/2502.18461/b8a8c791-431f-4083-baea-21c2c0418e58_origin.pdf filter=lfs diff=lfs merge=lfs -text
4270
+ 2502.18xxx/2502.18462/1383969c-2b01-4742-b835-690a5e4d0f20_origin.pdf filter=lfs diff=lfs merge=lfs -text
4271
+ 2502.18xxx/2502.18535/2f4cb6fa-9e84-45e7-a42b-41b28f280538_origin.pdf filter=lfs diff=lfs merge=lfs -text
4272
+ 2502.18xxx/2502.18581/b6157360-ac49-48ba-bd4c-a755faeda835_origin.pdf filter=lfs diff=lfs merge=lfs -text
4273
+ 2503.00xxx/2503.00031/3f83011d-5d80-43cf-a47b-33f937eb2d19_origin.pdf filter=lfs diff=lfs merge=lfs -text
4274
+ 2503.05xxx/2503.05770/e80b1d4c-c12c-4baa-a8ef-a2846d182ab9_origin.pdf filter=lfs diff=lfs merge=lfs -text
2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16652/a5e2dafa-f2d0-4050-903a-5789d6c41270_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28aceda1f6c6dc51d6d345f14a171b67868a80c04826a4a8c21de444cf93e3de
3
+ size 10356819
2502.16xxx/2502.16652/full.md ADDED
@@ -0,0 +1,582 @@
1
+ # Dr. Splat: Directly Referring 3D Gaussian Splatting via Direct Language Embedding Registration
2
+
3
+ Kim Jun-Seong<sup>1</sup>
4
+
5
+ GeonU Kim
6
+
7
+ Jaesung Choe $^{2*}$
8
+
9
+ Kim Yu-Ji
10
+
11
+ Yu-Chiang Frank Wang $^{2}$
12
+
13
+ Tae-Hyun Oh $^{3*}$
14
+
15
+ $^{1}$ POSTECH $^{2}$ NVIDIA $^{3}$ KAIST
16
+
17
+ # Abstract
18
+
19
+ We introduce Dr. Splat, a novel approach for open-vocabulary 3D scene understanding leveraging 3D Gaussian Splatting. Unlike existing language-embedded 3DGS methods, which rely on a rendering process, our method directly associates language-aligned CLIP embeddings with 3D Gaussians for holistic 3D scene understanding. The key of our method is a language feature registration technique where CLIP embeddings are assigned to the dominant Gaussians intersected by each pixel-ray. Moreover, we integrate Product Quantization (PQ) trained on general large-scale image data to compactly represent embeddings without per-scene optimization. Experiments demonstrate that our approach significantly outperforms existing approaches in 3D perception benchmarks, such as open-vocabulary 3D semantic segmentation, 3D object localization, and 3D object selection tasks. For video results, please visit: https://drsplat.github.io/
20
+
21
+ # 1. Introduction
22
+
23
+ Open-vocabulary 3D scene understanding represents a significant challenge in the field of computer vision, with applications spanning autonomous navigation, robotics, and augmented reality. This approach aims to enable the interpretation and referencing of 3D spatial information through natural language, allowing for applicability beyond a restricted set of predefined categories [2, 3, 28, 29, 33, 38, 43]. Previously, open-vocabulary 3D scene understanding has been explored using point-cloud-based methods [12, 16, 18, 27, 30, 36, 40]. Recently, the 3D Gaussian Splatting (3DGS) [17] has introduced a continuous representation integrated on explicit 3D Gaussians, which differs from traditional point-cloud approaches, enabling rapid progress in practical applications [44]. Current research has begun to explore methods for associating language-based features with 3D Gaussian splats to enhance scene understanding capabilities.
24
+
25
+ ![](images/2669d3ab4180b75affa323c4dfbfca00d37d4817929dca600a9881b9af85d127.jpg)
26
+ Figure 1. Comparison of 2D (left) vs. our direct 3D search (right) for open-vocabulary 3D scene understanding. The 2D approach relies on multiview rendering, incurring high computational costs. Our method directly links language features to 3D Gaussians, enabling efficient and complete spatial coverage. The table highlights Dr. Splat's superior efficiency over related methods.
27
+
28
+ <table><tr><td></td><td>Search domain</td><td>Per-scene opt.</td><td>Feature distill.</td><td>Search</td><td>DB size</td></tr><tr><td>LERF [18]</td><td>2D</td><td>required</td><td>~24h</td><td>slow</td><td>large</td></tr><tr><td>LangSplat [30]</td><td>2D</td><td>required</td><td>~4h</td><td>slow</td><td>large</td></tr><tr><td>LEGaussians [35]</td><td>2D</td><td>required</td><td>~4h</td><td>slow</td><td>large</td></tr><tr><td>OpenGaussian [39]</td><td>3D</td><td>required</td><td>~1h</td><td>fast</td><td>small</td></tr><tr><td>Dr. Splat (Ours)</td><td>3D</td><td>none</td><td>~10m</td><td>fast</td><td>small</td></tr></table>
29
+
30
+ Several recent approaches [30, 35, 46] introduce 3D Gaussian representation [17] into the open-vocabulary scene understanding. This unique representation uses 3D Gaussians to achieve high-quality scene rendering, offering a more structured representation that addresses some limitations of point clouds. Building on this, these methods employ 2D vision-language models to transfer language knowledge to 3D Gaussians "via rendered feature maps".
31
+
32
+ Despite its promise, such rendering-based distillation methods [30, 35] share two limitations. First, we found that there is a discrepancy between optimized embeddings in 3D Gaussians and 2D language-aligned embeddings. This gap arises mainly from an intermediate rendering step that may distort CLIP embeddings during training. Second, the reliance on rendering impedes holistic 3D scene understanding and additional task processing, such as 3D semantic segmentation and 3D object localization, and makes full spatial coverage calculations less efficient than direct 3D Gaussian methods [39], including ours, as illustrated in Fig. 1.
35
+
36
+ To address this issue, this work proposes Dr. Splat. Our method bypasses the rendering stage, enabling direct interaction with 3D Gaussians for registering and referencing well-preserved language-aligned CLIP embeddings in 3D space. This makes our Dr. Splat clearly distinguishable from prior works, facilitating a seamless integration of representative embeddings from 2D vision-language models into the 3D spatial structure without the exhaustive rendering process exploited in prior work [15, 30, 35, 44-46]. Moreover, we propose a Product Quantization (PQ) feature encoding method to represent embeddings compactly and efficiently without any per-scene optimization. Rather than storing full-length feature vectors or per-scene compressed embeddings [15, 30, 35, 44-46], each Gaussian in our Dr. Splat stores an index from a pre-trained PQ, reducing memory usage to a compression ratio as low as $6.25\%$. By preserving the richness of embeddings while reducing memory usage, PQ is integral to our framework's high scalability and its ability to perform 3D perception tasks, such as open-vocabulary 3D object localization, 3D object selection, and 3D semantic segmentation. Our contributions are summarized as follows:
37
+
38
+ - We propose Dr. Splat, direct registration and referencing of language-aligned features in 3D Gaussians, bypassing intermediate rendering and preserving feature accuracy.
39
+ - We introduce the PQ encoding method for compact feature representation, reducing memory usage while maintaining essential 3D feature properties.
40
+ - We present a novel evaluation protocol to assess accuracy of 3D localization and segmentation for 3D Gaussians, with pseudo-labeling methods and volume-aware metrics.
41
+
42
+ # 2. Related Work and Motivation
43
+
44
+ Language-based 3D scene understanding. Open-set 3D scene understanding has seen considerable advancements, with a focus on methods that inject language knowledge into 3D representations such as point clouds, neural radiance fields (NeRF) [26], and Gaussian Splatting [17] for 3D comprehension. Point-based methods [5, 13, 16, 24, 27, 40, 42] in open-vocabulary settings process point cloud data trained with language embeddings [22, 31] for open-set categories.
45
+
46
+ NeRF-based approaches [7, 18, 20, 23, 32] leverage semantic embeddings from 2D foundation models, such as CLIP [31], LSeg [22] and DINO [1] for open-vocabulary understanding. While the rendering process enhances 2D perception tasks, the implicit nature of NeRF constrains the holistic understanding of 3D structures and dominantly provides 'rendered' feature maps.
47
+
48
+ ![](images/661ebfb1b83ff3f234522f1754b19003d1cd42b5ac8ed6b64cc5527ec81232cb.jpg)
49
+ Input Image
50
+ Figure 2. Visualization of discrepancy in rendered 2D features and 3D features. Color indicates a cosine similarity score between query features from a text query and either (a) 3D features distilled by 2D rendering [30] or (b) directly registered 3D features.
51
+
52
+ ![](images/198ee8404cc7db1e525a8a1dfe822747625635148cdb7eb7040f049b6f5aac7a.jpg)
53
+ (a) Rendering
54
+
55
+ ![](images/70309b7198c42438b0435ecd02ee61895eaffcaf593db86360032994d2f824c9.jpg)
56
+ (b) Registration
57
+
58
+ ![](images/ef7db68bb2dbeb4cefa0dcea53a237be03b9924290b19d0f0e48ef8459882a12.jpg)
59
+
60
+ 3D Gaussian Splatting (3DGS) [17] has emerged as a promising rendering method, as well as a novel representation for open-vocabulary 3D scene understanding. Since this line of research is the most closely related to our work, we first review the preliminaries of 3DGS and then focus on language-embedded 3DGS.
61
+
62
+ Preliminary of 3D Gaussian Splatting. 3DGS [17] encodes appearance and geometry of the target scene into the 3D Gaussian representation. Each 3D primitive representation is expressed as a 3D Gaussian distribution having mean $\boldsymbol{\mu} = [x_{\mu},y_{\mu},z_{\mu}]^{\top}$ for 3D position and covariance matrix $\Sigma_{\mathrm{3D}}\in \mathbb{R}^{3\times 3}$ for 3D volume, as well as the opacity value $\alpha$ and the color c. In particular, the covariance matrix is decomposed into the scale matrix $S\in \mathbb{R}^{3\times 3}$ and the rotation matrix $R\in SO(3)$ , $\Sigma_{\mathrm{3D}} = RSS^{\top}R^{\top}$ . In brief, $N$ numbers of 3D Gaussians can be parametrized as $\Theta = \{\pmb {\theta}_i\}_{i = 1}^N = \{\pmb {\mu}_i,S_i,R_i,\alpha_i,\mathbf{c}_i\}_{i = 1}^N$ . 3D Gaussians $\Theta$ are used to render a 2D pixel color $\hat{\mathbf{c}}$ computed as:
63
+
64
+ $$
65
+ \hat{\mathbf{c}}(\theta) = \sum_{i = 1}^{N} T_{i} \tilde{\alpha}_{i} \mathbf{c}_{i}, \quad \text{s.t.} \quad \tilde{\alpha}_{i} = \alpha_{i} \exp \left(- \frac{1}{2} \mathbf{d}^{\top} \Sigma_{\mathrm{2D}}^{-1} \mathbf{d}\right), \tag{1}
66
+ $$
67
+
68
+ $T_{i}$ is a transmittance, $\tilde{\alpha}_{i}$ is an effective opacity value computed from the Gaussian's opacity $\alpha$ , the pixel distance $\mathbf{d} \in \mathbb{R}^{2 \times 1}$ from the target pixel to the projected center location of the Gaussian in pixel, and $\Sigma_{2\mathrm{D}}$ is the 2D covariance matrix in the image domain obtained from the splatting algorithm [17, 47]. The 3D Gaussian parameters $\Theta$ of a scene are optimized by minimizing the rendering loss between the input image color $\mathbf{c}$ and the rendered color $\hat{\mathbf{c}}(\theta)$ in Eq. (1) as $\arg \min_{\theta} \| \mathbf{c} - \hat{\mathbf{c}}(\theta) \|_F^2$ .
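
To make the blending weights in Eq. (1) concrete, the following is a minimal NumPy sketch of front-to-back alpha compositing along a single pixel ray. The function and array names are illustrative assumptions, not the authors' implementation; the same per-Gaussian weights $T_i\tilde{\alpha}_i$ reappear later in Eq. (4).

```python
import numpy as np

def composite_ray(alpha_eff: np.ndarray, colors: np.ndarray) -> np.ndarray:
    """Front-to-back alpha compositing as in Eq. (1).

    alpha_eff: (N,) effective opacities alpha_tilde_i of the Gaussians
               intersected by one pixel ray, sorted front to back.
    colors:    (N, 3) per-Gaussian colors c_i.
    Returns the composited pixel color c_hat.
    """
    # Transmittance T_i = prod_{k<i} (1 - alpha_tilde_k), with T_1 = 1
    transmittance = np.concatenate(([1.0], np.cumprod(1.0 - alpha_eff)[:-1]))
    weights = transmittance * alpha_eff   # w_i = T_i * alpha_tilde_i, reused in Eq. (4)
    return weights @ colors               # sum_i w_i * c_i

# Toy example: three Gaussians along one ray
print(composite_ray(np.array([0.6, 0.3, 0.8]),
                    np.array([[1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0],
                              [0.0, 0.0, 1.0]])))
```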
69
+
70
+ Language embedded 3D Gaussian Splatting. The basic idea of the language embedded Gaussian representation [10, 15, 21, 30, 35, 44-46] is to replace color rendering with language embedding rendering. Language embedded 3D Gaussians are parameterized as $\Phi = \{\theta_i,\widetilde{\mathbf{f}}_i\}_{i = 1}^N = \{\pmb {\mu}_i,S_i,R_i,\alpha_i,\mathbf{c}_i,\widetilde{\mathbf{f}}_i\}_{i = 1}^N$ , where $\widetilde{\mathbf{f}}_i$ denotes the Gaussian-registered language embedding of each of the $N$ 3D Gaussians, discussed below. Then, analogous to the color rendering in Eq. (1), the language embedding rendering is expressed as:
71
+
72
+ $$
73
+ \hat{\mathbf{f}} = \sum_{i = 1}^{N} T_{i} \tilde{\alpha}_{i} \tilde{\mathbf{f}}_{i}, \tag{2}
74
+ $$
75
+
76
+ ![](images/83636b2cf852f9057421fae09abb6becacdfe8fba321fdda9286e53924efad02.jpg)
77
+ (a-1) Optimized 3D Gaussians $\theta$
78
+
79
+ ![](images/a3f2c4bc9e11da63660161a7d18bc9bba6ad27514fa54aef3c858480600879f7.jpg)
80
+ Large-scale images
81
+
82
+ ![](images/4a6fff1bec2e4d2023df9901ca97e68e6f18fb31582149347c89c8700bc92147.jpg)
83
+ (a-2) PQ codebook construction
84
+ (a) Preprocessing stage
85
+ Figure 3. Overview of Dr. Splat. (a) In the preprocessing stage, we compute optimized 3D Gaussians [17] and Product Quantization (PQ) codebook construction. (b) During training, we extract CLIP embeddings from given images $\{\mathbf{I}\}$ , and then proceed feature registration process (Sec. 3.1). Finally, we obtain 3D Gaussians $\Phi^{\mathrm{ours}}$ with PQ indices $\{j\}$ (Sec. 3.2).
86
+
87
+ ![](images/35d30a6231e6c6d98a340d01dc303be826a781beed7ca95e7917ab486fee929e.jpg)
88
+ (b-1) Patch-wise CLIP embedding extraction
89
+
90
+ ![](images/49188eea106b3c32244b7187e2ba454a16f0a807b57e032a09080f221faa28cc.jpg)
91
+ (b-2) Feature registration process
92
+ (b) Training stage
93
+
94
+ where $\hat{\mathbf{f}}$ denotes a rendered language embedding. Likewise, the Gaussian-registered language embeddings $\{\tilde{\mathbf{f}}\}$ are optimized by minimizing the rendering loss between the 2D language embedding $\mathbf{f}$ extracted from an input image and a rendered language embedding map $\hat{\mathbf{f}}$ as $\arg \min_{\{\tilde{\mathbf{f}}\}}\| \mathbf{f} - \hat{\mathbf{f}} \|_F^2$ at each corresponding pixel. This can be regarded as distilling vision language models into Gaussian-registered language embedding $\tilde{\mathbf{f}}$ through volume rendering Eq. (2). The Gaussian-registered language embeddings are separately trained after pre-training and fixing the pre-trained 3DGS $\Theta$ for a scene. The language embeddings to be distilled are typically obtained from CLIP [31]. Since storing 32-bit 512-D CLIP features $\mathbf{f}$ in every 3D Gaussians is memory-expensive, one can use a compressed feature per scene depending on the needs [15, 35, 44-46].
95
+
96
+ Motivation. Such language-embedded radiance fields provide useful representations and language interfaces for many practical and crucial applications. While most existing works focus on training efficiency, inference-time complexity has barely been discussed. Considering a scenario of text-querying a 3D location in the language-embedded Gaussians, i.e., 3D localization, the aforementioned methods first require rendering a 2D language embedding map at each specific camera pose. We cannot directly retrieve over the distributed embeddings $\{\tilde{\mathbf{f}}_i\}$ in 3D Gaussians, because the embeddings do not carry language information directly; only their weighted-summed (rendered) features $\hat{\mathbf{f}}$ do. This issue becomes even more severe with compressed features as in [30]: their decompression decoders are not designed for, and are incompatible with, direct application to the distributed compressed language embeddings in each 3D Gaussian, yielding degenerate CLIP decoding (refer to Fig. 2).
97
+
98
+ This introduces multiple challenges and hassles. First, it is challenging to find the best or proper camera rendering views that contain the object to be found. One may attempt to pre-compute the minimal number of cameras and their poses that cover all the 3D Gaussians in a scene at proper resolutions, similar to the point-based approach [12]. However, this is a well-known set covering problem [8] with constraints, which is NP-hard.
101
+
102
+ Second, even with pre-computed rendered views, the retrieval complexity over the rendered images remains substantial [9]. Consider a scene consisting of one million Gaussians: just a single rendered language embedding map in the pixel domain already has nearly a million pixels, so we need a dedicated system to efficiently retrieve over all the views. Third, since the retrieval is conducted in 2D space, finding a 3D location requires a separate mechanism to lift the localization to 3D space, increasing system complexity. In addition, 32-bit floating-point 512-dimensional CLIP features for millions of Gaussians are memory intensive, which is often not manageable. To reduce this burden, existing methods [39] apply compression with per-scene optimized codebooks, which hinders extension or generalization to other scenes.
103
+
104
+ To overcome these, we propose a training-free algorithm for the direct allocation of language embeddings to 3D Gaussians, allowing efficient computation and interaction within the 3D space. As concurrent work, OpenGaussian [39] tackles a similar challenge to ours, but still requires per-scene codebook construction (Fig. 1).
105
+
106
+ # 3. Dr. Splat
107
+
108
+ This section provides details of our method. We first explain how we directly register CLIP embeddings into Gaussian-registered language embeddings, Sec. 3.1. Then, we introduce Product Quantization (PQ) into our framework to efficiently store Gaussian-registered language embeddings, Sec. 3.2. Lastly, we describe the inference stage for text query-based 3D Gaussian localization, Sec. 3.3.
109
+
110
+ # 3.1. Feature registration process
111
+
112
+ Our goal is to reconstruct a language embedded 3D space represented by 3D Gaussians $\Phi$ , with which we can directly interact in 3D space without the feature rendering of Eq. (2).
113
+
114
+ ![](images/16e7cd6b4844e0310b5c8e91d13060a8a830725fecc09c7a5f66b970231c06ea.jpg)
115
+
116
+ ![](images/4cdff6a3e3814ef38674dbbf601c93238aa0674283c7855be8e3a6f8f9907a0c.jpg)
117
+ (a) Map CLIP features to Gaussians
118
+ (b) Aggregate multiview features
119
+ (c) Register feature to $\Phi^{\text{ours}}$
120
+ Figure 4. Feature registration process in Dr. Splat. (a) We first map per-pixel CLIP embeddings $\{\mathbf{f}^{\mathrm{map}}\}$ to Gaussians. Here, we only map dominant $k$ Gaussians along pixel ray $r$ , named Top- $k$ Gaussians. (b) After collecting embeddings, we compute aggregated features (Eq. (6)). (c) Finally, we re-use PQ to obtain the PQ indices $j$ of aggregated features and update Gaussian parameters $\Phi^{\mathrm{ours}}$ .
121
+
122
+ For that, following LangSplat [30], we begin by extracting per-pixel CLIP embedding maps $\mathbf{F}^{\mathrm{map}}\in \mathbb{R}^{D\times H\times W}$ from training images of the target scenes, where $D$ is the dimension of CLIP embeddings, and $H$ and $W$ are the height and width of the training images. Given training images, we extract a dictionary of binary masks and corresponding language embeddings: $\mathcal{F}^{\mathrm{map}} = \{\mathbf{M}_j:\mathbf{f}_j^{\mathrm{map}}\mid j = 1,\dots,M\}$ , where $\mathbf{M}_j\in \mathbb{R}^{H\times W}$ is a binary mask extracted using SAM [19] and $\mathbf{f}_j^{\mathrm{map}}\in \mathbb{R}^D$ is the corresponding CLIP embedding of the image cropped with $\mathbf{M}_j$ . Each mask $\mathbf{M}_j$ belongs to an image, and the masks do not overlap each other. With this dictionary, a CLIP embedding map $\mathbf{F}^{\mathrm{map}}(\mathbf{I},\mathbf{r})$ at a pixel $\mathbf{r}$ in a training image $\mathbf{I}$ is computed as:
123
+
124
+ $$
125
+ \mathbf{F}^{\mathrm{map}}(\mathbf{I}, \mathbf{r}) = \sum_{j = 1}^{M} \mathbf{M}_{j}(\mathbf{I}, \mathbf{r}) \cdot \mathbf{f}_{j}^{\mathrm{map}}, \tag{3}
126
+ $$
127
+
128
+ where $\mathbf{M}_j(\mathbf{I},\mathbf{r})\in \{0,1\}$ indicates whether the mask $\mathbf{M}_j$ contains the pixel $\mathbf{r}$ in the image $\mathbf{I}$ . Using $\mathbf{F}^{\mathrm{map}}$ , we reconstruct language embedded 3D Gaussians via a novel feature registration process as visualized in Fig. 3.
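
As a minimal sketch of Eq. (3): given non-overlapping SAM masks and one CLIP embedding per masked crop, the per-pixel embedding map is a simple scatter of each mask's embedding into its pixels. The function and variable names below are hypothetical, and mask extraction and CLIP encoding are assumed to happen elsewhere.

```python
import numpy as np

def build_clip_map(masks: np.ndarray, mask_feats: np.ndarray) -> np.ndarray:
    """Per-pixel CLIP embedding map F_map of Eq. (3).

    masks:      (M, H, W) binary SAM masks for one image, assumed non-overlapping.
    mask_feats: (M, D) CLIP embeddings f_j^map of the corresponding crops.
    Returns F_map of shape (D, H, W); pixels covered by no mask remain zero.
    """
    M, H, W = masks.shape
    D = mask_feats.shape[1]
    f_map = np.zeros((D, H, W), dtype=np.float32)
    for j in range(M):
        # M_j(I, r) * f_j^map for every pixel r inside mask j
        f_map[:, masks[j].astype(bool)] = mask_feats[j][:, None]
    return f_map
```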
129
+
130
+ During the feature registration process, our algorithm iterates through training images of the scene. Using projection relation, we link 3D Gaussians $\Phi$ to CLIP embeddings. Each Gaussian can link to multiple CLIP embeddings derived from different images. Then we aggregate collected embeddings to a single embedding to be assigned to each Gaussian. To ensure a consistent aggregation of the embeddings from multi-view images, we first compute a weight $w_{i}(\mathbf{I},\mathbf{r})$ representing the contribution of $\theta_{i}$ to construct each pixel $\mathbf{r}$ in a training image $\mathbf{I}$ . The weights are computed with the volume rendering equation Eq. (1) as:
131
+
132
+ $$
133
+ w_{i}(\mathbf{I}, \mathbf{r}) = T_{i}(\mathbf{I}, \mathbf{r}) \cdot \tilde{\alpha}_{i}(\mathbf{I}, \mathbf{r}), \tag{4}
134
+ $$
135
+
136
+ where $T_{i}(\mathbf{I},\mathbf{r})$ and $\tilde{\alpha}_i(\mathbf{I},\mathbf{r})$ are the transmittance and the effective opacity value of $\theta_{i}$ for a pixel $\mathbf{r}$ in an image $\mathbf{I}$ , as stated in Eq. (1). With the per-pixel weights, we calculate $w_{ij}$ , a weight between each Gaussian $\theta_i$ and the corresponding language embedding $\mathbf{f}_j^{\mathrm{map}}$ , which is used for aggregating CLIP embeddings from $\mathbf{F}^{\mathrm{map}}$ and registering the embedding to each Gaussian. The weights are computed as:
139
+
140
+ $$
141
+ w_{ij} = \sum_{\mathbf{I} \in \mathcal{I}} \sum_{\mathbf{r} \in \mathbf{I}} \mathbf{M}_{j}(\mathbf{I}, \mathbf{r}) \cdot w_{i}(\mathbf{I}, \mathbf{r}), \tag{5}
142
+ $$
143
+
144
+ where $\mathcal{I}$ is the set of training images. In this iterative process, we aggregate weights only for the Top- $k$ Gaussians with the highest weights $w_{i}(\mathbf{I},\mathbf{r})$ along each pixel ray $\mathbf{r}$ (see Fig. 4). After aggregation, we prune the Gaussians which are not assigned any weight, i.e., $\sum_{j=1}^{M} w_{ij} = 0$ . This summation aggregates weights between Gaussians and the CLIP embeddings by linking the per-pixel weights $w_{i}(\mathbf{I},\mathbf{r})$ of each Gaussian to its corresponding CLIP embeddings. With the obtained weights, we register an aggregated feature $\dot{\mathbf{f}}_i$ to each Gaussian via weighted averaging as:
145
+
146
+ $$
147
+ \dot{\mathbf{f}}_{i} = \mathbf{f}_{i} / \|\mathbf{f}_{i}\|_{2}, \quad \text{where} \quad \mathbf{f}_{i} = \sum_{j = 1}^{M} \frac{w_{ij}}{\sum_{k = 1}^{M} w_{ik}} \mathbf{f}_{j}^{\mathrm{map}}. \tag{6}
148
+ $$
149
+
150
+ This process enables 3D-aware feature registration that is consistent across various viewpoints, by aggregating features in the original high-dimensional feature space. The proposed process can be interpreted as an inverse volume rendering without gradient-based optimization, which makes our method faster than the prior methods requiring per-scene gradient-based optimization [27, 30, 35] for feature registration in 3D space.
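
The registration of Eqs. (4)–(6) can be sketched as follows. It assumes a rasterizer hook that reports, for every pixel, the indices and blending weights $w_i(\mathbf{I},\mathbf{r}) = T_i\tilde{\alpha}_i$ of the Gaussians it intersects; such a hook is not part of the stock 3DGS interface, and all names here are hypothetical.

```python
import numpy as np

def register_features(num_gaussians: int, feat_dim: int, views, k: int = 20):
    """Accumulate Eq. (5) weights and return the Eq. (6) aggregated features.

    `views` yields, per training image, an iterable of pixel records
    (gauss_ids, gauss_weights, pixel_feat), where gauss_ids are the Gaussians
    hit by that pixel ray, gauss_weights their w_i = T_i * alpha_tilde_i
    (Eq. (4)), and pixel_feat the pixel's CLIP embedding from Eq. (3).
    """
    feat_sum = np.zeros((num_gaussians, feat_dim), dtype=np.float32)
    weight_sum = np.zeros(num_gaussians, dtype=np.float32)

    for pixel_records in views:
        for gauss_ids, gauss_weights, pixel_feat in pixel_records:
            top = np.argsort(gauss_weights)[::-1][:k]            # Top-k dominant Gaussians
            ids, w = gauss_ids[top], gauss_weights[top]
            feat_sum[ids] += w[:, None] * pixel_feat[None, :]    # numerator of Eq. (6)
            weight_sum[ids] += w                                 # denominator of Eq. (6)

    keep = weight_sum > 0            # prune Gaussians that received no weight
    feats = np.zeros_like(feat_sum)
    feats[keep] = feat_sum[keep] / weight_sum[keep, None]
    feats[keep] /= np.linalg.norm(feats[keep], axis=1, keepdims=True)   # f_dot_i
    return feats, keep
```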
151
+
152
+ # 3.2. Product-Quantized CLIP embeddings
153
+
154
+ Memory efficiency is a challenge in 3D scene representations, especially when associating Gaussians with high-dimensional feature vectors. LangSplat [30] addresses this by introducing an encoder-decoder network, while LEGaussians [35] and OpenGaussian [39] utilize codebook construction. However, these approaches introduce additional per-scene computational costs for scene-specific parameter tuning of neural networks or codebooks (see Fig. 1).
155
+
156
+ <table><tr><td rowspan="2">Methods</td><td colspan="5">mIoU</td><td colspan="5">mAcc @ 0.25</td></tr><tr><td>waldo_kitchen</td><td>ramen</td><td>figurines</td><td>teatime</td><td>Mean</td><td>waldo_kitchen</td><td>ramen</td><td>figurines</td><td>teatime</td><td>Mean</td></tr><tr><td>LangSplat-m [30]</td><td>8.29</td><td>6.11</td><td>8.33</td><td>16.58</td><td>9.83</td><td>13.64</td><td>14.08</td><td>8.93</td><td>27.12</td><td>15.94</td></tr><tr><td>OpenGaussian [39]</td><td>34.60</td><td>23.87</td><td>59.33</td><td>54.44</td><td>43.06</td><td>50.00</td><td>35.21</td><td>80.36</td><td>72.88</td><td>59.61</td></tr><tr><td>Ours (Top-10)</td><td>37.05</td><td>24.33</td><td>54.42</td><td>57.35</td><td>43.29</td><td>63.64</td><td>35.21</td><td>80.36</td><td>77.97</td><td>64.30</td></tr><tr><td>Ours (Top-20)</td><td>38.33</td><td>24.58</td><td>53.94</td><td>56.19</td><td>43.26</td><td>63.64</td><td>35.21</td><td>82.14</td><td>76.27</td><td>64.32</td></tr><tr><td>Ours (Top-40)</td><td>39.07</td><td>24.70</td><td>53.36</td><td>57.20</td><td>43.58</td><td>63.64</td><td>35.21</td><td>80.36</td><td>76.27</td><td>63.87</td></tr></table>
157
+
158
+ Table 1. 3D object selection results on the LeRF-OVS dataset [18]. To measure 3D object selection performance, we calculate 2D segmentation accuracy on rendering of selected 3D Gaussians. Note that our model does not require per-scene optimization, demonstrating its robustness across diverse scenes. Bold and Underline stand for first and second best performance.
159
+
160
+ ![](images/59d2615fa9642f3c7f116e6b4740bb4f1966380ee253214f1db801e8b57adb78.jpg)
161
+ Figure 5. Qualitative results of object selection on the LeRF-OVS dataset [18]. We visualize renderings of the selected 3D Gaussians for LangSplat [30], OpenGaussian [39], and ours. For LangSplat, activations are often distributed randomly, failing to localize the target. OpenGaussian often struggles to distinguish closely situated objects. In contrast, our model shows activations precisely limited to the queried object regions, effectively localizing only the relevant areas.
162
+
163
+ In contrast, we propose to use Product Quantization (PQ) trained on a large-scale image dataset, eliminating per-scene training.
164
+
165
+ Product Quantization. PQ [14] is a widely used technique for efficient embedding compression, particularly valuable in large-scale applications. The PQ process begins by dividing the original $D$ -dimensional feature vector $\mathbf{v}$ into $L$ sub-vectors: $\mathbf{v} = [\mathbf{v}_1,\mathbf{v}_2,\dots ,\mathbf{v}_L]$ . Each sub-vector $\mathbf{v}_i$ is then independently quantized to one of a predefined number of centroids $s_{ik}$ in a codebook $S_{i}$ for that sub-vector. These centroids are learned via clustering, creating a codebook for each subspace. Once the centroids are established, each sub-vector is replaced by the index of the nearest centroid in its respective codebook, i.e., $j_{i} = \arg \min_k\| \mathbf{v}_i - s_{ik}\|$ , where each index $j_{i}$ is stored as an 8-bit unsigned integer and a vector is represented by the index tuple $[j_{1},j_{2},\ldots ,j_{L}]$ .
166
+
167
+ Then, we can measure the distance between the query and the data by adding distances between coarse centroids.
168
+
169
+ ![](images/bb53dbe74ed7a8695c5e44b04413f01b8871538d59f3c8be9303e5627a398927.jpg)
170
+ Figure 6. Limitations of point-based IoU measurement. This figure shows the effect of removing the top and bottom $30\%$ of Gaussians according to the proposed significant score, implying that volume differences significantly impact 3D accuracy. The results highlight the need for the proposed IoU metric for 3D Gaussians.
171
+
172
+ <table><tr><td></td><td>3D mIoU</td><td>IoU &gt; 0.15</td><td>19 classes IoU &gt; 0.3</td><td>IoU &gt; 0.45</td></tr><tr><td>LangSplat-m [30]</td><td>8.0</td><td>17.1</td><td>7.8</td><td>2.9</td></tr><tr><td>LEGaussians-m [35]</td><td>9.5</td><td>19.1</td><td>8.9</td><td>7.3</td></tr><tr><td>OpenGaussian [39]</td><td>25.2</td><td>59.5</td><td>38.0</td><td>18.3</td></tr><tr><td>Ours (Top-20)</td><td>25.0</td><td>60.7</td><td>40.3</td><td>20.0</td></tr><tr><td>Ours (Top-40)</td><td>25.4</td><td>60.7</td><td>40.3</td><td>25.6</td></tr></table>
173
+
174
+ (a) 3D object localization task.
175
+
176
+ <table><tr><td rowspan="2"></td><td colspan="2">19 classes</td><td colspan="2">15 classes</td><td colspan="2">10 classes</td></tr><tr><td>mIoU</td><td>mAcc.</td><td>mIoU</td><td>mAcc.</td><td>mIoU</td><td>mAcc.</td></tr><tr><td>LangSplat-m [30]</td><td>2.0</td><td>9.2</td><td>4.9</td><td>14.6</td><td>8.0</td><td>23.9</td></tr><tr><td>LEGaussians-m [35]</td><td>1.6</td><td>7.9</td><td>4.6</td><td>16.1</td><td>7.7</td><td>24.9</td></tr><tr><td>OpenGaussian [39]</td><td>30.1</td><td>46.5</td><td>38.1</td><td>56.8</td><td>49.7</td><td>71.4</td></tr><tr><td>Ours (Top-20)</td><td>28.0</td><td>44.6</td><td>38.2</td><td>60.4</td><td>47.2</td><td>68.9</td></tr><tr><td>Ours (Top-40)</td><td>29.6</td><td>47.7</td><td>38.2</td><td>60.4</td><td>50.2</td><td>73.5</td></tr></table>
177
+
178
+ (b) Open-vocabulary 3D semantic segmentation task.
179
+
180
+ Table 2. Quantitative comparison on the ScanNet dataset [4]. (a) Localization predictions are defined as 3D regions with a text similarity score above a threshold. (b) We assign segmentation labels by finding the maximum activation among all classes. Note that Bold and Underline stand for first and second best performance, respectively.
181
+
182
+ ![](images/d60ac75a2fce5055f581d9f1678d797aa1c5d82dc6c56f088e3814661eb6caa5.jpg)
183
+ Figure 7. Qualitative results of 3D object localization. We visualize 3D localization activations (yellow) for "chair" and "desk" in the ScanNet dataset, comparing our method with others. It turns out that LangSplat-m and LEGaussians-m fail to localize objects accurately, while OpenGaussian struggles with object correspondence. Our model delivers precise and consistent localization across diverse queries.
184
+
185
+ Once the distances between centroids are precomputed as a lookup table, the computation shifts to simple indexing, which reduces the search complexity from $\mathcal{O}(D)$ to $\mathcal{O}(1)$ for a $D$ -dimensional sample. This approach notably reduces computational complexity, making it suitable for large-scale search.
186
+
187
+ In our setup for language-based 3D scene understanding, we build PQ centroids based on CLIP embeddings using a large-scale image dataset, the LVIS dataset [11], which contains over 1.2M instances covering various long-tail classes with ground truth segmentation. We extract instance patches from images and collect patch-wise CLIP embeddings. After we build this CLIP embedding database, we proceed with the construction of the centroid codebook for our PQ. Once PQ is trained, any query embedding can be approximated by assigning the closest centroid for each sub-vector. This is a one-time procedure; once we determine the codebook, we can use it for any scene. In our setup, each embedding is represented as a sequence of centroid indices rather than a high-dimensional vector. Accordingly, our language embedded Gaussians are parametrized as $\Phi^{\mathrm{ours}} = \{\phi_i^{\mathrm{ours}}\}_{i=1}^N = \{\theta_i, j_i\}_{i=1}^N$ , where the aggregated feature $\dot{\mathbf{f}}_i$ is converted to a quantized feature $\bar{\mathbf{f}}_i$ via the corresponding PQ index $j_i$ .
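
Below is a minimal sketch of this scene-agnostic PQ step using the faiss library's `ProductQuantizer` (faiss [6] is one common PQ implementation). The random `clip_bank` stands in for the patch-wise LVIS CLIP embeddings, and the choice of 128 sub-vectors assumes that the "sub-vector sizes" in Sec. 4.4 refer to the number of sub-vectors $L$, which gives the 1/16, i.e. 6.25%, footprint mentioned above.

```python
import numpy as np
import faiss  # pip install faiss-cpu

D, L, NBITS = 512, 128, 8                  # 128 sub-vectors, 2^8 centroids each
pq = faiss.ProductQuantizer(D, L, NBITS)

# One-time codebook training on a generic bank of CLIP embeddings
# (e.g., patch-wise features of LVIS instances); no per-scene tuning.
clip_bank = np.random.randn(100_000, D).astype(np.float32)   # placeholder data
clip_bank /= np.linalg.norm(clip_bank, axis=1, keepdims=True)
pq.train(clip_bank)

# Encode the aggregated per-Gaussian features from Eq. (6): each Gaussian then
# stores L uint8 indices (128 bytes) instead of 512 float32 values (2048 bytes).
gauss_feats = clip_bank[:1000]                                # stand-in features
codes = pq.compute_codes(gauss_feats)                         # (1000, 128) uint8
quantized = pq.decode(codes)                                  # reconstructed f_bar
print(codes.shape, quantized.shape)
```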
188
+
189
+ # 3.3. Text-query based 3D localization
190
+
191
+ After training 3D Gaussians $\Phi^{\mathrm{ours}}$ with our feature registration process and PQ, we describe the details of an inference mode that facilitates direct interaction with 3DGS upon receiving input queries, such as text. This is related to similarity score computation between a query and sources, i.e. Gaussian embeddings. Given a text, we first extract a query feature $\mathbf{q}$ using CLIP text encoder [31]. We reconstruct the quantized features $\{\bar{\mathbf{f}}_i\}_{i = 1}^N$ from the stored PQ indices $\{j_i\}_{i = 1}^N$ . Then, we compute a cosine similarity score between the query feature $\mathbf{q}$ and all quantized features.
192
+
193
+ Despite its simplicity, solely relying on the cosine similarity may result in diminished discriminability across certain similarity scores.
194
+
195
+ To address this limitation, we incorporate a re-ranking process based on relative activation with respect to the canonical feature. For this process, we adopt the relevancy scoring method proposed in LeRF [18], which enables more precise similarity analysis for a query. Specifically, each rendered language embedding, $\mathbf{f}^{\mathrm{map}}$ and a text query feature $\mathbf{q}$ , yield a relevance score determined by $\min_{i} \frac{\exp(\mathbf{f}^{\mathrm{map}} \cdot \mathbf{q})}{\exp(\mathbf{f}^{\mathrm{map}} \cdot \mathbf{q}) + \exp(\mathbf{f}^{\mathrm{map}} \cdot \mathbf{f}^{\mathrm{canon}, i})}$ , where $(\cdot)$ is an element-wise dot product operator and $\mathbf{f}^{\mathrm{canon}, i}$ indicates CLIP embeddings of a designated canonical term selected from a set of "object," "things," "stuff," and "texture". Then, we sample 3D Gaussians based on the relevance score for downstream tasks.
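
A sketch of this inference path, under the assumption that the per-Gaussian features have already been decoded from their PQ indices and L2-normalized: score every Gaussian against the text query, then re-rank with the LERF-style relevancy against the canonical phrases. The threshold and helper names are illustrative only.

```python
import numpy as np

CANONICAL_PHRASES = ["object", "things", "stuff", "texture"]

def relevancy_scores(gauss_feats: np.ndarray, query: np.ndarray,
                     canon: np.ndarray) -> np.ndarray:
    """LERF-style relevancy (Sec. 3.3) of every Gaussian w.r.t. a text query.

    gauss_feats: (N, D) L2-normalized per-Gaussian features (decoded from PQ).
    query:       (D,)   L2-normalized CLIP text embedding of the query.
    canon:       (C, D) L2-normalized CLIP embeddings of the canonical phrases.
    """
    sim_q = gauss_feats @ query                  # (N,)   cosine similarity to query
    sim_c = gauss_feats @ canon.T                # (N, C) similarity to canonical terms
    # pairwise softmax against each canonical phrase, then take the minimum over C
    rel = np.exp(sim_q)[:, None] / (np.exp(sim_q)[:, None] + np.exp(sim_c))
    return rel.min(axis=1)

def select_gaussians(gauss_feats, query, canon, thresh: float = 0.6) -> np.ndarray:
    """Indices of Gaussians whose relevancy exceeds the (illustrative) threshold."""
    return np.nonzero(relevancy_scores(gauss_feats, query, canon) >= thresh)[0]
```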
196
+
197
+ ![](images/f56d8e6ee41ed3f82b9acf4db7a68eaffd5e5d7b2ec2aa1ab60f323d5fa572ed.jpg)
198
+ Figure 8. Visualization of open-vocabulary 3D semantic segmentation on the ScanNet dataset [4]. We visualize 3D Gaussian splat-based semantic segmentation using language features allocation of OpenGaussian [39] and Dr. Splat (ours) model on the same RGB-pretrained 3DGS. Note that, not specifically designed for segmentation, it achieves high performance as a result of language-based Gaussian updates.
199
+
200
+ # 4. Experiments
201
+
202
+ Dataset. We use two datasets to evaluate 3D scene understanding performance. For the 3D object selection task (Sec. 4.1), we use the LERF [18] dataset annotated by LangSplat [30], which consists of multi-view images of 3D scenes containing long-tail objects and includes 2D ground truth annotations for text queries. For the 3D object localization (Sec. 4.2) and 3D semantic segmentation (Sec. 4.3) tasks, we employ the ScanNet [4] dataset. ScanNet is a large-scale benchmark that provides data on indoor scenes, including calibrated RGBD images and 3D point clouds with ground-truth semantic labels. We randomly select eight scenes from ScanNet for the experiments.
203
+
204
+ Competing methods. The only method available for a fair comparison with our method is the concurrent work, OpenGaussian [39]. To study the various aspects of our method, we introduce baseline methods modified from rasterization-based ones [30, 35] for direct 3D referring, denoted as LangSplat-m and LEGaussians-m. As discussed in Sec. 2, without modification, global search over a whole scene is quite demanding. To ensure fair evaluation, we use the same initial 3D Gaussians, trained using only RGB inputs, for all compared methods, and freeze the Gaussians during the language feature allocation process. Also, the per-pixel CLIP [31] embedding maps are unified for SAM-based [19] methods [30, 39], including ours. We follow the hyperparameter settings reported as favorable in each respective paper.
205
+
206
+ # 4.1. 3D object selection
207
+
208
+ Settings. We first extract text features from an open-vocabulary text query using the CLIP model. Next, we compare text features to the 3D features embedded in each Gaussian using cosine similarity. By thresholding the similarity, we identify the 3D Gaussians that are relevant to the given text query. The selected 3D points are subsequently rendered into multi-view images using the 3DGS rasterization pipeline.
209
+
210
+ Results. We compare our model quantitatively with 3DGS-based language-embedded models as shown in Table 1. The results demonstrate that our method performs better object selection in most scenes, showing an improvement of over 0.5 in mIoU and more than 4.5 in mAcc compared to counterpart models. Notably, the rasterization-based method, LangSplat-m, often underperforms in most scenes.
211
+
212
+ Qualitative results are shown in Fig. 5. For LangSplat-m, the activations often highlight random 3D Gaussians or fail to localize entirely (e.g., see "coffee mug"), highlighting the limitations of rasterization-based methods and their unsuitability for 3D understanding, in line with the observation from Fig. 2. OpenGaussian frequently exhibits false activations with incorrect text-object pairs (e.g., "apple" and "tea in a glass") and struggles to distinguish between nearby objects (e.g., "waldo," "rubik's cube"). These artifacts can be attributed to the use of spatial clustering and limited encoder capacity.
213
+
214
+ In contrast, our model leverages general image features thanks to the general PQ, maintaining feature distinctiveness regardless of scene complexity. Our feature registration considers the 3D geometry of the 3D Gaussians, which results in superior performance in 3D scene understanding tasks.
215
+
216
+ # 4.2. 3D object localization
217
+
218
+ Settings. Similar to the 3D object selection task, we calculate the cosine similarity between the text query and the 3D features embedded in each Gaussian. By thresholding the similarity, we identify the 3D Gaussians relevant to the given text query. To enable volume-aware localization evaluation, we propose a protocol that measures the IoU of 3D Gaussians, extending the traditional metric of point cloud-based approaches by incorporating the volumetric information of 3D Gaussians.
219
+
220
+ Novel evaluation protocol for 3D localization in 3DGS. Unlike the conventional evaluation protocol for the 3D localization task in point clouds, it is tricky to evaluate 3D localization performance on 3D Gaussians [17]. This is primarily due to the non-deterministic structure of the Gaussian distribution. To address this issue, we compute 3DGS pseudo-labels for evaluating 3DGS localization in a volume-aware way. The details can be found in the supplementary material.
223
+
224
+ Given the ground truth, we measure IoU considering the spatial significance of each Gaussian and define a significant score $d_{i}$ for each Gaussian $\theta_{i}$ with its scale $\mathbf{s}_i = [s_{ix}, s_{iy}, s_{iz}]$ and opacity $\alpha_{i}$ as $d_{i} = s_{ix}s_{iy}s_{iz}\alpha_{i}$ , where $s_{ix}s_{iy}s_{iz}$ denotes the relative ellipsoid volume of a Gaussian $\theta_{i}$ . With the obtained significant scores $\mathbf{d} = [d_1, d_2, \dots, d_N]$ , we compute a weighted IoU of 3D Gaussians to approximate volumes. The proposed metric is designed to assign a larger weight to Gaussians with higher significant scores when measuring IoU. Figure 6 shows that the impact of each Gaussian on the scene varies extremely depending on its significant score, which demonstrates the necessity of the proposed IoU metric on 3D Gaussians that accounts for the unequal contribution of each Gaussian.
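
One way to read this metric: each Gaussian contributes its significant score $d_i = s_{ix}s_{iy}s_{iz}\alpha_i$ instead of a unit count, so IoU becomes a score-weighted overlap between the predicted and ground-truth Gaussian sets. The sketch below reflects this reading under that assumption; it is not the authors' evaluation code.

```python
import numpy as np

def weighted_iou(pred: np.ndarray, gt: np.ndarray,
                 scales: np.ndarray, opacities: np.ndarray) -> float:
    """Volume-weighted IoU over a set of 3D Gaussians.

    pred, gt:  (N,) boolean selections of Gaussians (prediction / pseudo-label).
    scales:    (N, 3) per-Gaussian scales (s_x, s_y, s_z).
    opacities: (N,) per-Gaussian opacities alpha.
    """
    d = scales.prod(axis=1) * opacities          # significant score d_i
    union = d[pred | gt].sum()
    return float(d[pred & gt].sum() / union) if union > 0 else 0.0
```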
225
+
226
+ Results. We report the 3D localization performance on the ScanNet dataset in Table 2a. The 2D rasterization-based methods [30, 35] struggle to achieve precise activations for 3D localization. They inherently face challenges when applied to 3D tasks because they need to render 2D images for scene interaction. Even compared with the 3D space search method, OpenGaussian [39], our model consistently demonstrates superior performance and achieves higher accuracy in localization. Figure 7 also shows that LangSplat-m and LEGaussians-m fail to properly localize the objects, and OpenGaussian misses queried objects in the scene.
227
+
228
+ # 4.3. 3D semantic segmentation
229
+
230
+ Settings. For a given set of open-vocabulary text labels, we perform segmentation by assigning each Gaussian a label having the highest activation among the known label set.
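
In other words, segmentation reduces to an argmax over per-label activations for every Gaussian. A minimal sketch, assuming raw cosine similarity as the activation (whether the relevancy re-ranking of Sec. 3.3 is applied per label is an assumption left open here):

```python
import numpy as np

def segment_gaussians(gauss_feats: np.ndarray, label_feats: np.ndarray) -> np.ndarray:
    """Assign each Gaussian the open-vocabulary label with the highest activation.

    gauss_feats: (N, D) L2-normalized per-Gaussian features.
    label_feats: (K, D) L2-normalized CLIP text embeddings of the K label names.
    Returns an (N,) array of label indices in [0, K).
    """
    activations = gauss_feats @ label_feats.T    # (N, K) cosine similarities
    return activations.argmax(axis=1)
```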
231
+
232
+ Results. The numerical comparison is presented in Table 2b. Although not explicitly designed for semantic segmentation, our model achieves notable performance in this task as a result of accurately updating each Gaussian with language features. Consistent with previous observations, rasterization-based 3DGS models exhibit lower segmentation performance. While OpenGaussian performs position-based clustering, our model demonstrates comparable performance, surpassing the baseline as the Top- $k$ value increases. Our model also achieves better segmentation results, with a visual comparison of the segmented scene shown in Fig. 8.
233
+
234
+ # 4.4. Ablation study
235
+
236
+ We conduct an ablation study using the ScanNet dataset on different hyper-parameters of Dr. Splat to measure the contribution of each component.
237
+
238
+ Product Quantization. PQ introduces a trade-off between memory usage, computational efficiency, and accuracy. To better understand the balance between computational cost and localization quality, we conduct an ablation study by varying the number of sub-vectors.
239
+
240
+ ![](images/9c764171a4293e753d1212bf7a66c0a40e477b3e7a9e1027f1c92fc88580201c.jpg)
241
+ (a) Ablation on PQ parameters.
242
+
243
+ ![](images/646e4cd3c9c5fc8ce51bba6b11b5b2d0110c1a0aff672bceb637c62b9d224bd4.jpg)
244
+ (b) Ablation on Top- $k$
245
+ Figure 9. Ablation study on (a) PQ and (b) Top- $k$ Gaussians.
246
+
247
+ We evaluate performance at sub-vector sizes of 64, 128, and 256. Notably, these settings correspond to bit-size reductions to 1/32, 1/16, and 1/8 of the original CLIP feature, respectively. We measure the query distance computation time for one million data points, averaging results over 100 iterations for the efficiency measure. Our findings reveal a favorable trade-off between quantization efficiency and accuracy (see Fig. 9-(b)) on the Pareto front with our PQ configurations. This achieves a balance that maximizes memory and computational efficiency while minimizing any loss in accuracy.
248
+
249
+ Top- $k$ Gaussians. We examine the influence of the number of Gaussians assigned per ray. This parameter affects both memory requirements and computation, serving as a critical factor in overall performance. The ratio of pruned Gaussians and the mIoU results from different $k$ are presented in Fig. 9a. We observe that increasing the aggregating number of Gaussians per ray improves localization performance; however, it results in higher memory consumption and the number of occupied Gaussians, indicating a clear trade-off.
250
+
251
+ # 5. Discussion and Conclusion
252
+
253
+ We present Dr. Splat, which is a novel approach for open-vocabulary 3D scene understanding by directly registering language embeddings to 3D Gaussians, eliminating the need for an intermediate rendering process. Compared to the previous 2D rendering-based methods [30, 35], which have limited search domain and capacity, our method directly searches 3D space while preserving the fidelity of language embeddings. This operation is further accelerated by the integration of Product Quantization (PQ)
254
+
255
+ Experimental results validate Dr. Splat's superior performance across various 3D scene understanding tasks, including open-vocabulary 3D object selection, 3D object localization, and 3D semantic segmentation. These findings highlight Dr. Splat's ability to transform 3D scene understanding by achieving a balance between highly representative quality and computational efficiency. This breakthrough paves the way for advanced applications in robotics, autonomous navigation, and augmented reality.
256
+
257
+ # References
258
+
259
+ [1] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 2
260
+ [2] Jaesung Choe, Chunghyun Park, Francois Rameau, Jaesik Park, and In So Kweon. Pointmixer: Mlp-mixer for point cloud understanding. In European Conference on Computer Vision, pages 620-640. Springer, 2022. 1
261
+ [3] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 1
262
+ [4] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, pages 5828-5839, 2017. 6, 7, 13, 15
263
+ [5] Runyu Ding, Jihan Yang, Chuhui Xue, Wenqing Zhang, Song Bai, and Xiaojuan Qi. Pla: Language-driven open-vocabulary 3d scene understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7010-7019, 2023. 2
264
+ [6] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. The faiss library. arXiv preprint arXiv:2401.08281, 2024. 11
265
+ [7] Francis Engelmann, Fabian Manhardt, Michael Niemeyer, Keisuke Tateno, Marc Pollefeys, and Federico Tombari. OpenNerf: Open Set 3D Neural Scene Segmentation with PixelWise Features and Rendered Novel Views. In ICLR, 2024. 2
266
+ [8] Michael R. Garey and David S. Johnson. Computers and intractability. a guide to the theory of np-completeness. W. H. Freeman and company, 174, 1979. 3
267
+ [9] Antoine Guédon, Tom Monnier, Pascal Monasse, and Vincent Lepetit. Macarons: Mapping and coverage anticipation with rgb online self-supervision. In CVPR, 2023. 3
268
+ [10] Jun Guo, Xiaojian Ma, Yue Fan, Huaping Liu, and Qing Li. Semantic gaussians: Open-vocabulary scene understanding with 3d gaussian splatting. arXiv preprint arXiv:2403.15624, 2024. 2
269
+ [11] Agrim Gupta, Piotr Dollar, and Ross Girshick. Lvis: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5356-5364, 2019. 6
270
+ [12] Zhening Huang, Xiaoyang Wu, Xi Chen, Hengshuang Zhao, Lei Zhu, and Joan Lasenby. Openins3d: Snap and lookup for 3d open-vocabulary instance segmentation. In European Conference on Computer Vision, pages 169-185. Springer, 2025. 1, 3
271
+ [13] Krishna Murthy Jatavallabhula, Alihusein Kuwajerwala, Qiao Gu, Mohd Omama, Tao Chen, Alaa Maalouf, Shuang Li, Ganesh Iyer, Soroush Saryazdi, Nikhil Keetha, et al. Conceptfusion: Open-set multimodal 3d mapping. arXiv preprint arXiv:2302.07241, 2023. 2, 16
272
+
273
+ [14] Herve Jegou, Matthijs Douze, and Cordelia Schmid. Product quantization for nearest neighbor search. IEEE transactions on pattern analysis and machine intelligence, 33(1):117-128, 2010. 5, 15
274
+ [15] Yuzhou Ji, He Zhu, Junshu Tang, Wuyi Liu, Zhizhong Zhang, Yuan Xie, and Xin Tan. Fastlgs: Speeding up language embedded gaussians with feature grid mapping. arXiv preprint arXiv:2406.01916, 2024. 2, 3
275
+ [16] Li Jiang, Shaoshuai Shi, and Bernt Schiele. Open-vocabulary 3d semantic segmentation with foundation models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21284-21294, 2024. 1, 2
276
+ [17] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 2023. 1, 2, 3, 7, 11, 13, 14
277
+ [18] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Lerf: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19729-19739, 2023. 1, 2, 5, 6, 7, 12
278
+ [19] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollár, and Ross B. Girshick. Segment anything. In ICCV, 2023. 4, 7, 11
279
+ [20] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nerf for editing via feature field distillation. Advances in Neural Information Processing Systems, 35:23311-23330, 2022. 2
280
+ [21] Hyunjee Lee, Youngsik Yun, Jeongmin Bae, Seoha Kim, and Youngjung Uh. Rethinking open-vocabulary segmentation of radiance fields in 3d space, 2024. 2
281
+ [22] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and René Ranftl. Language-driven semantic segmentation. arXiv preprint arXiv:2201.03546, 2022. 2
282
+ [23] Kunhao Liu, Fangneng Zhan, Jiahui Zhang, Muyu Xu, Yingchen Yu, Abdulmotaleb El Saddik, Christian Theobalt, Eric Xing, and Shijian Lu. Weakly supervised 3d open-vocabulary segmentation. Advances in Neural Information Processing Systems, 36:53433-53456, 2023. 2
283
+ [24] Minghua Liu, Yinhao Zhu, Hong Cai, Shizhong Han, Zhan Ling, Fatih Porikli, and Hao Su. Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 21736-21746, 2023. 2
284
+ [25] Zhicheng Lu, Xiang Guo, Le Hui, Tianrui Chen, Ming Yang, Xiao Tang, Feng Zhu, and Yuchao Dai. 3d geometry-aware deformable gaussian splatting for dynamic view synthesis. In CVPR, 2024. 16
285
+ [26] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2
286
+ [27] Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, et al. Openscene: 3d scene understanding with open vocabularies. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 815-824, 2023. 1, 2, 4, 16
287
+
288
+ [28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1
289
+ [29] Guocheng Qian, Yuchen Li, Houwen Peng, Jinjie Mai, Hasan Hammoud, Mohamed Elhoseiny, and Bernard Ghanem. Pointnext: Revisiting pointnet++ with improved training and scaling strategies. Advances in neural information processing systems, 35:23192-23204, 2022. 1
290
+ [30] Minghan Qin, Wanhua Li, Jiawei Zhou, Haoqian Wang, and Hanspeter Pfister. Langsplat: 3d language gaussian splatting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20051-20060, 2024. 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 16, 17, 18
291
+ [31] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2, 3, 6, 7, 12
292
+ [32] Adam Rashid, Satvik Sharma, Chung Min Kim, Justin Kerr, Lawrence Yunliang Chen, Angjoo Kanazawa, and Ken Goldberg. Language embedded radiance fields for zero-shot task-oriented grasping. In 7th Annual Conference on Robot Learning, 2023. 2, 16
293
+ [33] Damien Robert, Hugo Raguet, and Loic Landrieu. Efficient 3d semantic segmentation with superpoint transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17195-17204, 2023. 1
294
+ [34] David Rozenberszki, Or Litany, and Angela Dai. Language-grounded indoor 3d semantic segmentation in the wild. In ECCV, 2022. 16
295
+ [35] Jin-Chuan Shi, Miao Wang, Hao-Bin Duan, and Shao-Hua Guan. Language embedded 3d gaussians for open-vocabulary scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5333-5343, 2024. 1, 2, 3, 4, 6, 7, 8, 11, 12, 16, 17, 18
296
+ [36] Ayca Takmaz, Elisabetta Fedele, Robert W Sumner, Marc Pollefeys, Federico Tombari, and Francis Engelmann. Openmask3d: Open-vocabulary 3d instance segmentation. arXiv preprint arXiv:2306.13631, 2023. 1
297
+ [37] Matthew Tancik, Vincent Casser, Xinchen Yan, Sabeek Pradhan, Ben Mildenhall, Pratul P Srinivasan, Jonathan T Barron, and Henrik Kretzschmar. Block-nerf: Scalable large scene neural view synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8248–8258, 2022. 16
298
+ [38] Xiaoyang Wu, Li Jiang, Peng-Shuai Wang, Zhijian Liu, Xihui Liu, Yu Qiao, Wanli Ouyang, Tong He, and Hengshuang Zhao. Point transformer v3: Simpler faster stronger. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4840-4851, 2024. 1
299
+ [39] Yanmin Wu, Jiarui Meng, Haijie Li, Chenming Wu, Yahao Shi, Xinhua Cheng, Chen Zhao, Haocheng Feng, Errui Ding, Jingdong Wang, et al. Opengaussian: Towards point-level 3d gaussian-based open vocabulary understanding. arXiv
300
+
301
+ preprint arXiv:2406.02058, 2024. 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 15, 16, 17, 18
302
+ [40] Jihan Yang, Runyu Ding, Weipeng Deng, Zhe Wang, and Xiaojuan Qi. Regionplc: Regional point-language contrastive learning for open-world 3d scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19823-19832, 2024. 1, 2
303
+ [41] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In CVPR, 2024. 16
304
+ [42] Junbo Zhang, Runpei Dong, and Kaisheng Ma. Clip-fo3d: Learning free open-world 3d scene representations from 2d dense clip. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2048–2059, 2023. 2
305
+ [43] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pages 16259-16268, 2021. 1
306
+ [44] Yuhang Zheng, Xiangyu Chen, Yupeng Zheng, Songen Gu, Runyi Yang, Bu Jin, Pengfei Li, Chengliang Zhong, Zeng-mao Wang, Lina Liu, Chao Yang, Dawei Wang, Zhen Chen, Xiaoxiao Long, and Meiqing Wang. Gaussiangrasper: 3d language gaussian splatting for open-vocabulary robotic grasping. IEEE Robotics and Automation Letters, 2024. 1, 2, 3
307
+ [45] Shijie Zhou, Haoran Chang, Sicheng Jiang, Zhiwen Fan, Zehao Zhu, Dejia Xu, Pradyumna Chari, Suya You, Zhangyang Wang, and Achuta Kadambi. Feature 3dgs: Supercharging 3d gaussian splatting to enable distilled feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21676-21685, 2024.
308
+ [46] Xingxing Zuo, Pouya Samangouei, Yunwen Zhou, Yan Di, and Mingyang Li. Fmgs: Foundation model embedded 3d gaussian splatting for holistic 3d scene understanding. IJCV, 2024. 1, 2, 3
309
+ [47] Matthias Zwicker, Hanspeter Pfister, Jeroen van Baar, and Markus H. Gross. EWA volume splatting. In Visualization Conference, 2001. 2
310
+
311
+ # Dr. Splat: Directly Referring 3D Gaussian Splatting via Direct Language Embedding Registration
312
+
313
+ Supplementary Material
314
+
315
+ A Implementation Details
316
+ B Experiment Setup
317
+ C Evaluation Protocols
318
+ D Search-time Experiments
319
+ E Additional Results
320
+
321
+ E.1 Additional results on presented 3D tasks
322
+ E.2 Experiments on the ScanNet200 dataset
323
+ E.3 Experiments on the city-scale dataset
324
+
325
+ F Broader Applications and Limitations
326
+
327
+ # Supplementary Material
328
+
329
+ In this supplementary material, we provide additional details omitted from the manuscript. Sec. A covers implementation and evaluated 3D tasks. Sec. B outlines the experimental setup, and Sec. C explains our Gaussian-friendly evaluation protocol. Sec. D presents search-time experiments, while Sec. E includes qualitative results, annotation analyses, and city-scale dataset evaluations. Sec. F addresses limitations and future directions. We also provide a supplementary video that highlights city-scale experiments.
330
+
331
+ # A. Implementation Details
332
+
333
+ Overall, our method consists of (1) a pre-processing stage that constructs the Product Quantization codebooks and pre-trains the 3DGS, (2) a training stage that aggregates multi-view CLIP embeddings into unified Gaussian-registered embeddings, and (3) an inference stage that directly refers to the language-embedded 3D Gaussians for downstream tasks.
334
+
335
+ Pre-Training stage. In the pre-processing stage, we extract per-patch CLIP embeddings to build the PQ codebooks. This consists of a patch extraction step and a CLIP embedding extraction step. To obtain patches, we utilize the LVIS dataset, a large-scale dataset with ground-truth image segmentations. From the segmentation masks in the LVIS dataset, we identify object regions and crop them into individual image patches. Each cropped patch is then encoded with the OpenCLIP ViT-B/16 model. From these embeddings, we build the PQ codebooks. We use the open-source FAISS library [6] for our Product Quantization implementation, with 128 sub-vectors per embedding and each sub-vector assigned to one of 256 centroids, yielding an 8-bit index per sub-vector. This process is illustrated in Fig. S1.
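+
+ As a concrete illustration, the following is a minimal sketch (not the released implementation) of how such a codebook could be built with FAISS; the feature dimension of 512 corresponds to OpenCLIP ViT-B/16, and the random array only stands in for the LVIS patch embeddings:
+
+ ```python
+ import numpy as np
+ import faiss
+
+ # Minimal sketch: train a Product Quantization codebook with 128 sub-vectors,
+ # each mapped to one of 256 centroids (an 8-bit index per sub-vector).
+ d, M, nbits = 512, 128, 8                       # CLIP dim, sub-vectors, bits per index
+ patch_feats = np.random.randn(50_000, d).astype("float32")  # placeholder for LVIS patch CLIP features
+
+ pq = faiss.ProductQuantizer(d, M, nbits)
+ pq.train(patch_feats)                           # learns 256 centroids per sub-vector
+ codes = pq.compute_codes(patch_feats[:10])      # shape (10, 128), dtype uint8
+ ```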
336
+
337
+ 3D Gaussian parameters $\Theta$ [17] are also optimized during
338
+
339
+ the pre-processing stage. The initialization of the 3D Gaussians matters, as it can potentially impact the performance of the downstream 3D scene understanding tasks. We therefore follow the original 3D Gaussian Splatting method and use the optimized 3D Gaussians as our initial parameters. In other words, pre-training is conducted with the default hyperparameters of the 3DGS framework [17], running for 30,000 iterations. We consistently apply this procedure across the different methods for a fair comparison. In particular, for LEGaussians [35], which employs mutual training, we disabled 3D Gaussian updates during feature assignment in our experiments.
340
+
341
+ Training stage. Based on the PQ codebooks and the initial 3D Gaussian parameters $\Theta$, we begin the training stage. All competing models and our proposed model are trained and evaluated on a single NVIDIA RTX A6000 GPU to ensure a fair performance comparison. The training stage consists of three main steps: extracting pixel-wise CLIP embeddings from the training images, aggregating the features, and finally registering the features to the Gaussians.
342
+
343
+ Given multi-view images, we extract dense CLIP features assigned to each pixel. To obtain per-pixel CLIP features, we adopt the feature extraction scheme of LangSplat [30], which utilizes SAM [19]. To collect per-patch embeddings, we follow the OpenGaussian framework and use a single-level mask, whereas LangSplat uses multi-level masks.
344
+
345
+ Once these CLIP features are extracted for all images, we proceed to the feature registration step, where we iteratively measure the contribution (weight) of each pre-trained Gaussian to each ray assigned to the pixels of the training images and update the 3D Gaussian embeddings accordingly. These weights are determined by the volume rendering equation, which defines each Gaussian's influence during the color rasterization process (see Sec. 3.2 of the manuscript). After the registration process, we normalize the embeddings by dividing them by their L2 norms.
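+
+ A simplified sketch of this aggregation step is given below. The authors perform it inside the rasterizer; here the per-pixel top-$k$ Gaussian indices and volume-rendering weights are assumed to be given as tensors, and all names are illustrative:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def register_features(num_gaussians, pixel_feats, gauss_idx, weights):
+     """Accumulate per-pixel CLIP features into per-Gaussian embeddings.
+
+     pixel_feats: (P, D) CLIP features of the training pixels
+     gauss_idx:   (P, K) indices of the top-k Gaussians hit by each pixel's ray
+     weights:     (P, K) volume-rendering (alpha-compositing) weights
+     """
+     P, K = gauss_idx.shape
+     D = pixel_feats.shape[1]
+     emb = torch.zeros(num_gaussians, D)
+     wsum = torch.zeros(num_gaussians)
+
+     idx = gauss_idx.reshape(-1).long()                 # (P*K,)
+     w = weights.reshape(-1, 1)                         # (P*K, 1)
+     f = pixel_feats.repeat_interleave(K, dim=0)        # (P*K, D)
+
+     emb.index_add_(0, idx, w * f)                      # weighted feature sum per Gaussian
+     wsum.index_add_(0, idx, w.squeeze(1))              # total weight per Gaussian
+     emb = F.normalize(emb, dim=1)                      # divide by L2 norm
+     return emb, wsum                                   # wsum == 0 marks never-selected Gaussians
+ ```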
346
+
347
+ Lastly, we register the aggregated features to the 3D Gaussians. For memory efficiency, we quantize the aggregated features using the pre-trained PQ codebooks, encoding each feature as a set of 128 8-bit integer indices (Fig. S1). During registration, Gaussians that were never selected in the top-$k$ process are pruned to reduce noise and memory consumption. At the end of training, we retain the set of assigned Gaussians, each with 128 8-bit integer indices.
348
+
349
+ Inference stage. Finally, in the inference stage, the PQ-assigned Gaussians from the previous steps are used. By recalling the PQ index list assigned to each 3D Gaussian,
350
+
351
+ ![](images/73ecd80f131809ca6aa8d348f18c70255c26c34e8610c5412fbd1f86222b107a.jpg)
352
+ Figure S1. We illustrate the construction and encoding processes of the Product Quantization used in Dr. Splat. (left) We first construct the codebook by updating sub-vector centroids using CLIP features extracted from large-scale object images. (right) After constructing the PQ codebook, the centroids of each sub-vector are kept frozen. A query feature is split into sub-vectors, each of which is encoded into a centroid index by nearest-neighbor search.
353
+
354
+ ![](images/83eabaf38b3badd146f4ad963219e6385a9515f9fe5e38ffa91ab9011bcc8f8f.jpg)
355
+
356
+ cosine similarity is computed between the embedding of a given text query, extracted with the same CLIP encoder, and each 3D Gaussian. Detailed steps are provided in Sec. 3.3 of the manuscript. As the sub-vector norms do not sum to 1, we normalize by the sum of the sub-vector L2 norms, which can be done with the search function of the FAISS library. The resulting similarity scores are then used to perform the various 3D tasks evaluated in this study. The following sections explain how the computed activation values are applied in each task.
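+
+ A hedged sketch of this lookup with FAISS is shown below; the arrays are random placeholders (in practice the Gaussian embeddings come from the registration stage and the codebook is the one pre-trained on LVIS patches), and unit-normalizing both sides makes the inner-product search approximate cosine similarity:
+
+ ```python
+ import numpy as np
+ import faiss
+
+ d, M, nbits, N = 512, 128, 8, 100_000
+ gauss_embs = np.random.randn(N, d).astype("float32")   # placeholder registered embeddings
+ faiss.normalize_L2(gauss_embs)                          # unit norm -> inner product ~ cosine
+
+ index = faiss.IndexPQ(d, M, nbits, faiss.METRIC_INNER_PRODUCT)
+ index.train(gauss_embs)                                 # stands in for the pre-trained codebook
+ index.add(gauss_embs)
+
+ text_emb = np.random.randn(1, d).astype("float32")      # placeholder CLIP text embedding
+ faiss.normalize_L2(text_emb)
+ scores, gauss_ids = index.search(text_emb, 1000)        # approximate similarities and Gaussian ids
+ ```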
357
+
358
+ # B. Experiment Setup
359
+
360
+ We conduct experiments on three tasks: 3D object selection, open-vocabulary 3D object localization, and open-vocabulary 3D semantic segmentation. These tasks are closely related to the 3D search described in Fig. 1 of the manuscript, as well as to the 3D scene understanding tasks of [39].
361
+
362
+ 3D object selection. To assess the model's 3D awareness, we evaluate a 3D object selection task. We first extract text features from an open-vocabulary text query using the CLIP text encoder [31]. Next, we compare these text features to the 3D Gaussian embeddings by computing the cosine similarity score. By thresholding the similarity, we identify the 3D Gaussians that are relevant to the given text query. The threshold value for each method is determined through a grid search for optimal performance.
363
+
364
+ We use the LeRF-OVS dataset [18] with annotations by LangSplat [30]. As the LeRF-OVS dataset lacks 3D ground truth, we follow the 2D segmentation-based evaluation method proposed by OpenGaussian [39]. This approach evaluates 3D understanding by measuring multi-view 2D
365
+
366
+ segmentation accuracy between the rendered occupancy mask from the selected 3D Gaussians and the GT object masks. Ground-truth segmentation masks are manually annotated for each text query as described in [30]. We report IoU and localization accuracy as the evaluation metrics.
367
+
368
+ Open-vocabulary 3D object localization. Given an open-vocabulary text query, we use the CLIP text encoder [31] to extract its text feature. Then, we compute the cosine similarity score between the query text feature and the Gaussian-registered embeddings. Finally, we select highly relevant 3D Gaussians by thresholding the obtained cosine similarities. We set the threshold for each method individually by searching for the threshold that yields the best mIoU on the evaluation scenes.
369
+
370
+ Open-vocabulary 3D semantic segmentation. We further evaluate our method on the open-vocabulary 3D semantic segmentation task. For a given set of open-vocabulary text queries representing categories, we use the CLIP text encoder to extract a language embedding for each query. We then compute the cosine similarity scores between the 3D Gaussian embeddings and the language features of the given text queries. Using the obtained cosine similarity scores, we assign each 3D Gaussian to the category with the highest cosine similarity score.
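+
+ The three tasks above differ only in how the Gaussian-query similarities are consumed; a minimal sketch is given below, where the embeddings are random placeholders assumed to be L2-normalized and the 0.5 threshold is purely illustrative (the paper tunes thresholds per method):
+
+ ```python
+ import torch
+
+ # Placeholder L2-normalized Gaussian and CLIP text embeddings.
+ gauss_embs = torch.nn.functional.normalize(torch.randn(10_000, 512), dim=1)
+ text_embs = torch.nn.functional.normalize(torch.randn(20, 512), dim=1)
+
+ sims = gauss_embs @ text_embs.T            # (N, C) cosine similarities
+
+ selected = sims[:, 0] > 0.5                # selection / localization: threshold one query's scores
+ labels = sims.argmax(dim=1)                # semantic segmentation: best category per Gaussian
+ ```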
371
+
372
+ # C. Evaluation Protocols
373
+
374
+ Limitations of existing evaluation protocols. Unlike previous works such as LERF [18], LEGaussians [35], and LangSplat [30], our method directly leverages the 3D Gaussian representation for 3D scene understanding tasks. Similar to ours, OpenGaussian [39] is a concurrent work that also targets the open-vocabulary 3D semantic segmentation task.
375
+
376
+ ![](images/458c6490e27753d96d2a00e83db8354f8f906a05f01d93d5b7942adc6f505843.jpg)
377
+ OpenGaussian protocol
378
+
379
+ ![](images/3b87d22bf92bf6f36447eca81f218259ad5b3441db40462c7b5eb75a5c20b0d3.jpg)
380
+
381
+ ![](images/7464d2d318e6b88d8f1f070722a077a04e0fab48eab332752a09f9ed8b1ca725.jpg)
382
+
383
+ ![](images/19f36f700209c6aac43e50b8c3529689daff7f6aee96705c5a8ae458221b17ae.jpg)
384
+
385
+ ![](images/b4a7b626c153f09d101ca36fdb0d5d6154ac819c2bc2e4bd6f9fbbe2b1ab90a8.jpg)
386
+ Our protocol
387
+
388
+ ![](images/b1e491d4749098ed2c753c006dff75c5a74c2f7f02e5fd633c26391ef237befd.jpg)
389
+ Trained scenes
390
+
391
+ ![](images/cb09eaf35b5104aa84eed92e611da47cffdcdc6186956f8ef91b1fdffbfc15a3.jpg)
392
+
393
+ ![](images/602b0317572e009136d0c5fe4c5048fb11b2ebfcb62ff70879d0143c07193b0c.jpg)
394
+ Pseudo GT Labels
395
+ Figure S2. We compare the quality of the scenes and pseudo ground truth labels obtained from different evaluation protocols. (top) Trained scenes following the OpenGaussian evaluation protocol, which fixes the positions and the number of the initial points during training. (bottom) Trained scenes following our evaluation protocol, which does not require any constraints during training.
396
+
397
+ However, unlike OpenGaussian, we introduce a new evaluation criterion specialized for 3D Gaussians, instead of relying on point cloud-specific evaluations.
398
+
399
+ OpenGaussian [39] computes evaluation metrics directly from 3D Gaussians, using ScanNet [4] ground truth point clouds with semantic labels. It aligns Gaussian centers $\mu$ with the dataset points $[x,y,z]$ and keeps both $\mu$ and the number of Gaussians $N$ fixed during parameter optimization, which differs from vanilla 3D Gaussian Splatting [17]. As shown in Fig. S2, this approach introduces significant quality issues. The reason behind this optimization trick lies in how the evaluation is performed.
400
+
401
+ The evaluation by OpenGaussian involves predicting a label for each Gaussian and measuring the alignment with the ground truth point cloud using Intersection over Union (IoU). To compute IoU, the overlap (intersection) and total extent (union) are calculated between the 3D Gaussians' center locations $\{\mu\}$ and the ground truth point cloud at fixed positions. As discussed, since OpenGaussian does not update the locations of the 3D Gaussians, which coincide with the locations of the 3D ground truth points, it simply counts the overlap and union without considering the volumetric properties of the 3D Gaussians.
402
+
403
+ We claim that such an evaluation protocol has two dominant issues. First, by pre-defining the number of Gaussians as well as their center locations, the optimized 3D Gaussians produce degraded rendering quality, as shown in Fig. S2, which is not a practical solution. Second, the aforementioned IoU is calculated only from the count of 3D Gaussians, which ignores the significance of individual Gaussians having different shapes and densities.
404
+
405
+ Our Gaussian-friendly evaluation protocol. To address these limitations, we propose a novel evaluation protocol to compute IoU from 3D Gaussians. Our evaluation protocol follows the original 3D Gaussian Splatting optimization scheme [17], updating both the locations and the number of 3D Gaussians. After we obtain the optimized Gaussians $\Theta$, these parameters are used to train the language-embedded Gaussians. The remaining question is then how to assign ground truth semantic labels to each Gaussian from the per-point semantic annotations provided by the ScanNet dataset [4].
406
+
407
+ Starting from the given point cloud of $Q$ points $\mathcal{P} = \{\mathbf{p}_k\}_{k=1}^Q$ and a set of semantic labels $\mathcal{S} = \{\mathbf{s}\}$, we compose a paired set of points and their labels $\{\mathbf{p}_k, \mathbf{s}^{\mathbf{p}_k}\}_{k=1}^Q$, as provided by the official dataset. We
408
+
409
+ measure the Mahalanobis distances between the language-embedded 3D Gaussian parameters $\Phi = \{\theta_i, \tilde{\mathbf{f}}_i\}_{i=1}^N = \{\pmb{\mu}_i, S_i, R_i, \alpha_i, \mathbf{c}_i, \tilde{\mathbf{f}}_i\}_{i=1}^N$ (Sec. 2 of the manuscript) and the ground truth point cloud. Note that the Mahalanobis distance is already used in 3DGS [17] when computing effective alpha values, as stated in Eq. 1 of the manuscript. We reuse this formulation to calculate the Mahalanobis distance $\mathbf{d}^{\mathrm{mahal}}(\cdot)$ between a volumetric 3D Gaussian $\theta$ and a 3D point $\mathbf{p}$ as:
410
+
411
+ $$
412
+ \mathbf{d}^{\mathrm{mahal}}(\mathbf{p}, \theta) = (\mathbf{p} - \boldsymbol{\mu})^{\top} \boldsymbol{\Sigma}^{-1} (\mathbf{p} - \boldsymbol{\mu}). \tag{7}
413
+ $$
414
+
415
+ Using the Mahalanobis distance, we determine the semantic label of each Gaussian as below:
416
+
417
+ $$
418
+ \mathbf{s}^{\theta_i} = \underset{\mathbf{s} \in \mathcal{S}}{\arg\max} \left( \sum_{\mathbf{p}_k \in \mathcal{P}} \mathbb{1}\left\{\mathbf{s}^{\mathbf{p}_k} = \mathbf{s}\right\} \cdot \mathbf{d}^{\mathrm{mahal}}\left(\mathbf{p}_k, \theta_i\right) \right), \tag{8}
419
+ $$
420
+
421
+ where $\mathbf{s}^{\theta_i}$ is the semantic label of the $i$-th 3D Gaussian $\theta_i$, and $\mathbb{1}\{\mathbf{s}^{\mathbf{p}_k} = \mathbf{s}\}$ is an indicator function returning 1 only when the label of the $k$-th point is identical to the semantic label $\mathbf{s} \in \mathcal{S}$. In short, this equation assigns to each 3D Gaussian the semantic label $\mathbf{s}$ with the highest sum of Mahalanobis distances from the ground truth points to that Gaussian.
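+
+ Read literally, Eqs. (7)-(8) amount to the following sketch for a single Gaussian; the inputs are illustrative, and `Sigma_inv` stands for the inverse covariance of that Gaussian:
+
+ ```python
+ import numpy as np
+
+ def assign_gaussian_label(points, point_labels, mu, Sigma_inv, num_labels):
+     """Assign a semantic label to one Gaussian from labeled GT points (Eqs. 7-8).
+
+     points:       (Q, 3) ground-truth point cloud
+     point_labels: (Q,)   integer label index per point
+     mu, Sigma_inv: center and inverse covariance of the Gaussian
+     """
+     diff = points - mu                                          # (Q, 3)
+     d_mahal = np.einsum("qi,ij,qj->q", diff, Sigma_inv, diff)   # Eq. (7)
+     scores = np.zeros(num_labels)
+     np.add.at(scores, point_labels, d_mahal)                    # per-label sum of distances
+     return int(scores.argmax())                                 # Eq. (8)
+ ```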
422
+
423
+ The proposed assignment process enables generally applicable evaluation of 3D Gaussians without any constraints. Fig. S2 shows the quality degradation of the trained scene following the OpenGaussian evaluation protocol, which fixes the position and the number of initial points during training. On the other hand, our generalizable evaluation protocol does not impose any constraints during the training of Gaussians, and it also enables high-quality scene reconstruction, effectively capturing detailed areas.
424
+
425
+ With the obtained $N$ pseudo-GT 3D Gaussians, we measure IoU by considering the volumetric significance of each Gaussian. We define the significant score $d_i$ of each Gaussian $\theta_i$, with scale $\mathbf{s}_i = [s_{ix}, s_{iy}, s_{iz}]^\top$ and opacity $\alpha_i$, as $d_i = s_{ix}s_{iy}s_{iz}\alpha_i$, where $s_{ix}s_{iy}s_{iz}$ denotes the relative ellipsoid volume of Gaussian $\theta_i$. With the obtained significant scores $\mathbf{d} = [d_1, d_2, \dots, d_N]^\top$, we calculate the IoU for the $i$-th label as:
426
+
427
+ $$
+ \begin{aligned}
+ \mathrm{Intersection}_i &= \mathbf{d} \cdot \left(\mathbf{l}_i^{\mathrm{pred}} \odot \mathbf{l}_i^{\mathrm{gt}}\right), \\
+ \mathrm{Union}_i &= \mathbf{d} \cdot \left(\mathbf{l}_i^{\mathrm{pred}} + \mathbf{l}_i^{\mathrm{gt}} - \mathbf{l}_i^{\mathrm{pred}} \odot \mathbf{l}_i^{\mathrm{gt}}\right), \\
+ \mathrm{IoU}_i &= \mathrm{Intersection}_i / \mathrm{Union}_i,
+ \end{aligned} \tag{9}
+ $$
434
+
435
+ where $\mathbf{l}_i^{\mathrm{pred}}\in \mathbb{R}^N$ and $\mathbf{l}_i^{\mathrm{gt}}\in \mathbb{R}^N$ are binary vectors indicating whether the predicted/GT label of each Gaussian, $\mathbf{s}^{\theta}$ in Eq. (8), is the $i$-th label. The proposed metric assigns a larger weight to Gaussians with higher significant scores when measuring IoU, and this significant score endows our metric with volume awareness.
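+
+ A compact sketch of this volume-aware IoU (Eq. (9)) for a single label is given below; the inputs are placeholders for the per-Gaussian scales, opacities, and binary label indicators:
+
+ ```python
+ import numpy as np
+
+ def volume_aware_iou(scales, alphas, pred_mask, gt_mask):
+     """Eq. (9): weight intersection/union by the significant score d = s_x*s_y*s_z*alpha.
+
+     scales: (N, 3), alphas: (N,), pred_mask / gt_mask: (N,) booleans for one label
+     """
+     d = scales.prod(axis=1) * alphas                            # significant score per Gaussian
+     inter = float(d @ (pred_mask & gt_mask).astype(d.dtype))
+     union = float(d @ (pred_mask | gt_mask).astype(d.dtype))    # l_p + l_g - l_p*l_g for binary masks
+     return inter / union if union > 0 else 0.0
+ ```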
436
+
437
+ Volume awareness of the proposed metric. To validate that the proposed metric can effectively approximate the
438
+
439
+ volumetric IoU of the 3D scene, we compare our metric with another volume-aware IoU measurement based on a voxel representation. Before measuring IoU with voxels, we train 3D Gaussians and generate labeled pseudo-GT 3D Gaussians with Eq. (8). We then sample voxels in the scene and allocate a GT label to each voxel using the labeled 3D Gaussians. We obtain the most likely label of each voxel by defining a label score. The label score $l_{jn}^{\mathrm{voxel}}$ is computed from the opacity $\alpha_i$ and the density $\mathcal{N}(\mathbf{v}_j|\boldsymbol{\mu}_i,\boldsymbol{\Sigma}_i)$ of each Gaussian at the position of a voxel $\mathbf{v}_j$ as:
440
+
441
+ $$
442
+ l_{jn}^{\mathrm{voxel}} = \sum_{\theta_i \in \Theta} \alpha_i \cdot \mathbb{1}\left\{\mathbf{s}^{\theta_i} = \mathbf{s}\right\} \cdot \mathcal{N}\left(\mathbf{v}_j \mid \boldsymbol{\mu}_i, \boldsymbol{\Sigma}_i\right), \tag{10}
443
+ $$
444
+
445
+ where $\mathbb{1}\{\mathbf{s}^{\theta_i} = \mathbf{s}\}$ is an indicator function determining whether Gaussian $\theta_i$ is assigned to the $n$-th label $\mathbf{s}$. With the obtained scores, we first filter out empty voxels by thresholding $p_j = \sum_{n=1}^{L} l_{jn}^{\mathrm{voxel}}$, where $L$ is the total number of labels; $p_j$ can be interpreted as the density of voxel $\mathbf{v}_j$. We then assign the label with the highest score as the GT label of each voxel. Predicted voxel labels are generated from the predicted Gaussian labels in the same manner, and IoU is evaluated by comparing the GT and predicted voxel labels one-to-one.
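+
+ For reference, a direct (unoptimized) sketch of the voxel label score in Eq. (10) could look as follows, with all inputs illustrative:
+
+ ```python
+ import numpy as np
+
+ def voxel_label_scores(voxel, mus, Sigmas, alphas, gauss_labels, num_labels):
+     """Eq. (10): per-label score of one voxel from opacity-weighted Gaussian densities."""
+     scores = np.zeros(num_labels)
+     for mu, Sigma, alpha, lab in zip(mus, Sigmas, alphas, gauss_labels):
+         diff = voxel - mu
+         dens = np.exp(-0.5 * diff @ np.linalg.inv(Sigma) @ diff)
+         dens /= np.sqrt(((2.0 * np.pi) ** 3) * np.linalg.det(Sigma))  # 3D Gaussian normalization
+         scores[lab] += alpha * dens
+     return scores  # voxels with a small scores.sum() are treated as empty and filtered out
+ ```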
446
+
447
+ Volume awareness is inherent in this voxel-based IoU evaluation as the voxels explicitly represent the volume of the scene. We show the volume-awareness of our evaluation metric by showing a correlation between our metric and voxel-based metric in Fig. S3. As can be seen, our metric obtains a high correlation with the voxel-based IoU evaluation metric by considering the significant score when calculating IoU. This result shows the necessity of the significant score, which endows our metric with volume awareness.
448
+
449
+ Although the voxel-based IoU evaluation effectively measures the volume-aware IoU of the scene, the computational cost of assigning labels is high. Each time new Gaussian labels are predicted, they must be re-assigned to the voxels before evaluation. In contrast, our IoU evaluation protocol has a low computational cost, since no repeated assignment is needed once the labeled pseudo-GT Gaussians have been generated. In other words, our proposed IoU evaluation protocol is a fast and volume-aware way of measuring the IoU of scenes represented by 3D Gaussians.
450
+
451
+ # D. Search-time Experiments
452
+
453
+ In addition to its memory efficiency, Product Quantization significantly enhances search speed. Product quantization can approximate distances between vectors using quantized sub-vectors. By precomputing and storing distances between subvector centroids in a Look-Up Table (LUT), distance calculations between query and database vectors during the
454
+
455
+ <table><tr><td></td><td colspan="2">OpenGaussian evaluation</td><td colspan="2">Our evaluation</td></tr><tr><td></td><td>OpenGaussian</td><td>Ours</td><td>OpenGaussian</td><td>Ours</td></tr><tr><td>IoU &gt; 0.15</td><td>52.7</td><td>54.3</td><td>57.8</td><td>52.6</td></tr><tr><td>IoU &gt; 0.30</td><td>36.4</td><td>39.4</td><td>38.0</td><td>40.3</td></tr><tr><td>IoU &gt; 0.45</td><td>14.7</td><td>15.5</td><td>18.3</td><td>25.6</td></tr><tr><td>3D mIoU</td><td>23.1</td><td>25.0</td><td>25.2</td><td>25.4</td></tr></table>
456
+
457
+ Table S3. We compare different metrics for measuring IoU, proposed by OpenGaussian [39] and our work.
458
+
459
+ ![](images/6f64af1742aacc1743feca5f2a534419f3e7724b9b535c04388107bbaba29fb1.jpg)
460
+ Figure S3. Scatter plot of mIoUs with different mIoU evaluation protocols, measured from eight scenes of the ScanNet [4] dataset. (left) Low correlation between voxel-based metric and our metric without significant score, i.e., same score $d_{i}$ for all Gaussians. (right) High correlation between voxel-based metric and our metric.
461
+
462
+ ![](images/cfe3fc5e8f84d1b81ef0c9dda58baad823dbf029686466c5addd3fe49af48aad.jpg)
463
+
464
+ search phase are reduced to simple indexing operations. This precomputation shifts the complexity of distance calculations against $N$ database vectors of dimension $D$ from $O(ND)$ to $O(N)$ look-ups per sub-vector.
465
+
466
+ The use of the LUT can be described as follows. For trained PQ centroids $c_{lj}$, with $l = 1,2,\ldots,L$ and $j = 1,\dots,2^k$, where $L$ is the number of sub-vectors and $k$ is the number of bits used to index the centroids, the LUT is stored as:
467
+
468
+ $$
469
+ \mathrm{LUT}_l[i, j] = \left\| c_{li} - c_{lj} \right\|_2^2, \quad \text{where } i, j \in \{1, 2, \dots, 2^k\}. \tag{11}
470
+ $$
471
+
472
+ Then, for vectors $\mathbf{v}_1 = [\mathbf{v}_{11},\dots,\mathbf{v}_{1L}]$ and $\mathbf{v}_2 = [\mathbf{v}_{21},\dots,\mathbf{v}_{2L}]$ mapped to indices $j_1 = [j_{11},j_{12},\dots,j_{1L}]$ and $j_2 = [j_{21},j_{22},\dots,j_{2L}]$, the distance is computed as the sum of the LUT values retrieved at the corresponding PQ indices:
473
+
474
+ $$
475
+ d\left(\mathbf{v}_1, \mathbf{v}_2\right) = \sum_{l=1}^{L} \mathrm{LUT}_l\left[j_{1l}, j_{2l}\right]. \tag{12}
476
+ $$
477
+
478
+ We can also compute the cosine similarity of the vectors by storing inner products rather than distances in the LUT, followed by normalization with the sum of the sub-vector norms. Despite quantization errors, previous literature [14] shows that these errors remain within certain quantization bounds, preserving the correlation between the approximated and actual distances.
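+
+ The LUT construction and lookup of Eqs. (11)-(12) amount to the following NumPy sketch, with illustrative shapes; for cosine similarity, the same tables would store centroid inner products instead:
+
+ ```python
+ import numpy as np
+
+ def build_luts(centroids):
+     """centroids: (L, 2**k, d_sub) trained PQ centroids -> (L, 2**k, 2**k) squared-distance LUTs."""
+     diff = centroids[:, :, None, :] - centroids[:, None, :, :]
+     return (diff ** 2).sum(axis=-1)                                  # Eq. (11)
+
+ def pq_distance(luts, codes_a, codes_b):
+     """codes_a, codes_b: (L,) PQ indices of two vectors -> approximate squared distance."""
+     L = luts.shape[0]
+     return sum(luts[l, codes_a[l], codes_b[l]] for l in range(L))    # Eq. (12)
+ ```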
479
+
480
+ The scalability and speed of the proposed approach make it particularly suitable for handling complex 3D data. We compared search speed between computing cosine similarity
481
+
482
+ of CLIP features and distance computation with product quantization (see Fig. S4). Under identical hardware conditions, the LUT-based approach yields substantial speed-ups over cosine similarity computation between CLIP features: with sub-vector sizes of 128, 64, and 32, search performance improved by approximately $2\times$, $6.6\times$, and $14.1\times$, respectively. These improvements underscore the computational advantages of the proposed method.
483
+
484
+ Considering that rendering-based methods require significantly more computation than direct 3D data processing, Dr. Splat demonstrates superior search efficiency and establishes itself as a practical, scalable solution for 3D data search and processing at scale.
485
+
486
+ ![](images/8ddbd00c7520d05b4dee838682aa1361ad18afc32d3cc8bb8a8651f0bc935fd8.jpg)
487
+ Figure S4. We compare the inference speed of the product quantization LUT-based method and ordinary cosine similarity calculation. We measure the average inference time over one million feature points and report mean values over 100 repeated experiments.
488
+
489
+ # E. Additional Results
490
+
491
+ In this section, we present additional results that are not shown in the manuscript due to space constraints.
492
+
493
+ # E.1. Additional results on presented 3D tasks
494
+
495
+ We first show additional results for the 3D object selection task in Fig. S5 and the 3D localization task in Fig. S6, which were not included in the manuscript due to the space limit. Consistent with our earlier observations, the LangSplat model struggles to learn accurate 3D features. While it occasionally follows feature patterns, it frequently produces significant noise, making it unsuitable for real-world applications such as localization, object grasping, or 3D image editing. Additionally, we observe a persistent spatial bias in the OpenGaussian method, as previously noted; see the red cup, plate, wavy noodles, and bed cases in Fig. S6, where it fails to select the relevant regions. In contrast, our proposed method, which allows direct search and inference in 3D space, consistently achieves favorable localization performance. This demonstrates the robustness and practicality of our approach compared to competing methods.
496
+
497
+ <table><tr><td></td><td colspan="4">200 classes</td></tr><tr><td></td><td>3D mIoU</td><td>IoU &gt; 0.15</td><td>IoU &gt; 0.3</td><td>IoU &gt; 0.45</td></tr><tr><td>LangSplat-m [30]</td><td>3.9</td><td>7.6</td><td>3.5</td><td>0.8</td></tr><tr><td>LEGaussians-m [35]</td><td>4.0</td><td>7.4</td><td>3.8</td><td>1.4</td></tr><tr><td>OpenGaussian [39]</td><td>14.7</td><td>34.2</td><td>18.9</td><td>11.0</td></tr><tr><td>Ours (Top-20)</td><td>14.6</td><td>36.3</td><td>18.6</td><td>9.4</td></tr><tr><td>Ours (Top-40)</td><td>14.9</td><td>36.0</td><td>19.3</td><td>14.0</td></tr></table>
498
+
499
+ Table S4. We evaluate our method against previous methods on the ScanNet-200 dataset.
500
+
501
+ In Sec. C, we demonstrated that our metric provides superior volumetric alignment compared to existing approaches. To further validate the superiority of our model, we also evaluated its performance using the metric proposed by OpenGaussian. As shown in Table S3, our method still outperforms the baseline under this alternative evaluation protocol.
502
+
503
+ # E.2. Experiments on the ScanNet200 dataset
504
+
505
+ The proposed model and its counterparts are designed to operate effectively in open-vocabulary settings. To evaluate performance in more comprehensive open-vocabulary cases, we conducted additional experiments using the ScanNet-200 annotations [34], which extend ScanNet's limited set of 20 labels to 200 semantic categories, including tail categories such as armchair and windowsill. These rare classes provide a closer approximation to real-world scenarios and enable a robust assessment of the models' generalization capabilities. For consistency, experiments are conducted on the same scenes as in the previous benchmarks, with ground truth annotations generated as described in Sec. C.
506
+
507
+ The results, summarized in Table S4, demonstrate that the proposed model consistently outperforms its counterparts, highlighting its superior generalization across both constrained and diverse object spaces and emphasizing its potential for practical application in complex real-world scenarios.
508
+
509
+ # E.3. Experiments on the city-scale dataset
510
+
511
+ The proposed method is further evaluated in a large-scale scenario using the Waymo San Francisco Mission Bay dataset [37], which features expansive spatial contexts. For each scene, the dataset comprises approximately 12,000 images captured by 12 cameras, providing a challenging and diverse testing environment for 3D localization tasks. We select 3 blocks of the scene for large-scale scene tests.
512
+
513
+ We conducted comparisons against the LangSplat-m model for the 3D text-query localization task as shown in Fig. S7. Our evaluation focused on qualitatively assessing how well each model performs in localizing queries within the 3D space. Our method consistently succeeds in localizing diverse text queries, demonstrating robust and accurate performance across various contexts. In contrast,
514
+
515
+ LangSplat-m struggles to make precise predictions, with its 3D Gaussian representations failing to align with the expected ground truth. These findings are consistent with our earlier observations regarding the limitations of LangSplat-m's approach.
516
+
517
+ As shown in Fig. S8, the results reflect not only objects but also attributes such as color to some extent. Additional visualizations can be found in Fig. S9 and the supplementary video, which provide a more comprehensive view of the qualitative differences between the methods. We encourage readers to refer to these supplementary materials for further insights.
518
+
519
+ The differences between the methods become even more pronounced when considering search speed in large-scale scenarios. For example, the Waymo dataset contains over 2.9M Gaussians, and each of the more than 100 images requires nearly 1M computations. The computational efficiency of the proposed method allows it to handle such large-scale data more effectively, highlighting its scalability and practical applicability in real-world scenarios.
520
+
521
+ # F. Broader Applications and Limitations
522
+
523
+ Broader application. The proposed method offers the potential for broader applications across diverse scenarios. Similar to works exploring applications with point clouds [13, 27] and MLP-based representations [32], our 3DGS-based approach can be extended to support various input modalities, such as click or image queries, by leveraging a self-referencing mechanism. Additionally, integrating our method with Large Language Models (LLMs) could facilitate dialogue-based interactions, allowing users to dynamically issue commands or explore the environment. This integration suggests promising avenues for developing 3D interactive systems that go beyond simple search tasks.
524
+
525
+ Furthermore, applying the method to canonical forms could support dynamic 3D scenes [25, 41]. This adaptation would extend the applicability of our approach beyond static environments, demonstrating its versatility in handling complex, real-world scenarios.
526
+
527
+ Limitation. While our method has demonstrated robust performance across diverse combinations of nouns and adjectives (e.g., "tea in a glass," "wavy noodles," and "red light" in Fig. S5 and Fig. S9) as well as unfamiliar nouns (e.g., "nori," "waldo," and "safety cone") without additional training, generalization remains an area for improvement. Further exploration of Product Quantization (PQ) training, such as using more diverse datasets or finer-grained query representations, could enhance the method's adaptability across varied contexts.
528
+
529
+ Despite its advantages, some limitations of the proposed method have also been identified, particularly related to
530
+
531
+ ![](images/a9ab72fbdb98b32217ab87e02869d8eb60d0fcf8a5fc44185418457d875caba2.jpg)
532
+ Figure S5. We compare the 3D object selection task on the LeRF dataset with LangSplat [30] and OpenGaussian [39]. We visualize selected Gaussians with high similarity to the query text. LangSplat shows noisy, 3D-uncorrelated activations, and OpenGaussian often shows false-positive activations, while our method shows accurate localization, demonstrating superior generalizability.
533
+
534
+ ![](images/19de9f9a6e2ae1a6d410358dda1edf0b79932fa5fa88aa8d1dd7aa22e83ac418.jpg)
535
+
536
+ CLIP features. Occasionally, related but distinct objects are simultaneously activated for a given query. For instance, the query "red apple" might activate non-red apples or unrelated red objects. This stems from CLIP's semantic associations and could be mitigated with post-processing techniques like re-ranking to improve query specificity.
537
+
538
+ Lastly, similar to previous methods [30, 35, 39], ours also requires setting an appropriate threshold. In this study, we employed a fixed similarity threshold across all scenes, ensuring stable and reproducible results. However, optimizing thresholds for specific scenarios or implementing dynamic adjustments could further refine localization accuracy in diverse environments.
539
+
540
+ ![](images/e49f345c8dd1e27a661195068e70c20802342ec37063a45500c9b4fa6119e2ed.jpg)
541
+ Figure S6. We compare 3D object localization results of competing methods [30, 35, 39] and Dr. Splat. Gaussians with similarity above the threshold (0.562) are shown in yellow, while those below the threshold are displayed in blue. Green boxes indicate successful localization, while red boxes indicate missed or false-positive 3D localizations.
542
+
543
+ ![](images/cd0eb7bd032d76da4968ceb6aae714e7df34fbb5bbae5ea8a7fc841839395f30.jpg)
544
+
545
+ ![](images/23fe4e480398c17c0e5b03d16d45283e02f92027e307749685a6d18d3e692ed6.jpg)
546
+
547
+ ![](images/699cb676364e1732d4cc348470455f604666505b18b8be48e37566faebb15ee0.jpg)
548
+
549
+ ![](images/281752db43a736df5a4ec1ef608326a359e4b392151eaba2598010b3822976f1.jpg)
550
+ 3DGS scene
551
+
552
+ ![](images/8c085fc8a4626123008020a8cdb1be57c0b427f332e77cdb4a95bf387712eb7b.jpg)
553
+ LangSplat-m
554
+
555
+ ![](images/b1dfa60e9e391d07c675e0df2c136f961986efdee6521bb9c4a6cb66f39496d6.jpg)
556
+ Dr. Splat (ours)
557
+ Figure S7. We compare 3D localization between the rendering-based LangSplat-m and the registration-based Dr. Splat. While LangSplat-m shows randomly distributed activations and fails to localize the target, our model successfully detects the target in both cases.
558
+
559
+ ![](images/27cd10a3efadc0df28fcec12be58073ebee0ef917919f234fbe2dff33edc33b8.jpg)
560
+
561
+ ![](images/cbf54a518f4160ab0b54ef3c33705d031050bc9bc504f5c2cfac02e122354db6.jpg)
562
+ 3DGS scene
563
+
564
+ ![](images/4c101c3eb77bc8acded4911c996abb632917bb162dddcb1258c73ffe2a09c87c.jpg)
565
+ "green light"
566
+
567
+ ![](images/eff4643f5fb1d7693f45d683b1a4638507d29509fd7a888c48d2e79da461055e.jpg)
568
+ Activation of Dr. Splat
569
+
570
+ ![](images/3b496f3f6ac93d35f1518b204f00a6e7543d4d71de627c89785d1f7caa77b870.jpg)
571
+ "red light"
572
+
573
+ ![](images/87cdbd2292bb647b484ab2c3eb03696ccf40f92050eb5bf2f57b8ac75ac7d2b7.jpg)
574
+ Figure S8. Visualization of 3D localization for different attributes (e.g., color) given as queries. The result highlights the ability of Dr. Splat (ours) to effectively distinguish attributes such as "green light" and "red light" in scenes based on text queries, demonstrating its robustness in open-vocabulary understanding.
575
+
576
+ ![](images/4101047f844d5afe4f5da2ee20d1beaabde564fca0efad5d5981c39d01ccd03a.jpg)
577
+ "ree
578
+
579
+ ![](images/400c502910154b2463e10816f4630005c5ceef7416aba180e29e183a7c3110f0.jpg)
580
+ Figure S9. Qualitative results of Dr. Splat on 3D localization task in city-scale data showcasing Dr. Splat's generalization performance across diverse text queries includes various target objects and concepts.
581
+
582
+ ![](images/55cd5743015be46d861985eb91dc32d3055f84733093ad03480cfc1e25c5f3b3.jpg)
2502.16xxx/2502.16652/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8026dc69ec9c69a96ad839d5d2375d711a9e43abeac3e378d10366ee9b465750
3
+ size 1793443
2502.16xxx/2502.16652/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16681/b6838fbf-d279-4d07-b092-0bd11a464037_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:931a0da6e3af0e2a4530c627c135eb5a656a9dd21b9dffe24ea23d093f78cf11
3
+ size 3212806
2502.16xxx/2502.16681/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16681/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9d09daabdce6a42aa6b22512bbe144b9de187fd9290a910ae5a1c3f21961da0
3
+ size 3023363
2502.16xxx/2502.16681/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16707/8aac8171-3975-48d2-ba3c-89dd50963261_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa890d965abb7e0cfa7e9fd4e5a5b93db215ca02265eb33e8a35c6f2504b8b9f
3
+ size 26322876
2502.16xxx/2502.16707/full.md ADDED
@@ -0,0 +1,915 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Reflective Planning: Vision-Language Models for Multi-Stage Long-Horizon Robotic Manipulation
2
+
3
+ Yunhai Feng<sup>1</sup> Jiaming Han<sup>2</sup> Zhuoran Yang<sup>3</sup> Xiangyu Yue<sup>2</sup> Sergey Levine<sup>4</sup> Jianlan Luo<sup>4†</sup>
4
+
5
+ # Abstract
6
+
7
+ Solving complex long-horizon robotic manipulation problems requires sophisticated high-level planning capabilities, the ability to reason about the physical world, and reactively choose appropriate motor skills. Vision-language models (VLMs) pretrained on Internet data could in principle offer a framework for tackling such problems. However, in their current form, VLMs lack both the nuanced understanding of intricate physics required for robotic manipulation and the ability to reason over long horizons to address error compounding issues. In this paper, we introduce a novel test-time computation framework that enhances VLMs' physical reasoning capabilities for multi-stage manipulation tasks. At its core, our approach iteratively improves a pretrained VLM with a "reflection" mechanism - it uses a generative model to imagine future world states, leverages these predictions to guide action selection, and critically reflects on potential suboptimalities to refine its reasoning. Experimental results demonstrate that our method significantly outperforms several state-of-the-art commercial VLMs as well as other post-training approaches such as Monte Carlo Tree Search (MCTS). Videos are available at https://reflect-vlm.github.io.
8
+
9
+ # 1. Introduction
10
+
11
+ Complex multi-stage manipulation tasks remain a fundamental challenge in robotics (Luo et al., 2024a; Kroemer et al., 2020; Cui & Trinkle, 2021), particularly when they require reasoning about sophisticated physical interactions and their consequences over long time horizons. These tasks often involve intricate sequences of actions where each step
12
+
13
+ †Project Advisor ¹Cornell University ²The Chinese University of Hong Kong ³Yale University ⁴University of California, Berkeley. Correspondence to: Yunhai Feng <yunhaif@cs.cornell.edu>, Xiangyu Yue <xyyue@ie.cuhk.edu.hk>, Jianlan Luo <jianlanluo@eecs.berkeley.edu>.
14
+
15
+ Copyright 2025 by the author(s).
16
+
17
+ ![](images/1f81b18db903fb89eee999a0aa0ec90c210db79344e20b365134c509b258a1b9.jpg)
18
+ Figure 1. Reflective planning. Our method uses a VLM to propose actions and a diffusion dynamics model to imagine the future state of executing the plan. The imagined future helps the VLM reflect the initial plan and propose better action.
19
+
20
+ must account for physical constraints and potential consequences, making them particularly challenging for planning systems. Success requires not only understanding the immediate effects of actions but also their long-term implications, the ability to adapt plans based on execution outcomes, and generalizing to novel scenarios.
21
+
22
+ While classical planning approaches, such as task and motion planning (TAMP) (Kaelbling & Lozano-Pérez, 2011; Garrett et al., 2020a), can in principle address such problems, their reliance on predefined symbolic representations and explicit state estimation makes them difficult to apply in settings that lack known models and require visual perception (Driess et al., 2020; Wang et al., 2021). This limitation has motivated the search for more flexible approaches to robotic planning. Recent advances in vision-language models (VLMs) have shown remarkable capabilities in processing visual scenes and natural language instructions by leveraging internet-scale knowledge (Chen et al., 2023; Bai et al., 2023; OpenAI, 2024a; Google, 2024; Liu et al., 2023). These models can effectively parse complex visual environments and comprehend high-level task descriptions
23
+
24
+ expressed in natural language, making them promising candidates for robotic planning problems (Driess et al., 2023; Brohan et al., 2023b;a; Shi et al., 2024; Liu et al., 2024a). However, state-of-the-art VLMs still struggle with complex physical reasoning tasks, and this limitation becomes particularly pronounced when precise physics concepts and long-horizon planning are involved (Gao et al., 2024; Chen et al., 2024).
25
+
26
+ In this paper, we study how to effectively leverage VLMs' Internet-scale knowledge while addressing their limitations in physical reasoning and long-horizon planning. We focus on a challenging class of robotic manipulation problems that involve sequentially manipulating interlocking objects to achieve desired configurations, as illustrated in Fig. 5. These tasks are particularly difficult as they require precise understanding of physical constraints, careful reasoning about action sequences, and the ability to plan over extended horizons while maintaining physical feasibility at each step.
27
+
28
+ To address these challenges, we present a novel test-time computation framework that significantly enhances VLMs' capabilities for multi-stage robotic manipulation tasks. The key insight of our method, ReflectVLM, is that by combining VLMs with a reflection mechanism and targeted post-training, we can create a system that better understands physical constraints and their implications for action planning. We use the term "reflection" to refer to a process where a VLM iteratively refines its decisions by critically examining the predicted outcomes of its proposed actions, akin to self-critique methods in large language models (Huang et al., 2024; Wang et al., 2023; Madaan et al., 2024). Our approach introduces two key components: (1) a look-ahead mechanism that uses a diffusion-based dynamics model to generate visual predictions of future states resulting from planned actions, and (2) a reflection process that allows the VLM to critique and refine its planned actions by analyzing these predicted outcomes. This combination of visual prediction and iterative refinement allows the VLM to develop a more sophisticated understanding of physical constraints and improve its decision-making capabilities without requiring extensive retraining.
29
+
30
+ Experimental results demonstrate that our approach significantly outperforms both the latest commercial state-of-the-art VLM models and traditional planning approaches like Monte Carlo Tree Search (MCTS) on this class of problems. Notably, our method achieves superior performance compared to post-training techniques such as supervised fine-tuning (SFT) while using the same amount of labeled data and maintaining computational efficiency. The success of our approach suggests that enhancing VLMs with structured reasoning mechanisms at test time can be a powerful strategy for improving their performance on physically-grounded tasks.
31
+
32
+ Our primary contribution is the aforementioned test-time
33
+
34
+ computation framework that enhances VLMs' physical reasoning capabilities for multi-stage manipulation tasks. Through extensive experiments, we demonstrate that our approach not only outperforms existing methods but also maintains computational efficiency. Importantly, while we demonstrate our framework's effectiveness on manipulation tasks, it is designed to be general and can be readily extended to other domains requiring visual understanding and sequential decision-making. This generality suggests broader applications in robotics and autonomous systems where physical reasoning and long-horizon planning are essential.
35
+
36
+ # 2. Related Work
37
+
38
+ Our framework incorporates a VLM with the reflection mechanism to solve long-horizon robotic planning problems. We therefore survey reflection techniques in the broader context in large models, VLM for robotic planning, as well as existing techniques for solving robot task and motion planning.
39
+
40
+ # 2.1. Reflection
41
+
42
+ Recent work has shown that large language models can benefit from reflection mechanisms - processes where models iteratively refine their outputs through self-critique and revision (Renze & Guven, 2024; Shinn et al., 2024; Pan et al., 2023; Madaan et al., 2024; Asai et al., 2023; Wang et al., 2023; Huang et al., 2024). For example, Madaan et al. (2024) introduced an iterative refinement approach where models critique and improve their own outputs through self-feedback. Chain-of-thought prompting and its variants (Wei et al., 2022; Wang et al., 2022; Yao et al., 2024) demonstrated that guiding models to show their reasoning process leads to better performance. Similarly, Cheng et al. (2024); Yu et al. (2025) extended such reflection mechanisms to vision-language models.
43
+
44
+ However, these approaches focus primarily on language-only or visual comprehension tasks, without addressing physical reasoning or robotics applications. Our work extends reflection to long-horizon robotic planning by incorporating a diffusion model that generates imagined future visual states. This allows the VLM to reflect on and revise its plans based on concrete visual predictions rather than relying solely on symbolic reasoning.
45
+
46
+ # 2.2. VLM for Robotic Planning
47
+
48
+ In robotics, several recent works have explored using VLMs for planning (Driess et al., 2023; Brohan et al., 2023b;a; Hu et al., 2023; Huang et al., 2023; Belkhale et al., 2024; Nasiriany et al., 2024; Liu et al., 2024a; Shi et al., 2024; Wake et al., 2024). However, these approaches either rely on symbolic state representations or make decisions in a single-step manner based only on current observations, without explicitly reasoning about future consequences or utilizing reflection mechanisms.
49
+
50
+ ![](images/fe75e946f6ef87c97111d3af6261bd00dd5a2f8572a1aa699a362d5b223e3c3a.jpg)
51
+ Figure 2. Training data generation. Training data for the reflection mechanism is collected by relabeling the rollouts. For each timestep, two training examples are generated: (Q1, A1) for action proposal and (Q2, A2) for reflection. $H$ is the imagination horizon, and $h$ is the history length. $a_{t}^{*}$ is the action label given by the expert policy.
52
+
53
54
+
55
+ While ReplanVLM (Mei et al., 2024b) and GameVLM (Mei et al., 2024a) use VLMs to replan robot actions based on execution feedback, they still rely on symbolic state representations rather than visual imagination of future states. Black et al. (2023) utilized a diffusion model to generate future visual states and executed them with a low-level goal-conditioned policy, but did not leverage these predictions for plan reflection or revision. Du et al. (2023) combines a VLM with video prediction for beam search, but suffers from prediction error accumulation and struggles with physics-based reasoning tasks.
56
+
57
+ Our framework addresses these limitations by enabling VLMs to imagine and evaluate potential future states through a diffusion-based dynamics model. This allows for sophisticated multi-step planning while maintaining the benefits of VLMs' pre-trained visual-language understanding. The reflection mechanism further enables the VLM to critique and refine its plans based on these imagined futures, leading to more robust long-horizon manipulation.
58
+
59
+ # 2.3. Robotic Task and Motion Planning
60
+
61
+ Robotic Task and Motion Planning (TAMP) has been extensively studied (Kaelbling & Lozano-Pérez, 2011; Garrett et al., 2020a,b). Traditional approaches often combine symbolic planning with motion planning but struggle with real-world physical interactions and visual inputs. Learning-based methods (Wang et al., 2021; Driess et al., 2020) show promise in handling uncertainty and complex dynamics but typically require significant task-specific engineering.
62
+
63
+ Our approach bridges this gap by leveraging VLMs' broad knowledge while adding structured physical reasoning through visual imagination and reflection. This enables robust long-horizon planning without requiring extensive task-specific engineering or large amounts of training data.
64
+
65
+ # 3. Preliminaries and Problem Statement
66
+
67
+ We formulate the multi-stage robotic manipulation planning problem as a partially observable Markov decision process (POMDP), defined by the tuple $(\mathcal{S},\mathcal{A},\mathcal{T},\mathcal{O},\mathcal{Z})$ . Here, $\mathcal{S}$ is the state space containing the full physical state of the environment, including object poses and physical properties; $\mathcal{A}$ is the action space consisting of high-level manipulation primitives $\{\text{pick up, insert,reorient, put down}\} \times \{\text{objects}\}$ , assuming a failure rate $\epsilon$ for each primitive; $\mathcal{T}(s_{t+1}|s_t,a_t)$ represents the transition dynamics capturing physical interactions; $\mathcal{O}$ is the observation space of RGB images; and $\mathcal{Z}(o_t|s_t)$ is the observation model mapping states to images.
68
+
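+ To make the action space concrete, the following is a minimal sketch of how the primitive-times-object action set and a noisy primitive execution could be modeled. It is not the paper's code; the names `PRIMITIVES`, `action_space`, and `step_with_failure`, as well as the example failure rate, are illustrative assumptions.
+
+ ```python
+ # Sketch of the high-level action space A = {pick up, insert, reorient, put down} x {objects}.
+ # Names and the example failure rate are illustrative assumptions, not the paper's implementation.
+ import random
+
+ PRIMITIVES = ["pick up", "insert", "reorient", "put down"]
+
+ def action_space(objects):
+     """Enumerate every 'primitive object' string, e.g. 'pick up red'."""
+     return [f"{prim} {obj}" for prim in PRIMITIVES for obj in objects]
+
+ def step_with_failure(env_step, state, action, eps=0.05):
+     """Apply an action primitive that fails with probability eps,
+     leaving the state unchanged on failure (one simple way to model
+     the per-primitive failure rate described above)."""
+     if random.random() < eps:
+         return state  # primitive failed, no state change
+     return env_step(state, action)
+
+ print(action_space(["red", "blue", "orange"])[:4])
+ ```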
69
+ Given a goal state $s_g$ , the objective is to find a policy $\pi$ that generates a sequence of actions to reach $s_g$ . Due to partial observability, the policy only has access to image observations, taking the form $\pi(a_t|I_t, I_g)$ where $I_t$ is the current observation and $I_g$ is the goal image. The policy is instantiated as a VLM agent $\pi_{\mathrm{VLM}}$ , which takes a multi-modal input of images and text, and generates action primitives in the form of text.
70
+
71
+ Our framework includes a pre-training phase and a post-training phase. The post-training phase builds on the framework of interactive imitation learning (Ross et al., 2011; Kelly et al., 2018), which learns a policy by interacting with the environment and receiving expert supervision in real time. Following the standard assumption in this setting, we assume access to an interactive expert policy $\pi_E$ that generates near-optimal actions $a^* = \pi_E(s)$ for any state $s$ at training time. In this paper, we instantiate such an expert policy with access to the full state of the environment to generate optimal actions, though it could also be obtained in other ways, e.g., from human demonstrations. However, the VLM policy only has access to image observations.
72
+
73
+ # 4. Reflective Planning with Vision Language Models
74
+
75
+ To address the challenges of physical interaction and long-horizon reasoning, we present a framework that incorporates VLMs with reflective planning. Our approach combines two key components: (1) a diffusion-based dynamics model that enables the VLM to imagine and evaluate future states, and (2) an interactive learning mechanism that allows the VLM to reflect on and revise its decisions based on these imagined outcomes. As shown in Fig. 1, these components work together to enable more robust manipulation planning while preserving the benefits of pre-trained VLMs.
76
+
77
+ # 4.1. Interactive VLM Policy Post-Training
78
+
79
+ While VLMs can generate actions based on visual inputs, they may hallucinate physically implausible solutions without actual interaction experience. To overcome this limitation and enable long-horizon reasoning, we introduce an interactive learning algorithm that teaches the VLM to reflect on and improve its decisions through direct interaction with the physical environment. This process further enhances a base VLM policy, which is initially trained on a fixed set of expert demonstrations. Similar to DAgger (Ross et al., 2011), we iteratively collect new data by rolling out the VLM policy in the environment and finetune the VLM policy with the aggregated data. As formulated in Algorithm 1, $N$ trajectories are collected in each iteration. At each timestep, we generate a learner action $a_{t}^{\dagger}$ by prompting the VLM with images of the goal and current states, and query an expert action $a_{t}^{*}$ from the oracle policy. The pairs $((I_g,I_t),a_t^*)$ are then added to the dataset for finetuning. To facilitate convergence, we execute the learner action $a_{t}^{\dagger}$ with probability $p$ and the expert action $a_{t}^{*}$ with probability $1 - p$, instead of always following the actions from the learner.
80
+
81
+ To generate training data for reflection, we can simply relabel a trajectory after it is terminated, as also illustrated in Fig. 2. Specifically, the image $I_{t + H}$ , which is a future observation following the action sequence $a_{t:t + H - 1}$ , is added to the context for reflection at timestep $t$ , and the VLM is still supervised to output the same expert action $a_t^*$ . Intuitively, this image provides additional information about the effect of executing the action sequence as a feedback, which can be leveraged by the VLM to decide whether the initially proposed action sequence leads to a promising future state.
82
+
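+ As a concrete illustration of this relabeling step (cf. Fig. 2), the sketch below turns one logged rollout into the two kinds of training examples described above. The record layout and argument names are assumptions made for illustration, not the paper's data format.
+
+ ```python
+ # Sketch of relabeling a logged rollout into (Q1, A1) proposal examples and
+ # (Q2, A2) reflection examples, following Fig. 2. The data layout is an assumption.
+ def relabel_rollout(goal_image, images, executed_actions, expert_actions, horizon):
+     """images[t] is the observation at step t; expert_actions[t] is a_t^*;
+     executed_actions[t:t+horizon] is the action sequence whose outcome is images[t+horizon]."""
+     propose_examples, reflect_examples = [], []
+     T = len(expert_actions)
+     for t in range(T):
+         # (Q1, A1): propose an action from the goal and current observation.
+         propose_examples.append(((goal_image, images[t]), expert_actions[t]))
+         # (Q2, A2): reflect on the executed plan using the observation reached H steps later.
+         if t + horizon < len(images):
+             plan = executed_actions[t:t + horizon]
+             reflect_examples.append(
+                 ((goal_image, images[t], images[t + horizon], plan), expert_actions[t])
+             )
+     return propose_examples, reflect_examples
+ ```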
83
+ In essence, we are generating two forms of question-answering examples from interaction with the environment. The first is to predict an optimal action given images of the goal and current state, and the second is to reflect on and revise an initial action sequence proposal by examining an additional future image. Since a VLM can flexibly take any text and images as input, these two tasks can be handled by a single VLM with two different prompt templates, as summarized in Fig. 2.
84
+
85
+ Algorithm 1 Interactive VLM Post-Training
86
+ Require: initial state distribution $\rho_0$ , goal state distribution $\rho_{g}$ , number of iterations $K$ , number of trajectories per iteration $N$ , episode length $T$ , imagination horizon $H$ , expert policy $\pi_{E}$ , expert demonstrations $\mathcal{D}^*$
87
+ 1: train base policy $\pi_{\mathrm{VLM}}$ on $\mathcal{D}^*$
88
+ 2: $\mathcal{D} \gets \mathcal{D}^*$
89
+ 3: for $i \gets 1$ to $K$ do
90
+ 4: $\mathcal{D}_{i} \gets \emptyset$
91
+ 5: // roll out policy $\pi_{\mathrm{VLM}}$ to collect data $\mathcal{D}_{i}$
92
+ 6: for $n \gets 1$ to $N$ do
93
+ 7: $s_0 \sim \rho_0$ ; $I_0 \gets \mathcal{Z}(s_0)$
94
+ 8: $s_g \sim \rho_g$ ; $I_g \gets \mathcal{Z}(s_g)$
95
+ 9: for $t \gets 0$ to $T - 1$ do
96
+ 10: $a_t^\dagger \sim \pi_{\mathrm{VLM}}(I_g, I_t)$ ; $a_t^* \sim \pi_E(s_g, s_t)$
97
+ 11: $a_t \gets a_t^\dagger$ if random() < p else $a_t^*$
98
+ 12: $s_{t+1} \gets \mathcal{T}(s_t, a_t)$ ; $I_{t+1} \gets \mathcal{Z}(s_{t+1})$
99
+ 13: end for
100
+ 14: $\mathcal{D}_i \gets \mathcal{D}_i \cup \{((I_g, I_t), a_t^*)\}_{0 \leq t < T}$
101
+ 15: $\mathcal{D}_i \gets \mathcal{D}_i \cup \{((I_g, I_t, I_{t+H}, a_{t:t+H-1}), a_t^*)\}_{0 \leq t < T}$
102
+ 16: end for
103
+ 17: $\mathcal{D} \gets \mathcal{D} \cup \mathcal{D}_i$
104
+ 18: finetune $\pi_{\mathrm{VLM}}$ on $\mathcal{D}$
105
+ 19: end for
106
+
107
+ See App. E for the full prompts and App. D.1 for the detailed VLM architecture.
108
+
109
+ The VLM is trained to generate actions aligned with expert actions in the dataset with a cross entropy loss:
110
+
111
+ $$
112
+ \min_{\pi_{\mathrm{VLM}}} \; \mathbb{E}_{\mathcal{D}} \Big[ \mathcal{L}_{\mathrm{CE}}\big(\pi_{\mathrm{VLM}}^{\mathrm{propose}}(a_{t} \mid I_{g}, I_{t}),\, a_{t}^{*}\big) + \mathcal{L}_{\mathrm{CE}}\big(\pi_{\mathrm{VLM}}^{\mathrm{reflect}}(a_{t} \mid I_{g}, I_{t}, I_{t+H}, a_{t:t+H-1}),\, a_{t}^{*}\big) \Big]. \tag{1}
113
+ $$
114
+
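+ A minimal PyTorch-style sketch of objective (1) is given below. It assumes the policy exposes per-example token logits for both prompt templates; the `policy.logits` interface and the batch field names are hypothetical.
+
+ ```python
+ # Sketch of the combined cross-entropy objective in Eq. (1).
+ # `policy.logits(prompt, target)` returning (num_tokens, vocab) logits is an assumed interface.
+ import torch
+ import torch.nn.functional as F
+
+ def reflect_vlm_loss(policy, batch):
+     losses = []
+     for ex in batch:
+         # Action-proposal term: condition on the goal and current images.
+         logits = policy.logits(ex["propose_prompt"], ex["expert_action_tokens"])
+         losses.append(F.cross_entropy(logits, ex["expert_action_tokens"]))
+         # Reflection term: additionally condition on the imagined future image and the plan.
+         logits = policy.logits(ex["reflect_prompt"], ex["expert_action_tokens"])
+         losses.append(F.cross_entropy(logits, ex["expert_action_tokens"]))
+     return torch.stack(losses).mean()
+ ```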
115
+ # 4.2. Diffusion Dynamics Model
116
+
117
+ A key component in reflective planning is predicting future states accurately when evaluating potential action sequences. While our interactive learning mechanism enables the VLM to learn from physical interactions, we need an additional capability during inference - the ability to imagine and evaluate hypothetical futures without actually executing actions in the environment. To address this, we develop a diffusion-based dynamics model (DDM) that efficiently generates predicted visual observations by conditioning on the current observation and a proposed action sequence. This allows the VLM to simulate the consequences of its actions before committing to them.
118
+
119
+ Building on advances in diffusion-based generative models (Rombach et al., 2021; Ho et al., 2020; Song et al., 2021), we formulate the forward dynamics prediction as an image-to-image translation task. Our diffusion dynamics model takes the current observation $I_{t}$ and action $a_{t}$ as input to predict the next observation $I_{t+1}$.
120
+
121
+ ![](images/58111c66a5badaeea96ae67453f9ac224608b0a9ee0cac77ed7eb85b72b158a5.jpg)
122
+ Figure 3. Architecture of Diffusion Dynamics Model, which consists of a latent encoder, text encoder, Diffusion UNet and latent decoder. The latent encoder and text encoder are frozen during training, while Diffusion UNet and latent decoder are finetuned on our task data. $\mathcal{N}$ : random noise.
123
+
124
+ Rather than training a diffusion model from scratch, which would require substantial computational resources and training data, we leverage the pretrained InstructPix2Pix model (Brooks et al., 2022), which has been trained on large-scale image editing datasets, as our base model.
125
+
126
+ Data. We curate a dataset for training the diffusion model. To encourage broader coverage of visited states, the data collection policy is a noised version of the oracle policy. Due to the difficulty of this task, we also include a few test data points to improve the fidelity and accuracy of the DDM. Details can be found in App. D.2.
127
+
128
+ Architecture. The model architecture is shown in Fig. 3. For the input $(I_{t},a_{t})$ , we first encode them into latent representation $z_{t}$ and $z_{a_t}$ with pretrained latent encoder and text encoder. Then we feed $z_{t}$ , a sampled noise $\mathcal{N}$ and the action condition $z_{a_t}$ into the diffusion UNet for de-noising. Finally, we decode the predicted $z_{t + 1}$ into a future observation $I_{t + 1}$ with a latent decoder.
129
+
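+ Since the DDM is initialized from InstructPix2Pix, one step of visual prediction can be sketched with the `diffusers` image-editing pipeline as below, treating the action string as the edit instruction. The checkpoint name and sampler settings shown are assumptions for illustration; the paper finetunes its own UNet and latent decoder rather than using the stock weights.
+
+ ```python
+ # Sketch of one-step future-image prediction with an InstructPix2Pix-style pipeline.
+ # Checkpoint and sampling settings are illustrative; the paper's DDM uses finetuned weights.
+ import torch
+ from diffusers import StableDiffusionInstructPix2PixPipeline
+ from PIL import Image
+
+ pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
+     "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
+ ).to("cuda")
+
+ def predict_next_observation(current_obs: Image.Image, action: str) -> Image.Image:
+     """Condition the diffusion UNet on the current frame I_t and the action text a_t."""
+     return pipe(
+         prompt=action,              # e.g. "insert blue"
+         image=current_obs,          # I_t
+         num_inference_steps=20,
+         image_guidance_scale=1.5,
+     ).images[0]                     # predicted I_{t+1}
+ ```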
130
+ Training. The training of DDM consists of two separate phases: UNet training and decoder training. The UNet training phase is to learn transformations from $z_{t}$ to $z_{t+1}$ conditioned on $z_{a_{t}}$ , while the latent decoder training is to adapt the pretrained VAE models into our task domain because our task requires precise reconstruction of small pieces on the table. Since we keep the latent encoder frozen, we can train the two phases in parallel.
131
+
132
+ # 4.3. Reflective Planning
133
+
134
+ With the VLM policy trained via interactive learning and the diffusion model serving as a dynamics proxy to imagine future outcomes, we now introduce our reflective planning mechanism for decision making at inference time. Alg. 2 shows the detailed process. We use $\tilde{I}$ and $\tilde{a}$ to denote the generated image and action, which are not actually observed or executed in the environment. To get the future image after $H$ steps, where $H$ is the planning horizon, we perform $H$ iterations of action proposal and diffusion generation. At each iteration, the VLM policy is prompted by the goal image $I_{g}$ and the generated image $\tilde{I}_{t + k}$ at the previous iteration to propose an action $\tilde{a}_{t + k}$ . The diffusion model $\tilde{\mathcal{T}}$ then generates the future image $\tilde{I}_{t + k + 1}$ conditioned on the previous image $\tilde{I}_{t + k}$ and the action $\tilde{a}_{t + k}$ . For the first iteration, the input image $\tilde{I}_t$ is just the current observation $I_{t}$ . After this process of imagination, the generated future image $\tilde{I}_{t + H}$ and the plan $\tilde{a}_{t:t + H - 1}$ are concatenated with the goal and current observation, and fed into the VLM policy for reflection. The VLM policy will then output the final action $a_{t}$ to be executed. Again, action proposal and reflection are performed by the same VLM policy with two different prompt templates, as indicated by the superscripts "propose" and "reflect".
135
+
136
137
+
138
+ # Algorithm 2 Reflective Planning (Inference)
139
+
140
+ Require: current image $I_{t}$ , goal image $I_{g}$ , imagination horizon $H$
141
+
142
+ 1: $\tilde{I}_t \gets I_t$
143
+ 2: for $k \gets 0$ to $H - 1$ do
144
+ 3: $\tilde{a}_{t + k}\gets \pi_{\mathrm{VLM}}^{\mathrm{propose}}(I_g,\tilde{I}_{t + k})$
145
+ 4: $\tilde{I}_{t+k+1} \gets \tilde{\mathcal{T}}(\tilde{I}_{t+k}, \tilde{a}_{t+k})$
146
+ 5: end for
147
+ 6: $a_{t}\gets \pi_{\mathrm{VLM}}^{\mathrm{reflect}}(I_{g},I_{t},\tilde{I}_{t + H},\tilde{a}_{t:t + H - 1})$
148
+ 7: Output: $a_{t}$
149
+
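+ A direct Python rendering of Alg. 2 is sketched below. Here `propose`, `reflect`, and `imagine` are assumed wrappers around the two VLM prompt templates and the diffusion dynamics model, respectively.
+
+ ```python
+ # Sketch of reflective planning at inference time (Alg. 2).
+ # `propose`, `reflect`, and `imagine` are assumed callables wrapping the VLM prompts and the DDM.
+ def reflective_plan(goal_image, current_image, propose, reflect, imagine, horizon):
+     imagined_image = current_image
+     plan = []
+     for _ in range(horizon):
+         # Propose the next action from the goal and the latest imagined frame.
+         action = propose(goal_image, imagined_image)
+         plan.append(action)
+         # Roll the diffusion dynamics model forward one step.
+         imagined_image = imagine(imagined_image, action)
+     # Reflect on the imagined outcome and output the (possibly revised) first action.
+     return reflect(goal_image, current_image, imagined_image, plan)
+ ```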
150
+ # 5. Multi-Stage Robotic Manipulation Planning Tasks
151
+
152
+ Inspired by Luo et al. (2024b), we procedurally generated a suite of multi-stage long-horizon manipulation tasks that require understanding of physical interactions and reasoning about the effects of long-term action sequences. The task is initialized with a board and a set of small pieces randomly placed on a table. The goal is to fully assemble the board by inserting the pieces into the board one by one. Examples of the initial and goal configurations are shown in Fig. 5. Detailed task generation process is included in App. A. Notably, most tasks include inter-locking pieces so that they can be inserted into the board only in a specific order. This requires strategically choosing the object to be manipulated at each step and inferring possible interaction between this object and the other objects already in the board. As an example, Fig. 5(b) shows the dependencies between the pieces in one of the tasks. The interlocking feature further necessitates the agent's ability to replan, enabling it to recover from failures caused by previous mistakes or bad initialization.
153
+
154
+ ![](images/191fcd1521b18f7ffd2d26518428f76336e352cd666931e8434e82e51e13bb9f.jpg)
155
+ Figure 4. Filmstrip of our method solving a complicated assembly task. Frames are indexed by timestep. The goal image is in the top-left corner (with a green border). Each frame is the observation after executing the action (in black) above it. The other action in gray is the original action proposed by the VLM if it is revised after reflection. We highlight the reflection process at timestep 15, where the VLM first proposes an action to pick up the purple brick, but after reflection, it chooses to pick up the yellow brick instead as the generated future state (red-bordered image) shows little progress towards the goal.
156
+
157
+ ![](images/5ede940bb1096191292566158dc312c0236dab89af03172d511c134fa94892df.jpg)
158
+ (a)
159
+
160
+ ![](images/1a7c781469ad16742ac30fc8f1250c87361a2087e9a682436f90bc1f20cc26b4.jpg)
161
+ (b)
162
+ Figure 5. Task examples. (a) Generated multi-stage manipulation tasks with interlocking pieces. Top: initial configurations. Bottom: goal configurations. See App. B for more examples. (b) The graph shows the dependencies between the objects in the blue assembly board on the left. Each node represents an object, and each directed edge indicates the predecessor object should be assembled before the successor object.
163
+
164
+ We focus on the high-level planning of this long-horizon manipulation task. We define a set of actions in the form of “[act] [obj]", where [act] $\in$ {pick up, insert, reorient, put down} is an action primitive, and [obj] denotes the object to be manipulated. Specifically, “pick up” grasps a piece that is not in hand and picks it up. It can then be inserted into the board using the "insert" action, or put back on the table using "put down". By invoking "reorient", the object in hand can be reoriented with the black fixture if necessary, so that it is in a suitable pose for insertion. Each action primitive is implemented as a rule-based script controller; however, integrating other low-level controllers, such as learning-based policies like behavior cloning, is also possible. We also designed an expert policy for the mentioned motor primitives; see App. C for implementation details.
165
+
166
167
+
168
+ # 6. Experiments
169
+
170
+ Our experiments evaluate the effectiveness of our method and analyze its key components. We aim to answer three key research questions. First, how well does our method perform in long-term planning, particularly when handling complex physical interactions? Second, how effectively does our method generalize across different object configurations and types, while maintaining the ability to reason and plan reactively in dynamic environments? Third, what is the impact of the reflection mechanism on the overall performance of our method? To address these questions, we conduct comprehensive experiments comparing ReflectVLM against: (1) state-of-the-art VLM models tested in a zero-shot fashion, (2) model-based planning approaches like MCTS, and (3) ablation studies examining the reflection mechanism. In this section, we first describe our experimental setup, followed by quantitative results and qualitative analysis.
171
+
172
173
+
174
+ # 6.1. Experiment Setup and Policy Training
175
+
176
+ To evaluate the generalization capabilities of different models, we generate two distinct task sets: a training set using the procedure described in Sec. 5, and a separate evaluation set containing previously unseen configurations. The evaluation tasks are specifically designed to test generalization across varying object configurations, colors, and spatial arrangements. We particularly emphasize challenging scenarios that require sophisticated physical reasoning and multi-step planning. For instance, some tasks begin with objects in physically obstructing positions that prevent direct task completion - requiring the policy to first remove the obstructing pieces and then develop a new plan for the original objective. Specifically, the training set contains 1000 different tasks; each generated task is randomized to five different initial spatial arrangements, and these tasks are used to pre-train the VLM policy. At each iteration of post-training, we randomly sample 200 out of these 1000 tasks to further train the VLM policy with the reflection mechanism. The evaluation set contains 100 different tasks that are unseen in the training set.
177
+
178
+ As mentioned in Sec. 3, our method utilizes an oracle policy operating in the environment's symbolic state space to generate expert demonstrations for training. This oracle achieves a $97\%$ success rate across tasks, but importantly, it operates with access to ground-truth state information. In contrast, our VLM policy must rely solely on visual observations. While alternative data sources like human demonstrations could be used for training, we chose this oracle-based approach to systematically study our method's capabilities under controlled conditions.
179
+
180
+ During the policy pre-training phase, we utilize the oracle policy to provide action labels, then finetune an LLaVa-1.5-13B model (Liu et al., 2023; 2024b) with standard supervised learning loss. This pre-training used 5,000 expert demonstrations (1,000 unique tasks $\times$ 5 initial configurations per task). In the post-training phase, we use the same oracle policy to further train the VLM policy from the previous stage using the procedure described in Alg. 1. For each iteration of post-training, we collect 1k trajectories by rolling out the VLM policy in the environment to generate examples for fine-tuning. See App. D for training details.
181
+
182
+ # 6.2. Experiment Results
183
+
184
+ In this subsection, we report the results of different methods and discuss their implications. Unless otherwise noted, numbers are reported across five runs; for some commercial VLMs, such as GPT-o1, we report only one run due to cost considerations.
185
+
186
+ Table 1. Post-training performance. Success rates (%) of post-training variants over training iterations.
187
+
188
+ <table><tr><td>Method</td><td>Iter. 1</td><td>Iter. 2</td><td>Iter. 3</td></tr><tr><td>w/o reflect</td><td>58.2</td><td>74.4</td><td>77.8</td></tr><tr><td>w/o reflect@test</td><td>64.4</td><td>76.0</td><td>82.2</td></tr><tr><td>reflect w/ diffusion</td><td>66.2</td><td>75.8</td><td>82.4</td></tr><tr><td>reflect w/ sim</td><td>66.8</td><td>75.4</td><td>85.4</td></tr></table>
189
+
190
+ Table 2. Inference computation cost. Inference wall clock time per step. MCTS result is averaged over 100 tasks and 1 seed; the others are averaged over 100 tasks and 5 seeds. All experiments are done on a single A100 GPU.
191
+
192
+ <table><tr><td>Method</td><td>Inference time (s)</td></tr><tr><td>Ours w/o reflect@test</td><td>0.45</td></tr><tr><td>Ours w/ diffusion</td><td>11.10</td></tr><tr><td>Ours w/ sim</td><td>6.05</td></tr><tr><td>MCTS</td><td>391.42</td></tr></table>
193
+
194
+ VLM zero-shot To evaluate the capabilities of state-of-the-art vision-language models, we tested several leading VLMs including LLaVAOneVision (Li et al., 2024), Gemini-2.0-flash (Google, 2024), Gemini-2.0-flash-thinking (Google, 2024), GPT-4o (OpenAI, 2024a), and GPT-o1 (OpenAI, 2024b). We specifically included Gemini-2.0-flash-thinking and GPT-o1 as they have demonstrated superior reasoning capabilities across various VLM benchmarks. As shown in Fig. 6, all models achieved notably low success rates on our tasks. Even GPT-o1, currently the most advanced proprietary model, succeeded in only 15 out of 100 tasks, primarily on simpler cases that did not require sophisticated physical reasoning about interlocking mechanisms. While Gemini-2.0-flash-thinking and GPT-o1 showed marginally better performance compared to other models, indicating some improved reasoning capabilities, their performance remains insufficient for solving our complex manipulation tasks. This significant performance gap confirms the necessity of our proposed method for handling physically-grounded reasoning tasks. Detailed evaluation procedures and results can be found in App. F.
195
+
196
+ MCTS To compare with model-based planning approaches, we implemented a VLM-based MCTS policy. This implementation uses our pretrained VLM policy as a base policy for generating candidate actions when expanding tree nodes, with value estimation provided by the oracle policy from the simulator. See App. F for implementation details. As shown in Fig. 6, MCTS achieves a $24.0\%$ success rate—higher than zero-shot VLMs but lower than our method. Notably, while the pretrained VLM policy alone achieves a $47.8\%$ success rate, adding MCTS actually degrades performance. Our analysis revealed that although
197
+
198
+ ![](images/cc5dd1f631ea40d0aaa6c90f86946103b92b7ba100039c2c91cb52dbf0a0857d.jpg)
199
+ Figure 6. Performance of our method and baselines. Success rate $(\%)$ on 100 tasks. For the zero-shot test of state-of-the-art VLMs and MCTS, the experiments were conducted once; for other methods, the results are the average of five seeds.
200
+
201
+ MCTS helped with some challenging tasks, it would sometimes incorrectly override valid plans from the base VLM policy. We found MCTS to be particularly challenging to tune effectively for our domain for several reasons: (1) it is highly sensitive to value function quality, (2) our tasks require nuanced physical reasoning that is difficult to capture in a value function, and (3) the possibility of succeeding from any state (by clearing the board and starting over) creates minimal value differences between states. These limitations highlight the advantages of our proposed method, which offers a lightweight, flexible approach that requires minimal tuning and can be readily integrated with any VLM policy.
202
+
203
+ ReflectVLM Our full method outlined in Alg. 1 and 2 incorporates reflection mechanisms in both training and inference phases. To systematically evaluate the impact of reflection, we conducted ablation experiments across several variants of our method. As reported in Fig. 6, the variant without reflection in both training and inference achieved the lowest performance among our method's variants, though it still significantly outperformed the pretrained VLM baseline. The full method using a simulator during inference achieves the highest success rate, serving as an upper bound for our method's performance. When using a diffusion model instead of a simulator during inference, performance degrades slightly. This is unsurprising, as our tasks require nuanced understanding of physics and temporal dynamics—areas where current generative models still face challenges (Kang et al., 2024; Motamed et al., 2025). We expect our method's performance to improve as generative models advance. We also report the post-training dynamics in Table 1. It's observed that the performance of all variants increases as more training is performed and the full method did achieve the highest performance as mentioned above. While the absolute performance gap between variants may appear modest, the additional tasks solved by including reflection are qualitatively significant. These are typically complex scenarios requiring multiple replanning attempts, such as removing previously placed objects to explore alternative solutions—tasks the pretrained VLM
204
+
205
+ consistently failed to solve. Notably, even without reflection during inference, our method achieves higher success rates than the pretrained baseline. This suggests that the natural language reflection prompts during training help the VLM policy develop better implicit reasoning capabilities. Fig. 4 illustrates a representative example. In this complex task, the reflection mechanism iteratively revised suboptimal actions initially proposed by the VLM policy by identifying potentially unfavorable future states. This reflection capability proved crucial for success, as the long-horizon nature of the task required reactive planning and continuous adjustment of the solution strategy. Another point to consider is computation efficiency. Table 2 shows the wall-clock time required per inference step. Compared to MCTS, our method requires only a fraction of the computation time while achieving substantially higher performance, making it particularly appealing as a lightweight and flexible solution for real-world applications.
206
+
207
+ # 7. Discussion
208
+
209
+ In this work, we presented a novel post-training strategy with reflection to improve VLM policies for long-horizon manipulation tasks, demonstrating superior planning capabilities with significantly less compute than traditional approaches like MCTS. Our current implementation opens up exciting future directions: while we currently use final outcomes for reflection due to VLM context constraints, future architectures with expanded context windows could enable richer intermediate feedback for more precise action refinement; the diffusion model's generation capabilities could be augmented with physical constraints and improved architectures to enhance prediction stability over longer horizons; and our single-round reflection approach could be extended to multiple rounds for iterative refinement while maintaining computational efficiency. We believe our method would benefit from continued advances in VLMs and generative models, and we hope it could establish a new foundation with broad applicability to sequential decision-making domains requiring visual understanding, physical reasoning, and long-horizon planning.
210
+
211
+ # References
212
+
213
+ Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511, 2023.
214
+ Bai, J., Bai, S., Du, S., Han, S., Liu, P., et al. Qwen-vl: A versatile vision-language model for understanding, generation, and retrieval. arXiv preprint arXiv:2308.12966, 2023.
215
+ Belkhale, S., Ding, T., Xiao, T., Sermanet, P., Vuong, Q., Tompson, J., Chebotar, Y., Dwibedi, D., and Sadigh, D. Rt-h: Action hierarchies using language, 2024. URL https://arxiv.org/abs/2403.01823.
216
+ Black, K., Nakamoto, M., Atreya, P., Walke, H., Finn, C., Kumar, A., and Levine, S. Zero-shot robotic manipulation with pretrained image-editing diffusion models, 2023. URL https://arxiv.org/abs/2310.10639.
217
+ Brohan, A., Brown, N., Carbajal, J., Chebotar, Y., Chen, X., Choromanski, K., Ding, T., Driess, D., Dubey, A., Finn, C., Florence, P., Fu, C., Arenas, M. G., Gopalakrishnan, K., Han, K., Hausman, K., Herzog, A., Hsu, J., Ichter, B., Irpan, A., Joshi, N., Julian, R., Kalashnikov, D., Kuang, Y., Leal, I., Lee, L., Lee, T.-W. E., Levine, S., Lu, Y., Michalewski, H., Mordatch, I., Pertsch, K., Rao, K., Reymann, K., Ryoo, M., Salazar, G., Sanketi, P., Sermanet, P., Singh, J., Singh, A., Soricut, R., Tran, H., Vanhoucke, V., Vuong, Q., Wahid, A., Welker, S., Wohlhart, P., Wu, J., Xia, F., Xiao, T., Xu, P., Xu, S., Yu, T., and Zitkovich, B. Rt-2: Vision-language-action models transfer web knowledge to robotic control, 2023a. URL https://arxiv.org/abs/2307.15818.
218
+ Brohan, A., Brown, N., Carbajal, J., Chebotar, Y., Dabis, J., Finn, C., Gopalakrishnan, K., Hausman, K., Herzog, A., Hsu, J., Ibarz, J., Ichter, B., Irpan, A., Jackson, T., Jesmonth, S., Joshi, N. J., Julian, R., Kalashnikov, D., Kuang, Y., Leal, I., Lee, K.-H., Levine, S., Lu, Y., Malla, U., Manjunath, D., Mordatch, I., Nachum, O., Parada, C., Peralta, J., Perez, E., Pertsch, K., Quiambao, J., Rao, K., Ryoo, M., Salazar, G., Sanketi, P., Sayed, K., Singh, J., Sontakke, S., Stone, A., Tan, C., Tran, H., Vanhoucke, V., Vega, S., Vuong, Q., Xia, F., Xiao, T., Xu, P., Xu, S., Yu, T., and Zitkovich, B. Rt-1: Robotics transformer for real-world control at scale, 2023b. URL https:// arxiv.org/abs/2212.06817.
219
+ Brooks, T., Holynski, A., and Efros, A. A. Instructpix2pix: Learning to follow image editing instructions. arXiv preprint arXiv:2211.09800, 2022.
220
+ Chen, B., Xu, Z., Kirmani, S., Ichter, B., Driess, D., Florence, P., Sadigh, D., Guibas, L., and Xia, F. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities, 2024. URL https://arxiv.org/abs/2401.12168.
221
+
222
223
+ Chen, X., Dai, J., Li, X., Peng, B., Singh, M., Tao, S., Wang, X., Wang, Y., Xia, Y., et al. Pali-x: On scaling up a multilingual vision and language model. arXiv preprint arXiv:2305.18565, 2023.
224
+ Cheng, K., Li, Y., Xu, F., Zhang, J., Zhou, H., and Liu, Y. Vision-language models can self-improve reasoning via reflection, 2024. URL https://arxiv.org/abs/2411.00855.
225
+ Cui, J. and Trinkle, J. Toward next-generation learned robot manipulation. Science Robotics, 6, 2021.
226
+ Driess, D., Ha, J.-S., and Toussaint, M. Deep visual reasoning: Learning to predict action sequences for task and motion planning from an initial scene image, 2020. URL https://arxiv.org/abs/2006.05398.
227
+ Driess, D., Black, A., Kataoka, H., Tsurumine, Y., Koyama, Y., Mansard, N., Fox, D., Choromanski, K., Ichter, B., Hausman, K., et al. Palm-e: An embodied multimodal language model. arXiv preprint arXiv:2303.03378, 2023.
228
+ Du, Y., Yang, M., Florence, P., Xia, F., Wahid, A., Ichter, B., Sermanet, P., Yu, T., Abbeel, P., Tenenbaum, J. B., Kaelbling, L., Zeng, A., and Tompson, J. Video language planning, 2023. URL https://arxiv.org/abs/2310.10625.
229
+ Gao, J., Sarkar, B., Xia, F., Xiao, T., Wu, J., Ichter, B., Majumdar, A., and Sadigh, D. Physically grounded vision-language models for robotic manipulation, 2024. URL https://arxiv.org/abs/2309.02561.
230
+ Garrett, C. R., Chitnis, R., Holladay, R., Kim, B., Silver, T., Kaelbling, L. P., and Lozano-Pérez, T. Integrated task and motion planning, 2020a. URL https://arxiv.org/abs/2010.01083.
231
+ Garrett, C. R., Lozano-Pérez, T., and Kaelbling, L. P. Pddlstream: Integrating symbolic planners and blackbox samplers via optimistic adaptive planning, 2020b. URL https://arxiv.org/abs/1802.08705.
232
+ Google. Introducing gemini: Our largest and most capable ai model. https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#ceo-message, 2024. Accessed: 2024-02-14.
233
+ Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models, 2020. URL https://arxiv.org/abs/2006.11239.
234
+
235
+ Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., and Chen, W. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9.
236
+ Hu, Y., Lin, F., Zhang, T., Yi, L., and Gao, Y. Look before you leap: Unveiling the power of gpt-4v in robotic vision-language planning, 2023. URL https://arxiv.org/abs/2311.17842.
237
+ Huang, J., Chen, X., Mishra, S., Zheng, H. S., Yu, A. W., Song, X., and Zhou, D. Large language models cannot self-correct reasoning yet, 2024. URL https://arxiv.org/abs/2310.01798.
238
+ Huang, W., Wang, C., Zhang, R., Li, Y., Wu, J., and Fei-Fei, L. Voxposer: Composable 3d value maps for robotic manipulation with language models, 2023. URL https://arxiv.org/abs/2307.05973.
239
+ Kaelbling, L. P. and Lozano-Pérez, T. Hierarchical task and motion planning in the now. In 2011 IEEE International Conference on Robotics and Automation, pp. 1470-1477, 2011. doi: 10.1109/ICRA.2011.5980391.
240
+ Kang, B., Yue, Y., Lu, R., Lin, Z., Zhao, Y., Wang, K., Huang, G., and Feng, J. How far is video generation from world model: A physical law perspective, 2024. URL https://arxiv.org/abs/2411.02385.
241
+ Kelly, M., Sidrane, C., Driggs-Campbell, K., and Kochenderfer, M. J. Hg-dagger: Interactive imitation learning with human experts. 2019 International Conference on Robotics and Automation (ICRA), pp. 8077-8083, 2018. URL https://api.semanticscholar.org/CorpusID:52939433.
242
+ Kroemer, O., Niekum, S., and Konidaris, G. A review of robot learning for manipulation: Challenges, representations, and algorithms, 2020. URL https://arxiv.org/abs/1907.03146.
243
+ Li, B., Zhang, Y., Guo, D., Zhang, R., Li, F., Zhang, H., Zhang, K., Zhang, P., Li, Y., Liu, Z., and Li, C. Llava-onevision: Easy visual task transfer, 2024. URL https://arxiv.org/abs/2408.03326.
244
+ Liu, F., Fang, K., Abbeel, P., and Levine, S. Moka: Openworld robotic manipulation through mark-based visual prompting, 2024a. URL https://arxiv.org/abs/2403.03174.
245
+ Liu, H., Li, C., Wu, Q., and Lee, Y. J. Visual instruction tuning, 2023. URL https://arxiv.org/abs/2304.08485.
246
+
247
+ Liu, H., Li, C., Li, Y., and Lee, Y. J. Improved baselines with visual instruction tuning, 2024b. URL https://arxiv.org/abs/2310.03744.
248
+ Luo, J., Xu, C., Geng, X., Feng, G., Fang, K., Tan, L., Schaal, S., and Levine, S. Multistage cable routing through hierarchical imitation learning. IEEE Transactions on Robotics, 40:1476-1491, 2024a. doi: 10.1109/TRO.2024.3353075.
249
+ Luo, J., Xu, C., Liu, F., Tan, L., Lin, Z., Wu, J., Abbeel, P., and Levine, S. Fmb: A functional manipulation benchmark for generalizable robotic learning. The International Journal of Robotics Research, 2024b.
250
+ Madaan, A., Tandon, N., Gupta, P., Hallinan, S., Gao, L., Wiegreffe, S., Alon, U., Dziri, N., Prabhumoye, S., Yang, Y., et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36, 2024.
251
+ Mei, A., Wang, J., Zhu, G.-N., and Gan, Z. Gamevlm: A decision-making framework for robotic task planning based on visual language models and zero-sum games. arXiv preprint arXiv:2405.13751, 2024a.
252
+ Mei, A., Zhu, G.-N., Zhang, H., and Gan, Z. Replanvlm: Replanning robotic tasks with visual language models. IEEE Robotics and Automation Letters, 2024b.
253
+ Motamed, S., Culp, L., Swersky, K., Jaini, P., and Geirhos, R. Do generative video models learn physical principles from watching videos?, 2025. URL https://arxiv.org/abs/2501.09038.
254
+ Nasiriany, S., Xia, F., Yu, W., Xiao, T., Liang, J., Dasgupta, I., Xie, A., Driess, D., Wahid, A., Xu, Z., Vuong, Q., Zhang, T., Lee, T.-W. E., Lee, K.-H., Xu, P., Kirmani, S., Zhu, Y., Zeng, A., Hausman, K., Heess, N., Finn, C., Levine, S., and Ichter, B. Pivot: Iterative visual prompting elicits actionable knowledge for vlms, 2024. URL https://arxiv.org/abs/2402.07872.
255
+ OpenAI. Gpt-4o system card, 2024a. URL https:// arxiv.org/abs/2410.21276.
256
+ OpenAI. Openai o1 system card, 2024b. URL https://arxiv.org/abs/2412.16720.
257
+ Pan, L., Saxon, M., Xu, W., Nathani, D., Wang, X., and Wang, W. Y. Automatically correcting large language models: Surveying the landscape of diverse self-correction strategies. arXiv preprint arXiv:2308.03188, 2023.
258
+ Renze, M. and Guven, E. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682, 2024.
259
+
260
+ Rombach, R., Blattmann, A., Lorenz, D., Esser, P., and Ommer, B. High-resolution image synthesis with latent diffusion models, 2021.
261
+ Ross, S., Gordon, G., and Bagnell, D. A reduction of imitation learning and structured prediction to no-regret online learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 627-635, 2011.
262
+ Shi, L. X., Hu, Z., Zhao, T. Z., Sharma, A., Pertsch, K., Luo, J., Levine, S., and Finn, C. Yell at your robot: Improving on-the-fly from language corrections, 2024. URL https://arxiv.org/abs/2403.12910.
263
+ Shinn, N., Cassano, F., Gopinath, A., Narasimhan, K., and Yao, S. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36, 2024.
264
+ Silver, D., Schrittwieser, J., Simonyan, K., Antonoglou, I., Huang, A., Guez, A., Hubert, T., Baker, L., Lai, M., Bolton, A., et al. Mastering the game of go without human knowledge. nature, 550(7676):354-359, 2017.
265
+ Song, Y., Sohl-Dickstein, J., Kingma, D. P., Kumar, A., Ermon, S., and Poole, B. Score-based generative modeling through stochastic differential equations, 2021. URL https://arxiv.org/abs/2011.13456.
266
+ Wake, N., Kanehira, A., Sasabuchi, K., Takamatsu, J., and Ikeuchi, K. Gpt-4v(ision) for robotics: Multimodal task planning from human demonstration. IEEE Robotics and Automation Letters, 2024.
267
+ Wang, X., Wei, J., Schuurmans, D., Le, Q., Chi, E., Narang, S., Chowdhery, A., and Zhou, D. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022.
268
+ Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., and Hajishirzi, H. Self-instruct: Aligning language models with self-generated instructions, 2023. URL https://arxiv.org/abs/2212.10560.
269
+ Wang, Z., Garrett, C. R., Kaelbling, L. P., and Lozano-Pérez, T. Learning compositional models of robot skills for task and motion planning, 2021. URL https://arxiv.org/abs/2006.06444.
270
+ Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.
271
+ Yao, S., Yu, D., Zhao, J., Shafran, I., Griffiths, T., Cao, Y., and Narasimhan, K. Tree of thoughts: Deliberate problem solving with large language models. Advances in Neural Information Processing Systems, 36, 2024.
272
+
273
274
+ Yu, X., Peng, B., Vajipey, V., Cheng, H., Galley, M., Gao, J., and Yu, Z. Exact: Teaching ai agents to explore with reflective-mcts and exploratory learning, 2025. URL https://arxiv.org/abs/2410.02052.
275
+
276
+ # A. Task generation
277
+
278
+ Here we describe the procedure for generating assembly boards in detail with an example. A board is discretized into voxels and can be represented by a 3D array, where each value indicates the piece the voxel belongs to. Initially, none of the voxels is occupied, so they are all set to an empty value 0, as shown in Fig. 7(a). Then we iteratively add pieces to the board. We first sample the size of the base board, which is (12, 12, 3) in this example (Fig. 7(b)). Then we set these voxels to 1 to indicate they belong to the base board. We also maintain a variable max_height, which represents the highest layer that contains non-zero voxels. To generate a brick, we sample its size and position subject to some constraints (Fig. 7(c)). The first two constraints ensure that this brick is within the range of the base board, and the third constraint makes sure this brick will intersect with some previously generated brick. As before, we set the value of the red voxels to 2 to indicate they are from the new brick. Note that the voxels in the lower layer previously had a value of 1 since they belonged to the base board, but now their value is rewritten to 2. This also creates a hole in the base board. After generating this brick, we also update max_height to 4 since we now have 4 layers. Fig. 7(d) shows the process of generating another brick. As the new blue brick intersects with the old red brick at the four critical voxels highlighted in purple (Fig. 7(e)), we can assign the value of these critical voxels to either that of the red brick or the blue one. For example, keeping these voxels assigned to the red brick results in an opening in the blue one (Fig. 7(f)). Stopping the generation process here gives us a board with three interlocking pieces, as shown in Fig. 7(g).
279
+
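+ The voxel bookkeeping described above can be sketched as follows. Sizes, constraint checks, and sampling ranges are simplified assumptions that only illustrate the array manipulation, not the full generator.
+
+ ```python
+ # Simplified sketch of the voxelized board generation described above.
+ # Sizes and constraints are illustrative; the real generator enforces additional checks.
+ import numpy as np
+
+ def make_base_board(width=12, depth=12, height=3, max_layers=8):
+     voxels = np.zeros((width, depth, max_layers), dtype=np.int8)  # 0 = empty
+     voxels[:, :, :height] = 1                                     # 1 = base board
+     return voxels
+
+ def add_brick(voxels, piece_id, x, y, z, size):
+     """Carve a brick into the voxel grid; overwritten base-board voxels become a hole."""
+     sx, sy, sz = size
+     voxels[x:x + sx, y:y + sy, z:z + sz] = piece_id
+     return voxels
+
+ board = make_base_board()
+ board = add_brick(board, piece_id=2, x=3, y=3, z=1, size=(2, 6, 3))  # a brick intersecting the base
+ print(int(board.max()), "piece ids used so far")  # highest piece id present in the grid
+ ```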
280
+ ![](images/5502bdeee74231e8b9efe5eff6b30eaf416430ff1ad6e49512a9f7ce8306fbc7.jpg)
281
+
282
+ ![](images/3f12a7789066a29238f7aac709c0a20272b2a0ef1e6941105f41c5ce5eac7cc3.jpg)
283
+
284
+ ![](images/687de5c9e1f3a8524fc356b64b4ffb81c06e7c61a5d2da199e9ce32b4064ac3e.jpg)
285
+
286
+ ![](images/015a891f271359971f95828585e7385546f1aac2cd35a11f0463b59e158d8b70.jpg)
287
+
288
+ ![](images/99252aef05d197bc41f028d8c64f44435966696d935d98085f39936e4a45dd52.jpg)
289
+ Figure 7. Example of task generation. (a) Voxel representation of the board. (b) Generating a base board. (c) Generating a red brick. (d) Generating another blue brick. (e) Critical voxels (highlighted in purple) at the intersection of the two bricks. (f) Handling intersection by assigning the critical voxels to the red brick. (g) Explosion view of the board consisting of three interlocking pieces.
290
+
291
+ ![](images/cf375759c4760677398e0b85cab776a97ba1d7415a337952ffcd88a947bbf160.jpg)
292
+
293
+ ![](images/51d94dfeec8c42a87a9502ac90638b3ac7f4f0c0f20421d4199798c58c7cf9f1.jpg)
294
+
295
+ ![](images/ff309af81b8380e8ac2108ab783322d74a608eb4732df349a6b11ac3b5065d9e.jpg)
296
+
297
+ # B. Samples of generated tasks
298
+
299
+ ![](images/7340ebaa6e35b022a61007cd320f4e16ac9f3622cb5c8475caaf08553e00213c.jpg)
300
+ Figure 8. Samples of generated tasks. We procedurally generate a variety of multi-stage manipulation tasks, ranging from simple peg insertion to complex assembly tasks that contains multiple interlocking pieces. Top: initial configurations. Bottom: goal configurations.
301
+
302
+ # C. Expert policy
303
+
304
+ The expert policy assumes access to the states of the objects in the simulator, such as the position and orientation of each piece. It is also provided with the dependency graph of the task, as discussed in Sec. 5. We define the status of each piece to be one of the following:
305
+
306
+ - DONE: if it is properly inserted into the board;
307
+ - READY: if it is not inserted yet but ready to be manipulated;
308
+ - BAD_B: if it is in a bad state because it is blocking other bricks, implying it needs to be removed;
309
+ - BAD_D: if it is in a bad state because it is down, implying it needs to be reoriented;
310
+ - BLOCKED_P: if it is blocked because some predecessor brick(s) should be inserted before it;
311
+ - BLOCKED_S: if it is blocked because some successor brick(s) has been inserted before it.
312
+
313
+ Based on the status of each piece, we can also define a set of possible statuses for the whole assembly task:
314
+
315
+ - DONE: if the board is fully assembled, i.e., all pieces are in DONE state;
316
+ - READY: if some brick is in READY or BAD_D state;
317
+ - BAD_B: if we need to reset some brick(s) to proceed as it is blocking other bricks.
318
+
319
+ When queried, the expert policy first checks the status of each piece according to the simulation states, and decides the status of the whole task based on the statuses of all pieces. Then it decides the action to take following Algorithm 3.
320
+
321
+ Algorithm 3 Expert Policy
322
+ Require: task status status_global, object in hand obj_hand
+ 1: if obj_hand is not None then
+ 2: if all predecessors of obj_hand are DONE then
+ 3: if obj_hand is in BAD_D state then
+ 4: return "reorient obj_hand"
+ 5: else if obj_hand is in BLOCKED_S state then
+ 6: return "put down obj_hand"
+ 7: else
+ 8: return "insert obj_hand"
+ 9: end if
+ 10: else
+ 11: return "put down obj_hand"
+ 12: end if
+ 13: else
+ 14: if status_global == READY then
+ 15: choose an object obj in READY or BAD_D state
+ 16: return "pick up obj"
+ 17: else if status_global == BAD_B then
+ 18: choose an object obj in BAD_B state
+ 19: return "pick up obj"
+ 20: else
+ 21: return "done"
+ 22: end if
+ 23: end if
323
+
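+ For illustration, the decision rule in Algorithm 3 can be written compactly as below. The status names mirror the definitions above, while the dictionaries used to pass the piece statuses and dependency graph are assumptions.
+
+ ```python
+ # Compact sketch of Algorithm 3. Piece/task statuses mirror the definitions above;
+ # the dictionary-based data structures are illustrative assumptions.
+ def expert_action(task_status, obj_in_hand, piece_status, predecessors):
+     if obj_in_hand is not None:
+         if all(piece_status[p] == "DONE" for p in predecessors[obj_in_hand]):
+             if piece_status[obj_in_hand] == "BAD_D":
+                 return f"reorient {obj_in_hand}"
+             if piece_status[obj_in_hand] == "BLOCKED_S":
+                 return f"put down {obj_in_hand}"
+             return f"insert {obj_in_hand}"
+         return f"put down {obj_in_hand}"
+     if task_status == "READY":
+         obj = next(o for o, s in piece_status.items() if s in ("READY", "BAD_D"))
+         return f"pick up {obj}"
+     if task_status == "BAD_B":
+         obj = next(o for o, s in piece_status.items() if s == "BAD_B")
+         return f"pick up {obj}"
+     return "done"
+ ```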
324
+ # D. Training details
325
+
326
+ # D.1. VLM Policy
327
+
328
+ Architecture. As shown in Fig. 9, the architecture of our VLM consists of a vision encoder and a Large Language Model (LLM). By default, we use clip-vit-large-patch14-336 as the vision encoder, and vicuna-13b-v1.5 as the LLM. We initialize our VLM with LLaVA-v1.5 weights that are pre-trained on general visual instruction tuning datasets.
329
+
330
+ ![](images/c413e30b6fbbfbd1020b89b5c20d06ef882a565228e264fae7f97702ec2cde09.jpg)
331
+ Figure 9. Architecture of our VLM. The model consists of a vision encoder and an LLM. We also add Low-Rank Adaptation (LoRA) (Hu et al., 2022) layers to LLM for efficient adaptation. The input sequence contains interleaved images and text, where images are encoded into latent embeddings with a shared vision encoder. Finally, the concatenation of text and image embeddings are fed into VLM for multimodal reasoning.
332
+
333
+ Since our task prompts consist of interleaved images and text (refer to Sec. E), we use a shared vision encoder to extract latent embeddings and concatenate them back into the input sequence.
334
+
335
+ Training Parameters. The full training parameters are listed in Table 3. For efficient adaptation of VLM to our task, we only finetune newly added LoRA (Hu et al., 2022) layers. The rank of LoRA layers is 128 by default.
336
+
337
+ Table 3. Training parameters of VLM.
338
+
339
+ <table><tr><td rowspan="2">Res</td><td rowspan="2">LoRA Rank</td><td rowspan="2">Training Epoch</td><td rowspan="2">Batch Size</td><td rowspan="2">Optimizer</td><td rowspan="2">Warmup Epoch</td><td colspan="2">Learning rate</td><td rowspan="2">Weight Decay</td><td rowspan="2">LR Schedule</td></tr><tr><td>BC</td><td>Iter. 1,2,3</td></tr><tr><td>336px</td><td>128</td><td>1</td><td>128</td><td>AdamW</td><td>0.03</td><td>5e-5</td><td>1e-5</td><td>0.0</td><td>Cosine</td></tr></table>
340
+
341
+ # D.2. Diffusion Dynamics Model
342
+
343
+ Data Generation. We generate $10K$ different boards and use sub-optimal policies to collect transitions. The sub-optimal policies are implemented by setting a probability $p = \{0.2, 0.5, 0.7, 0.9, 1.0\}$ to replace the expert action by a random action. We collect $50K$ trajectories; each has a maximum length of 50 and is terminated upon success. In total, we have about $1M$ transitions. We randomly sample $50K$ transitions for evaluation, and the rest is used for training.
344
+
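+ The noised data-collection policy amounts to replacing the expert action with a random valid action at a fixed probability. A minimal sketch (function names assumed) is:
+
+ ```python
+ # Sketch of the sub-optimal data-collection policy: with probability p the expert
+ # action is replaced by a random valid action. Names are illustrative assumptions.
+ import random
+
+ def noisy_expert_action(expert_action, valid_actions, p):
+     return random.choice(valid_actions) if random.random() < p else expert_action
+ ```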
345
+ Training Parameters. The full training parameters are listed in Table 4. We initialize the Diffusion Dynamics Model with the pretrained InstructPix2Pix model (Brooks et al., 2022).
346
+
347
+ Table 4. Training parameters of Diffusion Dynamics Models.
348
+
349
+ <table><tr><td>Model</td><td>Res</td><td>Training Steps</td><td>Batch Size</td><td>Optimizer</td><td>Warmup Steps</td><td>Learning Rate</td><td>Weight Decay</td><td>Beta1, Beta2</td><td>Grad Norm</td><td>LR Schedule</td></tr><tr><td>UNet</td><td>512px</td><td>20K</td><td>640</td><td>AdamW</td><td>2K</td><td>1e-4</td><td>0.01</td><td>0.9, 0.999</td><td>1.0</td><td>Cosine</td></tr><tr><td>Decoder</td><td>512px</td><td>4K</td><td>160</td><td>AdamW</td><td>1K</td><td>1e-7</td><td>0.01</td><td>0.9, 0.999</td><td>1.0</td><td>Cosine</td></tr></table>
350
+
351
+ # E. Prompts
352
+
353
+ # E.1. Action proposal prompt
354
+
355
+ There is a puzzle consisting of a board and several pieces with different colors on the table. The goal is to assemble the puzzle with the robot arm. In each step, one of the following four actions can be taken: pick up [obj], put down [obj], reorient [obj], and insert [obj], where [obj] refers to the piece to be manipulated. The image of the goal state is: <image>. The image of the current state is: <image>. The most recently executed actions are: {history}. What action should be taken next? Note that [obj] should be a color chosen from the following list: {colors}.
356
+
357
+ # E.2. Reflection prompt
358
+
359
+ There is a puzzle consisting of a board and several pieces with different colors on the table. The goal is to assemble the puzzle with the robot arm. In each step, one of the following four actions can be taken: pick up [obj], put down [obj], reorient [obj], and insert [obj], where [obj] refers to the piece to be manipulated. The image of the goal state is: <image>. The image of the current state is: <image>. The most recently executed actions are: {history}. The next five steps planned by the model is {init_plan}, from which we are going to only execute the first action. Note that if the full plan was executed sequentially, the future state would be: <image>. What action should be taken for the immediate next step? Note that [obj] should be a color chosen from the following list: {colors}. You can modify the initial plan if it leads to an undesired future state.
360
+
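+ At inference time, the two templates above are instantiated with the rollout context. A minimal sketch of filling them is shown below; the template variables {history}, {init_plan}, and {colors} come from the prompts above, while the function names and the "none" placeholder are assumptions, and the <image> tokens are left in place for the VLM's multimodal input.
+
+ ```python
+ # Sketch of filling the action-proposal and reflection templates above.
+ # The template strings are assumed to be stored verbatim, with {history}, {colors},
+ # and {init_plan} as format fields and <image> placeholders left untouched.
+ def fill_propose_prompt(template, history, colors):
+     return template.format(history=", ".join(history) or "none",
+                            colors=", ".join(colors))
+
+ def fill_reflect_prompt(template, history, init_plan, colors):
+     return template.format(history=", ".join(history) or "none",
+                            init_plan=", ".join(init_plan),
+                            colors=", ".join(colors))
+ ```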
361
+ # F. Baseline details
362
+
363
+ # F.1. Zero-shot VLMs
364
+
365
+ We prompt state-of-the-art closed-source and open-source VLMs for zero-shot evaluation, including LLaVA-Onevision, Gemini-2.0 (gemini-2.0-flash-exp), Gemini-2.0-thinking (gemini-2.0-flash-thinking-exp-1219), GPT-4o, and GPT-o1. We resize all input images to $336 \times 336$ pixels for a fair comparison with our model. We set the generation temperature to 0 and the maximum number of planning steps to 50. The evaluation prompt is:
366
+
367
+ You are an intelligent robot equipped with cameras and robotic arms, your primary task is to observe and interact with the objects on the desktop.
368
+
369
+ {Action proposal prompt (Sec. E.1)}
370
+
371
+ You can only output the action, e.g., pick up red. Do not output anything else.
372
+
373
+ Since the instruction-following capability of LLaVA-Onevision is quite limited, we cannot extract valid actions from its responses. For the other closed-source VLMs, we list detailed evaluation results in Table 5. We also visualize some success cases in Figures 10 and 11, and failure cases in Figures 12 to 15.
374
+
375
+ Table 5. Detailed evaluation results of zero-shot VLMs.
376
+
377
+ <table><tr><td>Model</td><td>Success Trajectory ID / Planning Steps</td><td>Max Steps</td><td>Min Steps</td><td>Avg Steps</td></tr><tr><td>Gemini-2.0</td><td>5/6, 12/4, 16/18, 47/11, 60/4, 86/6</td><td>18</td><td>4</td><td>8.2</td></tr><tr><td>Gemini-2.0-Thinking</td><td>5/6, 12/4, 40/20, 47/16, 50/8, 60/8, 86/10, 90/11</td><td>20</td><td>4</td><td>10.4</td></tr><tr><td>GPT-4o</td><td>12/15, 16/5, 19/4, 47/10, 60/4, 90/6</td><td>15</td><td>4</td><td>7.3</td></tr><tr><td>GPT-o1</td><td>12/9, 16/6, 17/15, 47/8, 50/16, 58/18, 60/14, 62/33, 66/6, 67/12, 72/32, 77/9, 85/9, 86/6, 90/4</td><td>33</td><td>4</td><td>13.1</td></tr></table>
378
+
379
+ # F.2. MCTS
380
+
381
+ We implemented MCTS similar to AlphaGo Zero (Silver et al., 2017) but with a VLM policy for action proposal and a heuristic value estimator. States and actions are represented by nodes and edges, respectively. The algorithm iteratively expands the search tree and estimates the value for different actions. We store the visit count $N(s,a)$ , total action value $W(s,a)$ , and action value $Q(s,a) = W(s,a) / N(s,a)$ on edges. Each iteration consists of three phases: (1) select, (2) expand, and (3) backup.
382
+
383
+ In the select phase, it traverses the tree by selecting the edge with the largest action value $Q(s,a)$ plus an upper confidence bound $U(s,a) = c_{\mathrm{explore}}\sqrt{\sum_{a'}N(s,a')} /(1 + N(s,a))$, where $c_{\mathrm{explore}}$ is a factor that balances exploring less-visited edges and exploiting edges with high value. We use $c_{\mathrm{explore}} = 0.5$ in our experiments. If no actions are associated with a node yet, it samples the 5 top-likelihood actions from the VLM, removes duplicates, and adds them to the node.
384
+
385
+ In the expand phase, it expands the selected edge by simulating the action in the simulator, getting the next state, and adding the new state to the tree as a new node. It then estimates the value of the new state by rolling out the expert policy from that state. The estimated value is $V = \exp(-\lambda S)$, where $S$ is the number of steps the expert policy takes to reach the goal from the new state, and $\lambda = 0.1$ is a scaling factor.
386
+
387
+ In the backup phase, it updates the statistics of the edges on the path from the root to the expanded node: $N(s,a) \gets N(s,a) + 1$, $W(s,a) \gets W(s,a) + V$, and $Q(s,a) \gets W(s,a) / N(s,a)$.
388
+
389
+ The search completes after 50 iterations. Among all actions connected to the root node, the action with the highest $Q$ value is chosen to execute. We replan with MCTS at each timestep.
390
+
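+ The select-phase scoring and the heuristic value estimate can be sketched as follows. Only the formulas above are taken from the text; the edge containers and function names are assumptions for illustration.
+
+ ```python
+ # Sketch of the MCTS select score Q(s,a) + U(s,a) and the heuristic value V = exp(-lambda * S).
+ # Edge/node containers are illustrative; only the formulas come from the description above.
+ import math
+
+ C_EXPLORE = 0.5
+ LAMBDA = 0.1
+
+ def select_action(edges):
+     """edges: dict mapping action -> {'N': visit count, 'W': total action value}."""
+     total_visits = sum(e["N"] for e in edges.values())
+     def score(e):
+         q = e["W"] / e["N"] if e["N"] > 0 else 0.0
+         u = C_EXPLORE * math.sqrt(total_visits) / (1 + e["N"])
+         return q + u
+     return max(edges, key=lambda a: score(edges[a]))
+
+ def estimate_value(steps_to_goal):
+     """V = exp(-lambda * S), where S is the expert rollout length from the new state."""
+     return math.exp(-LAMBDA * steps_to_goal)
+
+ def backup(path_edges, value):
+     for e in path_edges:
+         e["N"] += 1
+         e["W"] += value
+         e["Q"] = e["W"] / e["N"]
+ ```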
391
+ ![](images/1f50936224e61837117bacdc58c11d2b223b8147ee18a659dbd3a9e58b71026f.jpg)
392
+ Goal
393
+
394
+ ![](images/97311cd48093eadefb61f0966bc2de6e9b9dcda56bb4f576bc599fcc87bbe356.jpg)
395
+
396
+ ![](images/78c09a0c30746e4ff48a4de33906500c8a40ce8be7b399cae7adff9e24a40c72.jpg)
397
+ 1: pick up red
398
+
399
+ ![](images/6515cff10bb56e3e96657760697ecf3cfe3c5c33d7f13ebf8353b0687f1a1539.jpg)
400
+
401
+ ![](images/96abde1d292a037784ff7bb53ed35c8fbb67fefc23e2875356acab90946d26a4.jpg)
402
+ 3: pick up orange
403
+
404
+ ![](images/bd79715aa8b2c2657c480ec2a88d6987150e27847f5ea2954adb44a8b5ffa943.jpg)
405
+ 4: insert orange
406
+
407
+ ![](images/39252bd72e0501d6525929422dc4a4177354ff21a3786601b0b98138603f1c7c.jpg)
408
+
409
+ ![](images/a7a575a2261e0896e18b3258cf479b91529184b4d1bb4942c49cd3312ddcbc30.jpg)
410
+
411
+ ![](images/c2932e0e1ae7e345ff1bf5b733311723877cb5d0c37cd81060ee6aa58df94deb.jpg)
412
+ 0: Initial
413
+ 6: insert blue
414
+
415
+ ![](images/cb4ac830c933fb65e4884332521bf026b11e211a1b32bf01f83c5bb5fc60e207.jpg)
416
+ 7: pick up brown
417
+
418
+ ![](images/84791758ab7cf0456f1aab2fd496aa1e6ab3b33d8394202c410ce116a4f24fcb.jpg)
419
+ 2: insert red
420
+ 8: insert brown
421
+
422
+ ![](images/57048a5f1c8911e8bde542bfdd6b08b3d295e658bb58eeaa1670fef8e7de004f.jpg)
423
+ 9: pick up gray
424
+
425
+ ![](images/c15ce5fcaa68fc7de779dba690646e9910de63cde2b2d589a1c5671c167e38a6.jpg)
426
+
427
+ ![](images/b9b6296b85f932e136bbe407630c4b8b524be0979e311872769a444975a9ecc8.jpg)
428
+ Goal
429
+
430
+ ![](images/047ffe88898896ba6e82d9dcb0588378bfa9e8bee8a0ad529db9b4ddc722df54.jpg)
431
+ 0: Initial
432
+
433
+ ![](images/20aaf926dac4fa15d2e6445a04fa22fe99803d12ebabb9067a9ca4cdfaf7a966.jpg)
434
+
435
+ ![](images/c8c20bdff0b5961b87f9cce5c89768bbc57f321a14c69e880f92f7ceeab490bf.jpg)
436
+
437
+ ![](images/3fa2c86f62c782dbcc8d607c33a2bee7f978cb33d8e4ae02fb62939316271246.jpg)
438
+ 3: insert brown
439
+
440
+ ![](images/326353949d0499c6bb596a2a8066606f0880e3bf6b629a91d274e4ca466490b8.jpg)
441
+ 4: pick up orange
442
+
443
+ ![](images/c6731ed7979b23a235bb809866c527db7aa44e7e3c9d048c321e31632bfb492d.jpg)
444
+
445
+ ![](images/8dc50e0b726ced1c1ffa74ec9d8cc4fa208c968a05b230cb0af97ea0b9653ddd.jpg)
446
+
447
+ ![](images/1aabe5956e98ce064443d3ec816fb56f3d8623502abf7737133097c3b531bc05.jpg)
448
+ 1: pick up brown
449
+ 7: put down brown
450
+
451
+ ![](images/8f95a44b0108bc0e2b3ec3811d5c3a01a9100fa9b3bbd37cd7c589c168b8e23d.jpg)
452
+ 2: put down brown
453
+ 8: reorient brown
454
+
455
+ ![](images/4946fd9845a0c3ce2056a2d6e9256dc4d31dec0fbdb3ccd9838a8bef8168763a.jpg)
456
+ 9: insert brown
457
+
458
+ ![](images/f53b80fc5108c887f1c8a3959d87369d07f31af69ad0a258d5fd223d9b6431b0.jpg)
459
+ 10: pick up brown
460
+
461
+ ![](images/4e47c9b7176d85e74191a2b075055b601f249895fa0edde980cda075ac77737c.jpg)
462
+ 5: insert orange
463
+ 6: pick up brown
464
+ 11: insert brown
465
+
466
+ ![](images/78b69f87735a8396e2c9f5d7c8f3f6346681aeff60b5a10318339149bec4e554.jpg)
467
+
468
+ ![](images/1bd40c70a9939fb1d04348824de910cc44441e9fb791e44b253bf3378fe28d77.jpg)
469
+ 1: pick up orange
470
+
471
+ ![](images/38758d0b8523535d3617b780ab5380059b5145b58b20cc5faa805ccc3ce7a166.jpg)
472
+
473
+ ![](images/69258ab2f5886f2a6d8cbbc0fa21c7cad9478d72c125cbb672a14b7d2e9a483f.jpg)
474
+ 3: pick up brown
475
+
476
+ ![](images/47fb10d634687f27eb974ed722fed9201d18fca799d47b9d0a98a87db1f509eb.jpg)
477
+
478
+ ![](images/d603f9bd12f26818786948e48628b8935dfdc6c2a1f06ba3569585aa00c88df5.jpg)
479
+ 5: pick up pink
480
+
481
+ ![](images/477e3fed4c59496ae8b90e4a1adf588c3df57a95b61e594f48b873851f69060a.jpg)
482
+ 0: Initial
483
+ 6: reorient pink
484
+
485
+ ![](images/c8d002f73d14b758ce943269614656464809365b6e1806614ce1a31cc7000d44.jpg)
486
+
487
+ ![](images/a2dd5a9fce6ab2aaa919f887f46cfda799dea27fe2550aae62d2235a76741816.jpg)
488
+ 2: put down orange
489
+ 8: pick up orange
490
+
491
+ ![](images/120fa0e33191e10d5497d536378b5d7b29aa74502f63d43bb820bde61021741a.jpg)
492
+
493
+ ![](images/39d03584870e523ac24448457ed2bd6d95d7cfaa26c3d471cd5a634ac0e8ec78.jpg)
494
+ 4: insert brown
495
+
496
+ ![](images/48203f4630efa7431e5e540ffa7f3ce8891f4391d7a7d6e99367b9ef6be83b2d.jpg)
497
+ 11: insert purple
498
+
499
+ ![](images/d6ce38117cd3efe2edb9829db4de9729fb65b5d3e0feca8b56b5bf09ca1edefc.jpg)
500
+ 12: pick up yellow
501
+ 13: put down yellow
502
+
503
+ ![](images/fa4600d51c5814cc72ab581b6d8df970661d81a2bddde646803cc7325976c725.jpg)
504
+ 7: insert pink
505
+
506
+ ![](images/4b7f995b215806af1c4b17ea2274e37520eec45476244aa660173f0ebe65e6b9.jpg)
507
+ 14: pick up yellow
508
+
509
+ ![](images/64bc5685bbfeb589ac460d905294f66bd4bdd35a1bb2152a4a734a8d2ea68324.jpg)
510
+ 9: insert orange
511
+ Figure 10. Success cases of zero-shot VLMs. Top: Gemini-2.0; Middle: Gemini-2.0-Thinking; Bottom: GPT-4o.
512
+
513
+ ![](images/9dafa19dd2c4a55bb87e799a3fe8451cb9ad3d8cc12513dcd11caa6adf2faef4.jpg)
514
+ Goal
515
+
516
+ ![](images/671a480ac7d7c80fa5be0d9c5653608cd78a28978c190aae3e956e7e6d6a4920.jpg)
517
+
518
+ ![](images/5af66116f68a81f14b773460893223456ec14d05e997db9b34fe850772b1d7bb.jpg)
519
+
520
+ ![](images/ebe364d36210e92e824bd137b86c6af0b63c684bc9d532b8232ca771d32aa9b6.jpg)
521
+ 2: reorient pink
522
+
523
+ ![](images/ce91058835af13a24a11ab29d5b06b85316a521801ec64ca4de61e1cdcf4bd82.jpg)
524
+
525
+ ![](images/f1a6b76563785a10064a707670e318b04439eb30487c84de6a1ff7b3e796a2dd.jpg)
526
+
527
+ ![](images/30b2ffd319de9614d5b704ffc46ed9b0a06209c6dd1aec0b67593e41b3ec1acc.jpg)
528
+ 0: Initial
529
+
530
+ ![](images/6dac38efee337d2ee09abbab75e21aba7a343fcce20f6f949c6ca68fe6ea192b.jpg)
531
+
532
+ ![](images/a6b6547183b63343a53787d4e2936bf4749832c375f7c4414fb03713c8479ea4.jpg)
533
+ 1: pick up pink
534
+
535
+ ![](images/b99deddccf80957d1bebb5f7cd7c9712f8666973d7d5a59269242a2bfd72f03b.jpg)
536
+
537
+ ![](images/744df1c8a4955f5414dcb1a2f3cd3528d6e0832203ca4508206a65939e1f27f1.jpg)
538
+ 3: insert pink
539
+
540
+ ![](images/52d15e0faf419dd9cdc90c321394e56187246635840ca64d9d5daf8aee38a2dc.jpg)
541
+ 4: pick up orange
542
+
543
+ ![](images/46774a90b706da0f4c036cef1ff70577c630a42b2e67e8288b067f22ebeaa452.jpg)
544
+ 5: reorient orange
545
+
546
+ ![](images/5be7d25377f4321c1d64e8dff2ea1bc025c6632d285bac05944c70ed85464bb5.jpg)
547
+ 6: insert orange
548
+
549
+ ![](images/5b29bfd873d0df30546f6de32641c4b92ebe68ea1b3ed40d53b01f313afdb724.jpg)
550
+ 7: pick up purple
551
+
552
+ ![](images/5ac8adb65243f6e6cb4efa3d9a4968fba1f486e99b44afb7a1238c91774a5814.jpg)
553
+ 8: reorient purple
554
+
555
+ ![](images/1e6e6636d9d786de81365bbd845a9956e12c65b2a6ee79fb6982d292f2e195e0.jpg)
556
+ 9: insert purple
557
+
558
+ ![](images/0ddec9c605cd55693a89581eff0c029b8cd4c04fb8f046df7db73d3f59ac5f15.jpg)
559
+ 10: pick up brown
560
+
561
+ ![](images/25ca60dd02154333ef7a3033355cb7c7a39def088d6d392ea595ad2d259d5776.jpg)
562
+ 11: reorient brown
563
+
564
+ ![](images/c8a015b44fc802d1da9e1df5586d65281f5c23a525d38e2ba94f910649f6799c.jpg)
565
+ 12: insert brown
566
+
567
+ ![](images/9079fc1ba0cd1f1b748ba1310372587dce4f818bb538c6abf3c1a7842dc46188.jpg)
568
+ 13: pick up blue
569
+
570
+ ![](images/9be80ddeb7eae213c51ff980a633a5d0b0b5103067745e46cf09f6c5adb39d98.jpg)
571
+ 14: reorient blue
572
+
573
+ ![](images/371b4f2dff80d9d9239042b602e3730c24515ae0a2b37447ed070a48d0aed59c.jpg)
574
+ 15: insert blue
575
+
576
+ ![](images/f296632f1fc57684d412e0a875bf594132e4c7557e41b850a0abf7b854b5eba4.jpg)
577
+ 16: no action
578
+
579
+ ![](images/ab467524cc77820eb88c194056610199f1ad32bc5aaed2149625dca26be8dbd6.jpg)
580
+ 17: insert pink
581
+
582
+ ![](images/f1b06f5e44d5265591a9226bdcc025ea48d6b9f0c48d17ab742a9a61464f9b76.jpg)
583
+ 18: no action
584
+
585
+ ![](images/17d3ceac585f09b7f65f43a2a5d41021a86bd87ac59ac59edd557fbe31df855e.jpg)
586
+ 19: reorient pink
587
+
588
+ ![](images/497a8b8efc56ee3f379f3bee1622dc1814e4ba6df04eedab84f70dbf45f1ba43.jpg)
589
+ 20: insert pink
590
+
591
+ ![](images/50bfb59ad9e9f648b28b91cfda2f989c2867a3f338ff2a1ef3336c686b023351.jpg)
592
+ 21: pick up pink
593
+
594
+ ![](images/dc97b269d5acdce265d7b19c88d1b7c8490ad2c669ede9a9eecf711de7afe176.jpg)
595
+ 22: insert pink
596
+ 28: pick up brown
597
+
598
+ ![](images/82881e8e2cb3d380ea71b8e17e9c63db1deea04ed1291c00c1085fef93bc682a.jpg)
599
+ 23: pick up purple
600
+ 29: reorient brown
601
+
602
+ ![](images/602f583ece73e37a2874b814b85c33c83fab134bfc0bab1b8b1fe757c609a1d0.jpg)
603
+ 24: reorient purple
604
+ 30: insert brown
605
+ Figure 11. Success cases of zero-shot VLMs (GPT-o1).
606
+
607
+ ![](images/fa1cb81c88f8afd5a16f085c266a127621eec6237697a3fbef795c959bf424bc.jpg)
608
+ 25: insert purple
609
+ 31: pick up orange
610
+
611
+ ![](images/5e479daa4f4b734ba082c0f3195953071b0d638730239007ec6e597c9a0fe9ee.jpg)
612
+ 26: pick up brown
613
+ 32: reorient orange
614
+
615
+ ![](images/66a550b663532e8b9b0d0fa03cc34e070568d0fba1ca05f757c042f3c9d8f400.jpg)
616
+ 27: insert brown
617
+ 33: insert orange
618
+
619
+ ![](images/e6b05cecc9c68f0c181be9cc6ca6b0db84e0110d0c1bb66842f4b7f918bf5a1e.jpg)
620
+ Goal
621
+
622
+ ![](images/75ab4173c3ecbc491b167244b175fbb88e6004d671938c9c15b31528206a6eec.jpg)
623
+ 0: Initial
624
+
625
+ ![](images/a699262c30cdb0b66f00ef1994752e6fcf30a7bad61365bd9b489d62eb4f5561.jpg)
626
+ 1: pick up orange
627
+ 2: insert orange
628
+
629
+ ![](images/fd30d72b7cfd62732c9a8d029a9e76e8794daa9c1113b41725aeba870facdc2c.jpg)
630
+
631
+ ![](images/20d422ed114a4e83e8f2419f0a90a9dd080d4a76f21c1c22b127b592101a2764.jpg)
632
+ 3: pick up blue
633
+ 4: put down blue
634
+
635
+ ![](images/8336610ce6164b5378ae68f20411eff84fbfc1aa104fba9f8817bb3441bd5fb8.jpg)
636
+
637
+ ![](images/dfb71e96e08bdb886fb68ee379eaa8d294c3fb43977f118121c2d0b112575675.jpg)
638
+ 5: pick up gray
639
+
640
+ ![](images/63555f82c7527e0df62ffb0351818ac83c388797860803d755affc18fa341998.jpg)
641
+ 6: put down gray
642
+
643
+ ![](images/07acf82b0c201232793c4304f1216d1754f8ab6b2de1e1a6e641dd5c1d70ce49.jpg)
644
+ 7: pick up green
645
+
646
+ ![](images/84e478519f0e8dbb94f31c2094176dde7318439a6068f1d40498d43a57342978.jpg)
647
+ 8: put down green
648
+
649
+ ![](images/00cc4c11b1f4867c2c06168fa1e6f9821583c4e45f5503d0cde83868ae510a94.jpg)
650
+ 9: pick up blue
651
+
652
+ ![](images/d716b01664a8c8300c3ab96f076c90558a758c26ee54d5ab81665cbc405abedd.jpg)
653
+ 10: insert blue
654
+
655
+ ![](images/a6505d24efc780739f64b33194994dced939ea2a6f94e15426d68030fb939170.jpg)
656
+
657
+ ![](images/3c62daff426a88b494df47fbc043771fc33b9ef5486583c47a976ef58f2c2752.jpg)
658
+
659
+ ![](images/ed2e5d3bf451f3b2192609530ccdf121fd5f54cc5903005bfe52e4757977f721.jpg)
660
+
661
+ ![](images/3f8290bf2b6c5b3527c3afe10c85b30fb9a88ccac86fa885c2fd24fda350367e.jpg)
662
+
663
+ ![](images/8b1aee4eba9831d92d64b345c1ffe8c8ee67e87f9b48dbb23487ed346bdf872f.jpg)
664
+
665
+ ![](images/9f91fff3d62bb285048dd02df40d61e1b139ec8da6292c87eced3df4479c967a.jpg)
666
+
667
+ ![](images/00edd99426e3a4c6ab5f75ada63c96faf1be44a2974365eeae0a929a651e030d.jpg)
668
+ 11: pick up yellow
669
+
670
+ ![](images/579f48ae3bd374466f3983a5b81b7504f7fb33c16df7fcf769907f81b08f6aef.jpg)
671
+ 12: insert yellow
672
+
673
+ ![](images/768f2caae6f50c70b9d095997887d7eca7fa282573e3fca4be6c09b59ad10b82.jpg)
674
+ 13: pick up pink
675
+
676
+ ![](images/03293f9b637fe7304f71804cae2eee823a2228c36543dade91ac488324d9d86d.jpg)
677
+ 14: insert pink
678
+
679
+ ![](images/0be1234c52b63ca5ad3ebc780543ceb4811a70917f930e69270579a6b64ec8a5.jpg)
680
+ 15: pick up gray
681
+
682
+ ![](images/beafe7483124052a33659c623084943aee3240b61822395542e747717ae9adb8.jpg)
683
+ 16: put down gray
684
+
685
+ ![](images/750ae175607cb3b590d1f3ae4731ad1e4a494cd749856d3e7595b0b09534688b.jpg)
686
+ 17: insert gray
687
+
688
+ ![](images/b1ed6e2a823e3bedbab3cac709b9b4b91a0e2d29d7dc4c184e7d54596eacda91.jpg)
689
+ 18: pick up green
690
+
691
+ ![](images/0b6d994b34d12f1a4573080bdc9dfd1a09061a1d8d34d5e2899c6ca9e379b7fd.jpg)
692
+ 19: insert green
693
+
694
+ ![](images/c2264ed494a912bcf1c822e47491047faf371ab3b620f310b211faaa14b1af11.jpg)
695
+ 20: pick up purple
696
+
697
+ ![](images/8237e0f637ab493b177b0282a1c1aac07da51482ab9cdce6742d67d0522ef4fd.jpg)
698
+ 21: insert purple
699
+
700
+ ![](images/897242617036004444d46ae7ea8332d2327c127714f25a4024e6a052c0f712ec.jpg)
701
+ 22: pick up pink
702
+
703
+ ![](images/0e83fa48c51fb526fcb4342bcda4a7b5ef83023aa29c6e41ffb195c6c6908c60.jpg)
704
+ 23: put down pink
705
+
706
+ ![](images/a80f9cade332ec1c7e2a480caa323856ae87d41f83c172b9e6c69efa80791395.jpg)
707
+ 24: pick up pink
708
+
709
+ ![](images/9ed0e472dcd94e05f3070a22b0be3fe62653fee4ee9d63581751915ed852b188.jpg)
710
+ 25: insert pink
711
+
712
+ ![](images/ecfda97163449406b2b7c9dd185a198529b69e2c3226a35c1e167276404e3b82.jpg)
713
+ 26: put down pink
714
+
715
+ ![](images/a7f837ffb6486939f5d41aa80b6e597af58fc6f4dc5ab731ce7be9923917a28b.jpg)
716
+ 27: pick up pink
717
+
718
+ ![](images/2a1e5ab1994317a0adfd49397d7324bb1b72b0f196da0d66383d15bbda3c8903.jpg)
719
+ 28: insert pink
720
+
721
+ ![](images/ae676b29f4f05a19393abb228c4f9121f78f8ee0fe81eaf9687f3ab223efedd2.jpg)
722
+ 29: pick up pink
723
+
724
+ ![](images/fb5b8502734f5f6bca4bb3f1eb0c81a116252f7b0820a7e94505ccb430209f28.jpg)
725
+ 30: put down pink
726
+
727
+ ![](images/69565923fe82f0c5c5f15489573f5f1ba10cce567261aae50d1f1c0ff98b699f.jpg)
728
+ 31: pick up pink
729
+
730
+ ![](images/fa88340499de4a982b194a0ecebfd7d80239709ef01f610fd94c5ad575255362.jpg)
731
+ 32: insert pink
732
+
733
+ ![](images/0ffc0f0e01b53bf7881617185fd056f69c0080bddf0d0d2761c1e6d682adcf4a.jpg)
734
+ 33: pick up pink
735
+
736
+ ![](images/bbb63c8abc5ec211ac5b8cd62102b02d5e9ddd5303e85329a5b92d92947313e2.jpg)
737
+ 34: put down pink
738
+
739
+ ![](images/cb177ea8c7a9e7b969dca591bece121744f24b8a24ad0dd28aa08dad131d5812.jpg)
740
+ 35: pick up pink
741
+ 41: insert pink
742
+
743
+ ![](images/68ee15a01bd69aea9317b8b7eb3a2a9eaf3edf2d9d5036d7d613a6713be1a83d.jpg)
744
+ 36: put down pink
745
+ 42: pick up pink
746
+
747
+ ![](images/6cdc939f8d56c563de41a7617da6f367e00df2f66ac9722eb3d02148003a8b3f.jpg)
748
+ 37: put down pink
749
+ 43: put down pink
750
+ Figure 12. Failure case of Gemini-2.0.
751
+
752
+ ![](images/3dba8eb941c89660f56a060df9e4a92d3782ec731dc0f9f220059609acd49ca9.jpg)
753
+ 38: pick up pink
754
+ 44: pick up pink
755
+
756
+ ![](images/5908129f140532bdc0f9ac8714ba1da98d22825eaaf9cd249e2e66d31881677d.jpg)
757
+ 39: insert pink
758
+ 45: insert pink
759
+
760
+ ![](images/825a32a50b3eade73fd02e5423122413dda0b399687757c0ae6734a97c425a06.jpg)
761
+ 40: pick up pink
762
+ 46: put down pink
763
+
764
+ ![](images/f2757973891822d6fece126ee76a4af913777b94a30599efb2e06b5385ba22f8.jpg)
765
+ Figure 13. Failure case of Gemini-2.0-Thinking.
766
+
767
+ ![](images/3af0c42ce4ae14e80274dbad45ad9fa84e72d7819f890e3ae048d3855c418edf.jpg)
768
+ Goal
769
+
770
+ ![](images/428d9aa1ba69596f4a039039cb8e256457d3b14075533e5b15177cd7d536bfd7.jpg)
771
+
772
+ ![](images/0d2b15ac9154c57eae95bc73f12cb12f8fe2b9f275a10528ab8fc049b951b72a.jpg)
773
+
774
+ ![](images/9760cad72854acc0a4a94a4167f7b8d9e8f28ecce071935743c45efb86074dec.jpg)
775
+ 2: insert orange
776
+
777
+ ![](images/c2ab6b6c5bbc782760f93aec22e0ae515ad71e7b7c12f5bda65169342ba64d88.jpg)
778
+ 3: pick up red
779
+
780
+ ![](images/4697b74e8564a5dbf881172e1330301c1cf223f3c163c6f724faa5ea13a05ad0.jpg)
781
+ 4: put down red
782
+
783
+ ![](images/0855ac7826edca0b205d9258c76ccabc6c567f595932c26cecc45daf9717d269.jpg)
784
+ 0: Initial
785
+
786
+ ![](images/f38a47c3abb3feafc8d188162a938b655d145bbd62e125763452b85ddbe4b3a8.jpg)
787
+
788
+ ![](images/27396b3e55e750e4068fb230568f592f29733093eef5e5b3c358b9cc547b56ce.jpg)
789
+ 1: pick up orange
790
+
791
+ ![](images/e56c95520df0f4883b13144d6ca9315ee9ea0d82cbf7ce3fbd810f5627a399c7.jpg)
792
+
793
+ ![](images/3cabe2b577de5d1a2f0c326b21d9050ba969cb65551f0d5b75d552417e9896a6.jpg)
794
+
795
+ ![](images/5169b4e2b11c8fa25c6f0cf655c4da9c55a47d827b50869462a20d7f42b6bef3.jpg)
796
+
797
+ ![](images/290619c70b0af1577e8de4ab4b883cc00813ae890a23419dbeedb499caa2ed7b.jpg)
798
+
799
+ ![](images/7addb3ce4c5399b5b86251974bc720ac1a165dbb00db254f981cdf726777a50b.jpg)
800
+
801
+ ![](images/cb07a4db301650901198d99adce06c7904a711ddbbfd6aba21336c72d8a434c6.jpg)
802
+
803
+ ![](images/e4ddc8dbdcf94bf8d5657e7f6d9b4b6ea96a91c7782dc22e0db3ac15538938ee.jpg)
804
+
805
+ ![](images/ffd1c14854a330501959be9a81de81fda2168a69d4ac1c69913510a62b9b8fb4.jpg)
806
+
807
+ ![](images/14a318ecce1d900c13d81ba1911d161f1c9453f9ddf5fa1c988bf597667cfa6a.jpg)
808
+
809
+ ![](images/1b190c1a0e232740d5bb817c3e197ed0c55aff1a7b0ca97ae2848943620d554b.jpg)
810
+ 11: pick up yellow
811
+
812
+ ![](images/b841dffa02d590045cc90ac8308d729d155278f7fc95c0c8918e9165cb92e199.jpg)
813
+ 12: put down yellow
814
+
815
+ ![](images/0b850469895b0e5471d55041f158d108c2c1adc6874996ef8ddcf0c5c0b8d762.jpg)
816
+ 13: pick up yellow
817
+
818
+ ![](images/5d820be0051dc9e856d5ccdd496c31913e73eef51e97d67840822faab894d6fd.jpg)
819
+ 14: insert yellow
820
+
821
+ ![](images/8d34cdb4ef9f7d9a44e606712881b7379428e6579b42ec597bfe940c79b7e83e.jpg)
822
+ 15: pick up yellow
823
+
824
+ ![](images/2d2ec892a45e0df72b14ea27a8a3331903883084bb80b35879eadeba9e62d7f6.jpg)
825
+ 16: insert yellow
826
+
827
+ ![](images/1b0ef31379aa291e55de72fd555c373ac9e9edcb73b16fac0e4cb405ae39977c.jpg)
828
+ 17: insert yellow
829
+
830
+ ![](images/b3459657371e4517ca074159a2236944460828bd4d127c77d151305863c9a1f6.jpg)
831
+ 18: pick up red
832
+
833
+ ![](images/7c093a8d51dcce3cf244babe989d5717be605b85c2374a3e47decc6c45febdbd.jpg)
834
+ 19: insert red
835
+
836
+ ![](images/3890d832b266befbb72b57ef3fe02d3a41d2c8a8affb9e444694e05f5c5b1be3.jpg)
837
+ 20: insert pink
838
+
839
+ ![](images/c3c21ab13c532fcf2e6125bdc1e70bfec49d1ccec943caa98c1b45bb4e8c9e3b.jpg)
840
+ 21: pick up pink
841
+
842
+ ![](images/c5670a094b610cdace2e64acaf9beed37ef64b9bad56e0f025659d24a2e9b92f.jpg)
843
+ 22: put down pink
844
+
845
+ ![](images/cfa3dc33b746f79ad7f8a0fd794c9f8e42c16bcb197cba16a73d6a90dbbe338d.jpg)
846
+ 23: pick up pink
847
+
848
+ ![](images/d2f192ed140829040f690c70eabf373e001e4dc41c197f1eb9b3131191d1cd22.jpg)
849
+ 24: insert pink
850
+
851
+ ![](images/32454586b856ea30661f688d9ed59b85549329ff4f2fbd91cfe777b6d1ed391e.jpg)
852
+ 25: pick up red
853
+
854
+ ![](images/31ad4c9c088ffa826de60133c1d672852b5809952bcc1f472094668c34e5d1c4.jpg)
855
+ 26: insert red
856
+
857
+ ![](images/2592aff89e5a4a01b6dd4f7ec6b526160df9a696eb5405700dba9a6c1cbd80dc.jpg)
858
+ 27: pick up pink
859
+
860
+ ![](images/02d41848f7ab2f3832f8411d2a3c6419a04d45c82398507355230f37da98d3c2.jpg)
861
+ 28: put down pink
862
+
863
+ ![](images/a728943bc51b79e799d321de0f10e112bb684ac8434b0b9a90994a762b81e6ea.jpg)
864
+
865
+ ![](images/b79e93e1b9cd8cb4128b0a6d5242c1e74b7281ee73747a7ec8a76a35ab46d3e1.jpg)
866
+
867
+ ![](images/02f95b48752c63eefe6dc301b012f1ba5c737f84361d0f36a43a459921adc9c0.jpg)
868
+ 30: insert pink
869
+
870
+ ![](images/2ea9aca2ba30b4cbb667f96ee79fd203706e41ac6c8c92ceafdeee0672a41c96.jpg)
871
+ 31: insert yellow
872
+
873
+ ![](images/130e9aa7ddc7a37043ebcd621240c63feb72e33970c409ff1aa270b2d44083d8.jpg)
874
+ 32: pick up pink
875
+
876
+ ![](images/e003354a1cb0d1bbcd842ccf0148d24ca284d5964eb4e8d91d3e997813770c22.jpg)
877
+ 33: insert pink
878
+
879
+ ![](images/8f0d48bfc4d1ab73260762fef5f389e4ed7fc8a435b5427375980e5299b89af1.jpg)
880
+ 34: insert yellow
881
+
882
+ ![](images/8d0ff9b15e1d08bf41d7ece085db89c536b1cf0b8a23c370149aa435eb29a9d3.jpg)
883
+ 35: pick up red
884
+ 41: pick up yellow
885
+
886
+ ![](images/c9ca25bafd5394a0abac0350f29dcf62e435b4a8fa35be3c9c3561e0c80968ca.jpg)
887
+ 36: insert red
888
+ 42: insert yellow
889
+
890
+ ![](images/d0e3466cc189e3554d074030c63db78b0cd7d6edd2177daf8b517cb67702bf92.jpg)
891
+ 37: insert yellow
892
+ 43: pick up blue
893
+ Figure 14. Failure case of GPT-4o.
894
+
895
+ ![](images/950e758e52e2b1f0ff55dd9bb5ce3e9d3cd57659c6a391ed36ce7643137d0f71.jpg)
896
+ 38: pick up blue
897
+ 44: insert blue
898
+
899
+ ![](images/1a2ac442a1e905d1dbdb7e3dc6beb8146a888185b013d69cec2970010a245210.jpg)
900
+ 39: insert blue
901
+ 45: insert yellow
902
+
903
+ ![](images/cb40539fd6f58199b6ddf21744c1016e33dd2fe7b6d20be79a8707769ed203a8.jpg)
904
+ 40: insert yellow
905
+ 46: pick up yellow
906
+
907
+ ![](images/80974276c7046232447926054a3d673ec6a50d6be2a117f5764bd3683fadac67.jpg)
908
+ Figure 15. Failure case of GPT-o1.
909
+
910
+ ![](images/715324d33c002e5e890ccdbc7dfefae6e40aa83732a88305600243f3ac9b9568.jpg)
911
+
912
+ ![](images/489bc81827b6324fee18158eb70abd11198b58aae075d48d87d370f351fc77b5.jpg)
913
+
914
+ ![](images/acdc4ff20193c92c7d4ee633496e8afbf5d96dd9318adcd52abbda5d829b5920.jpg)
915
+ Figure 16. Examples of Diffusion Dynamic Models.
2502.16xxx/2502.16707/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b23f28a6ccfae68c2fd6902ec1011fa4ab3c9875b0ebf47800a1cb435c137d16
3
+ size 2870613
2502.16xxx/2502.16707/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16761/86e62f74-b450-44db-b1e0-1b0eb639035b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:104d3b552ca09b56d2ec8ff6188b08022c3dfa716dca29aee8c7bbc3d1b507e2
3
+ size 10909867
2502.16xxx/2502.16761/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16761/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ead1f4ee9e74a88e18b94f4b2f12f800065c4b89885a5cef09ef5b1bba7b332
3
+ size 1447735
2502.16xxx/2502.16761/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16804/410f7d37-d7b7-4074-a5d3-01545bef8b63_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1d7511d49dbecccbd174680eaaa96eff0b91ae03c36fdfab67b8ef827a1b8cc
3
+ size 2264312
2502.16xxx/2502.16804/full.md ADDED
@@ -0,0 +1,404 @@
 
 
 
 
1
+ # Multi-Agent Autonomous Driving Systems with Large Language Models: A Survey of Recent Advances, Resources, and Future Directions
2
+
3
+ Yaozu Wu $^{1*}$ , Dongyuan Li $^{1*}$ , Yankai Chen $^{2,3\dagger}$ , Renhe Jiang $^{1\dagger}$ , Henry Peng Zou $^{3}$ , Wei-Chieh Huang $^{3}$ , Yangning Li $^{3}$ , Liancheng Fang $^{3}$ , Zhen Wang $^{1}$ , Philip S. Yu $^{3}$
4
+
5
+ <sup>1</sup>The University of Tokyo, <sup>2</sup>Cornell University, <sup>3</sup>University of Illinois Chicago
6
+
7
+ yaozuwu279@gmail.com, lidy@csis.u-tokyo.ac.jp, yankaichen@acm.org, jiangrh@csis.u-tokyo.ac.jp.
8
+
9
+ # Abstract
10
+
11
+ Autonomous Driving Systems (ADSs) are revolutionizing transportation by reducing human intervention, improving operational efficiency, and enhancing safety. Large Language Models (LLMs) have been integrated into ADSs to support high-level decision-making through their powerful reasoning, instruction-following, and communication abilities. However, LLM-based single-agent ADSs face three major challenges: limited perception, insufficient collaboration, and high computational demands. To address these issues, recent advances in LLM-based multi-agent ADSs leverage language-driven communication and coordination to enhance inter-agent collaboration. This paper provides a frontier survey of this emerging intersection between NLP and multi-agent ADSs. We begin with a background introduction to related concepts, followed by a categorization of existing LLM-based methods based on different agent interaction modes. We then discuss agent-human interactions in scenarios where LLM-based agents engage with humans. Finally, we summarize key applications, datasets, and challenges to support future research.
12
+
13
+ # 1 Introduction
14
+
15
+ Autonomous driving systems (ADSs) are redefining driving behaviors, reshaping global transportation networks, and driving a technological revolution (Yurtsever et al., 2020). Traditional ADSs primarily rely on data-driven approaches (as detailed in Appendix A), focusing on system development while overlooking dynamic interactions with the environment. To enhance engagement with diverse and complex driving scenarios, agentic roles have been incorporated into ADSs (Durante et al., 2024) using methods like reinforcement learning (Zhang
16
+
17
+ et al., 2024b) and active learning (Lu et al., 2024). Despite notable progress, these methods struggle with "long-tail" scenarios, where rare but critical driving situations, such as sudden obstacles, pose significant challenges to model performance. Furthermore, their "black-box" nature limits interpretability, making their decisions difficult to trust.
18
+
19
+ LLM-based single-agent ADSs help overcome the limitations of data-driven methods (Wang et al., 2024a). Pre-trained on vast, multi-domain datasets, LLMs excel in knowledge transfer and generalization (Achiam et al., 2023), enabling strong performance in traffic scenarios under zero-shot settings and thus addressing the long-tail issue (Yang et al., 2023). Moreover, techniques such as Reinforcement Learning from Human Feedback (RLHF) and Chain-of-Thought (CoT) prompting (Zhao et al., 2023) enhance language-based interaction and logical reasoning, allowing LLMs to make human-like, real-time decisions while providing interpretable and trustworthy feedback across various driving conditions. For instance, Drive-Like-a-Human (Fu et al., 2024) builds a closed-loop system comprising environment, agent, memory, and expert modules. The agent interacts with the environment, reflects on expert feedback, and ultimately accumulates experience. Similarly, DiLu (Wen et al., 2024) replaces human experts with a reflection module and integrates an LLM-based reasoning engine to enable continuous decision-making. AgentDriver (Mao et al., 2024) designs a tool library to collect environmental data and uses LLMs' cognitive memory and reasoning to improve planning.
20
+
21
+ However, as shown in Figure 1, researchers have identified three critical limitations of LLM-based single-agent ADSs in complex traffic environments:
22
+
23
+ $\bullet$ Limited Perception: LLMs can only respond to sensor inputs and lack predictive and generalization capabilities. As a result, LLM-based single-agent ADSs cannot complement incomplete sensor information and thus miss critical information in
24
+
25
+ ![](images/d194470fcf210cdbdfea1cda6b57fa4e2bd3c5f2fd33fa5c0318d06b5aba54bb.jpg)
26
+ Figure 1: Limitations of LLM-based single-agent ADSs. At an intersection without traffic lights, an accident has occurred ahead, causing Veh1 to be stuck. Due to limited perception, Veh1 is unable to assess the situation and cannot proceed. Veh2 intends to go straight, and Veh3 wants to turn left. However, due to insufficient collaboration, they are also unable to navigate the intersection efficiently. Furthermore, due to high computing demands, the lightweight agent on Veh1 struggles to handle the complex driving scenario and has to rely on a more powerful cloud-based agent for assistance.
27
+
28
+ driving scenarios, such as pedestrians or vehicles hidden in complex intersection environments (Hu et al., 2024c). $\bullet$ Insufficient Collaboration: A single LLM-based agent cannot coordinate with other vehicles or infrastructure, leading to suboptimal performance in scenarios requiring multi-agent interactions, such as lane merging or roundabout navigation (Hu et al., 2021). $\bullet$ High Computational Demands: With billions of parameters, LLM-based methods require substantial independent computational resources, making real-time deployment challenging, particularly in resource-limited in-vehicle systems (Cui et al., 2023).
29
+
30
+ To address these limitations, LLM-based multi-agent ADSs enable distinct agents to communicate and collaborate, improving safety and performance. First, LLMs enhance contextual awareness by allowing agents to share data, extend their perceptual range, and improve the detection of occluded objects in complex environments (Hu et al., 2024c). Second, real-time coordination among LLM-based agents mitigates insufficient collaboration, enabling joint decisions in tasks like lane merging and roundabout navigation, ultimately leading to safer and more efficient driving operations (Hu et al., 2021). Third, LLMs optimize computational efficiency by distributing tasks across agents, reducing individual load and enabling real-time processing in resource-limited systems.
31
+
32
+ As LLM capabilities continue to advance, they are playing an increasingly significant role in ADS as intelligent driving assistants. Several reviews
33
+
34
+ have focused on two primary aspects: (i) the integration of LLMs in data-driven methods (Yang et al., 2023; Li et al., 2023) and (ii) the applications of specific LLM types, such as vision-based (Zhou et al., 2024b) and multimodal-based (Fourati et al., 2024; Cui et al., 2024c) models in ADSs. However, no comprehensive survey has systematically examined the emerging field of LLM-based multi-agent ADSs. This gap motivates us to provide a comprehensive review that consolidates existing knowledge and offers insights to guide future research and the development of advanced ADSs.
35
+
36
+ In this study, we present a comprehensive survey of LLM-based multi-agent systems. Specifically, Section 2 introduces the core concepts, including agent environments and profiles, inter-agent interaction mechanisms, and agent-human interactions. Section 3 provides a structured review of existing studies: multi-vehicle interaction, vehicle-infrastructure interaction, and vehicle-assistant interaction. As agent capabilities continue to grow, human-vehicle co-driving is emerging as the dominant autonomous driving paradigm, with humans playing an increasingly vital role. Humans collaborate with agents by providing guidance or supervising their behavior. Therefore, we consider humans as special virtual agents and examine human-agent interactions in Section 4. Section 5 explores various applications, while Section 6 compiles a comprehensive collection of public datasets and open-source resources. Section 7 discusses existing challenges and future research directions. Finally, Section 8 concludes the study.
37
+
38
+ # 2 LLM-based Agents for ADS
39
+
40
+ # 2.1 LLM-based Single-Agent ADS
41
+
42
+ Achieving human-level driving is an ultimate goal of ADS. As shown in Figure 2(a), the LLM-based single agent retrieves past driving experiences from the memory, integrates them with real-time environmental information for reasoning, and makes driving decisions. Additionally, the driving agent reflects on its decision and updates its memory accordingly, ensuring safe and efficient driving actions. However, the complex and dynamic nature of real-world driving scenarios, where interactions with other vehicles significantly impact decision-making, suggests that neglecting these interactions can lead to suboptimal or unsafe driving outcomes.
43
+
44
+ ![](images/23a2bfc67681968d670e2023bcd825a83306b7a86c7f3472a6e518c8b9959108.jpg)
45
+ Figure 2: Overview of LLM-based (a) single- and (b) multi-agent ADSs, with key terms and differences highlighted.
46
+
47
+ # 2.2 LLM-based Multi-Agent ADS
48
+
49
+ With interactions among multiple agents, LLM-based multi-agent ADS leverages collective intelligence and specialized skills, with each agent playing a distinct role, communicating and collaborating within the system. This enhances the efficiency and safety of autonomous driving. Below, we introduce the LLM-based multi-agent ADS, as shown in Figure 2(b), and provide a detailed analysis of its three key modules: Agent Environment and Profile, LLM-based Multi-Agent Interaction, and LLM-based Agent-Human Interaction.
50
+
51
+ # 2.2.1 Agent Environment and Profile
52
+
53
+ Similar to the single-agent architecture in Figure 2(a), multi-agent systems first obtain relevant information from their environments, enabling them to make informed decisions and take appropriate actions. The environmental conditions define the settings and necessary context for agents in LLM-based multi-agent ADS to operate effectively. Generally, there are two environment types, i.e., physical environment and simulation environment.
54
+
55
+ Physical environment represents the real-world setting where driver agents gather information using various sensors, such as cameras and LiDAR, and interact with other traffic participants. However, due to the high cost of vehicles and strict regulations on public roads, collecting large amounts of data in the real world is impractical. As a viable alternative, the Simulation environment provides a simulated setting constructed by humans. It can accurately model specific conditions without in-
56
+
57
+ Table 1: Comparison of Agent Profiling Methods.
58
+
59
+ <table><tr><td>Method</td><td>Advantage</td><td>Limitation</td></tr><tr><td>Pre-defined</td><td>Rely on prior knowledge to reduce the difficulty of scenario modeling and embed strict safety rules and regulatory constraints.</td><td>Labor-intensive to create and maintain, and lacks adaptability to novel or dynamic autonomous driving scenarios.</td></tr><tr><td>Model-generated</td><td>Synthesize new agent roles on-the-fly, letting simulators or fleets adapt to unseen driving contexts.</td><td>Generated profiles may violate traffic laws and have limited understanding of safety-critical environments.</td></tr><tr><td>Data-derived</td><td>Can learn complex, real-world driving behaviors and patterns from large datasets, potentially improving naturalistic interactions.</td><td>Coverage remains limited by the availability of vast, high-quality autonomous driving data, and privacy or commercial constraints may restrict data sharing.</td></tr></table>
60
+
61
+ curring the high costs and complexities associated with real-world data collection, allowing agents to freely test actions and strategies across a variety of scenarios (Dosovitskiy et al., 2017).
62
+
63
+ In LLM-based multi-agent systems, each agent is assigned distinct roles with specific functions through profiles, enabling them to collaborate on complex driving tasks or simulate intricate traffic scenarios. These profiles are crucial in defining the functionality of each agent, its interaction with the environment, and its collaboration with other agents. Existing work (Li et al., 2024) generates agent profiles using three types of methods: Pre-defined, Model-generated, and Data-derived.
64
+
65
+ Table 1 summarizes the advantages and limitations of different agent profiling methods in ADSs. Specifically, within Pre-defined methods, system designers explicitly define agent profiles based on prior knowledge and the analysis of complex scenarios (Chen et al., 2024a). Each agent has unique attributes and behavior patterns that can be adjusted
66
+
67
+ based on the scenario. In driving environments, the objectives of ADSs require the collaboration of vehicle agents, infrastructure agents, and drivers. In particular, Vehicle agents denote various types of autonomous vehicles, traveling according to preset routes and traffic rules, while communicating and collaborating with other vehicles and driver agents. Infrastructure agents, e.g., traffic lights, road condition monitors, and parking facilities, provide real-time traffic information and instructions, influencing the behavior of driver and vehicle agents. However, manually crafting such roles is labor-intensive and often brittle when scenarios shift, which has stimulated interest in automatic profile construction, either generated by LLMs or extracted from large-scale datasets. Model-generated methods create agent profiles using advanced LLMs based on the interaction context and the goals to be accomplished (Zhou et al., 2024c), while Data-derived methods design agent profiles based on pre-existing datasets (Guo et al., 2024).
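Purely as an illustration (the field names are hypothetical and not taken from any cited system), a pre-defined agent profile can be thought of as a small structured record that the system designer fills in and that the agent's LLM receives as part of its context:

```python
from dataclasses import dataclass

@dataclass
class AgentProfile:
    """Hypothetical pre-defined profile handed to an LLM-based agent as context."""
    agent_id: str          # e.g., "vehicle_2" or "traffic_light_A"
    role: str              # "vehicle", "infrastructure", or "driver"
    objectives: list       # e.g., ["follow preset route", "obey traffic rules"]
    constraints: list      # e.g., ["max speed 50 km/h", "yield to pedestrians"]
    behavior_style: str    # e.g., "cautious" or "assertive"

    def to_prompt(self) -> str:
        # serialize the profile into the system prompt of the agent's LLM
        return (
            f"You are {self.agent_id}, a {self.behavior_style} {self.role} agent. "
            f"Objectives: {', '.join(self.objectives)}. "
            f"Constraints: {', '.join(self.constraints)}."
        )
```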
68
+
69
+ # 2.2.2 LLM-based Multi-Agent Interaction
70
+
71
+ In LLM-based multi-agent ADS, effective communication and coordination among agents are crucial to improve collective intelligence and solve complex traffic scenarios. Agent interactions depend on both the interaction mode and the underlying interaction structure, as summarized in Table 3.
72
+
73
+ The interaction mode can be classified as cooperative, competitive, or debate. In cooperative mode, agents work together to achieve shared objectives by exchanging information (Chen et al., 2024d; Jin et al., 2024). In competitive mode, agents strive to accomplish their individual goals and compete with others (Yao et al., 2024). In debate mode, agents debate with each other, propose their own solutions, criticize the solutions of other agents, and collaboratively identify optimal strategies (Liang et al., 2024).
74
+
75
+ The interaction structure delineates the architecture of communication networks within LLM-based multi-agent ADSs, including centralized, decentralized, hierarchical, and shared message pool structures, as shown in Figure 3. Specifically, (1) the centralized interaction structure defines a central agent or a group of central agents to manage interactions among all agents (Zhou et al., 2024c). (2) The decentralized interaction structure allows for direct communication between agents, with all agents being equal to each other (Hu et al., 2024b). (3) Hierarchical structures focus on interactions within a
76
+
77
+ ![](images/12f99e01cab36829fdc43908da517ee98262d75ee29845e3b779c334667261cb.jpg)
78
+ Figure 3: Different interaction modes and structures.
79
+
80
+ layer or with adjacent layers (Ohmer et al., 2022). (4) The shared memory interaction structure maintains a shared message pool, allowing agents to send and extract the necessary information (Jiang et al., 2024a). We provide a more detailed introduction to LLM-based multi-agent ADSs based on their interaction structures and modes in Section 3.
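As an illustration only, the shared-message-pool structure can be sketched as a small publish/retrieve buffer that agents write to and query instead of maintaining point-to-point channels; the class and field names below are hypothetical and not drawn from any cited system.

```python
from dataclasses import dataclass, field
import time

@dataclass
class Message:
    sender: str      # e.g., "vehicle_3" or "roadside_unit_A"
    topic: str       # e.g., "intention", "observation", "warning"
    content: str     # natural-language payload produced by the sending agent's LLM
    timestamp: float = field(default_factory=time.time)

class SharedMessagePool:
    """Hypothetical shared buffer: agents publish messages and pull only what they need."""

    def __init__(self):
        self.messages: list[Message] = []

    def publish(self, msg: Message) -> None:
        self.messages.append(msg)

    def retrieve(self, topic: str, since: float = 0.0) -> list[Message]:
        # each agent filters the pool by topic and recency instead of
        # exchanging messages point-to-point with every other agent
        return [m for m in self.messages if m.topic == topic and m.timestamp >= since]
```

For example, a vehicle about to merge could publish an "intention" message and later retrieve recent "intention" messages from nearby vehicles before deciding on its maneuver.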
81
+
82
+ # 2.2.3 LLM-based Agent-Human Interaction
83
+
84
+ Recent studies show that human-machine co-driving systems use LLMs to improve agent-human interactions, enabling vehicles to communicate and collaborate seamlessly with human drivers through natural language (Feng et al., 2024; Zou et al., 2025a,b). This allows vehicles to better understand and respond to human intent, provide context-aware responses, enhance driving safety and comfort, and offer personalized recommendations based on driver preferences. Humans also play a crucial role in guiding and supervising agent behavior, enhancing the agents' capabilities while ensuring safety. We examine the role of humans as special virtual agents and explore agent-human interaction dynamics in Section 4.
85
+
86
+ # 3 LLM-based Multi-Agent Interaction
87
+
88
+ Mutual interaction is central to multi-agent ADSs, enabling systems to solve complex problems beyond the capabilities of a single agent. Through information exchange and coordinated decision-making, multiple agents effectively complete shared tasks and achieve overarching objectives (Li et al., 2024). This section reviews recent studies on multi-agent ADSs, emphasizing interactions among vehicles, infrastructures, and assisted agents in driving scenarios. As shown in Figure 4, we categorize existing methods into three interaction types: multi-vehicle interaction, vehicle-infrastructure interaction, and vehicle-assistant interaction.
89
+
90
+ ![](images/24b341889f304768376ac768ac268e5903721b9ee8bc63db08b8f1c0301d24dd.jpg)
91
+ Figure 4: A taxonomy of LLM-based Multi-Agent Autonomous Driving Systems.
92
+
93
+ # 3.1 Multi-Vehicle Interaction
94
+
95
+ Multi-vehicle interactions involve multiple autonomous vehicles powered by LLMs exchanging real-time information, such as locations, speeds, sensor data, and intended trajectories. By sharing partial observations of the environment or negotiating maneuvers, multiple vehicles overcome the inherent limitations of single-agent ADS, such as restricted perception and lack of collaboration.
96
+
97
+ Typically, these interactions operate in a cooperative mode with varying architectures. LanguageMPC (Sha et al., 2023) employs a centralized structure, where a central agent acts as the fleet's "brain," providing optimized coordination and control commands to each vehicle agent. In contrast, other decentralized methods (Fang et al., 2024; Dona et al., 2024) treat all agents as peers, allowing direct vehicle-to-vehicle communication without central bottlenecks. For instance, AgentsCoDriver (Hu et al., 2024a) designs an adaptive communication module that generates context-aware messages for inter-agent communication when the agent deems it necessary. AgentsCoMerge (Hu et al., 2024b) and CoDrivingLLM (Fang et al., 2024) incorporate agent communication directly into the reasoning process, facilitating real-time intention sharing and proactive negotiation before decision-making. Additionally, KoMA (Jiang et al., 2024a) and CoMAL (Yao et al., 2024) build a distributed shared memory pool, allowing agents to send and retrieve the necessary information to facilitate scalable interaction between agents.
98
+
99
+ # 3.2 Vehicle-Infrastructure Interaction
100
+
101
+ The interaction between vehicles and external agents, such as traffic lights, roadside sensors, and LLM-powered control centers, not only helps autonomous vehicles make more intelligent decisions but also alleviates on-board computing requirements. This enables LLM-based multi-agent ADSs to operate effectively in real-world environments. EC-Drive (Chen et al., 2024a) proposes an edge-cloud collaboration framework with a hierarchical interaction structure. The edge agent processes real-time sensor data and makes preliminary decisions under normal conditions. When anomalies are detected or the edge agent generates a low-confidence prediction, the system flags these instances and uploads them to the cloud agent equipped with LLMs. The cloud agent then performs detailed reasoning to generate optimized decisions and combines them with the output of the edge agent to update the driving plan. Following a similar architecture, Tang et al. (2024) use agents deployed on remote clouds or network edges to assist connected driving agents in handling complex driving decisions.
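A minimal sketch of this kind of edge-cloud routing logic is shown below; `edge_agent`, `cloud_agent`, and the confidence threshold are hypothetical stand-ins for the components described above, not the actual EC-Drive interfaces.

```python
CONFIDENCE_THRESHOLD = 0.8  # hypothetical cutoff for escalating to the cloud agent

def plan_step(sensor_data, edge_agent, cloud_agent, anomaly_detected=False):
    """Route one planning step between a lightweight edge LLM and a cloud LLM."""
    decision, confidence = edge_agent.decide(sensor_data)  # fast preliminary decision
    if anomaly_detected or confidence < CONFIDENCE_THRESHOLD:
        # flagged instances are uploaded to the cloud agent for detailed reasoning
        refined = cloud_agent.reason(sensor_data, preliminary=decision)
        if refined is not None:
            decision = refined  # prefer the cloud output (one possible merging policy)
    return decision
```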
102
+
103
+ # 3.3 Vehicle-Assistant Interaction
104
+
105
+ Beyond the interactions between the primary agents in driving scenarios, additional interactions among assistant agents play a crucial role in LLM-based multi-agent ADSs. Both ChatSim (Wei et al., 2024) and ALGPT (Zhou et al., 2024c) employ a project manager (PM) agent to interpret user instructions and coordinate tasks among other agents. Chat
106
+
107
+ Sim (Wei et al., 2024) adopts a centralized structure in which the PM agent decomposes an overall demand into specific subtasks and dispatches instructions to the other team agents. Similarly, the PM agent in ALGPT (Zhou et al., 2024c) formulates a work plan upon receiving user commands and assembles an agent team according to the plan. Specifically, agents no longer communicate point-to-point with each other but instead communicate through a shared message pool, greatly improving efficiency.
108
+
109
+ Additionally, hierarchical agent architectures further enhance the performance and effectiveness of LLM-based multi-agent ADSs. AD-H (Zhang et al., 2024c) assigns high-level reasoning tasks to the multimodal LLM-based planner agent while delegating low-level control signal generation to a lightweight controller agent. These agents interact through mid-level commands generated by the multimodal LLMs. In LDPD (Liu et al., 2024a), the teacher agent leverages the LLM for complex cooperative decision reasoning and trains smaller student agents via its own decision demonstrations to achieve cooperative decision-making. SurrealDriver (Jin et al., 2024) introduces a CoachAgent to evaluate DriverAgent's driving behavior and provide guidelines for continuous improvement.
110
+
111
+ Different from the conventional collaborative interaction mode, V-HOI (Zhang et al., 2024a) proposes a hybrid interaction mode that blends collaboration with debate. It establishes various agents across different LLMs to evaluate reasoning logic from different aspects, enabling cross-agent reasoning. This process culminates in a debate-style integration of responses from various LLMs, improving predictions for enhanced decision-making.
112
+
113
+ # 4 LLM-based Agent-Human Interaction
114
+
115
+ Depending on the roles humans assume when interacting with agents, we classify current methods into the instructor paradigm and the partnership paradigm.
116
+
117
+ # 4.1 Instructor Paradigm
118
+
119
+ As shown in Figure 5, the instructor paradigm involves agents interacting with humans in a conversational manner, where humans act as "tutors" offering quantitative and qualitative feedback to improve agent decision-making (Li et al., 2017). Quantitative feedback typically includes binary evaluations or ratings, while qualitative feedback consists of language suggestions for refinement. Agents incorporate this feedback to adapt and perform better in complex driving
120
+
121
+ ![](images/df5b968c86f522d0b196ec85f308c5e04c40dcbb4b437e0e8a1009a0e468452f.jpg)
122
+ Figure 5: Two modes of agent-human interaction.
123
+
124
+ scenarios. For instance, Wang et al. (2023) propose "Expert-Oriented Black-box Tuning", where domain experts provide feedback to optimize model performance. Similarly, Ma et al. (2024) present a human-guided learning pipeline that integrates driver feedback to refine agent decision-making.
125
+
126
+ # 4.2 Partnership Paradigm
127
+
128
+ As shown in Figure 5, the partnership paradigm emphasizes collaboration, where agents and humans interact as equals to accomplish complex driving tasks. In this paradigm, agents assist in decision-making by adapting to individual driver preferences and real-time traffic conditions. For instance, Talk2Drive (Cui et al., 2023), DaYS (Cui et al., 2024a) and Receive (Cui et al., 2024b) utilize memory modules to store human-vehicle interactions, enabling a more personalized driving experience based on individual driver preferences, such as overtaking speed and following distance. Additionally, infrastructure agents in AccidentGPT (Wang et al., 2024b) and ConnectGPT (Tong and Solmaz, 2024) connect vehicles to monitor traffic conditions, identify potential hazards, and provide proactive safety warnings, blind spot alerts, and driving suggestions through agent-human interaction.
129
+
130
+ # 5 Applications
131
+
132
+ # 5.1 Collaborative Perception
133
+
134
+ Despite significant advancements in the perception modules of ADSs, LLM-based single-agent ADSs continue to face substantial challenges, including constrained sensing ranges and persistent occlusion issues (Han et al., 2023). These two key limitations hinder their comprehensive understanding of the driving environment and can lead to suboptimal decision-making, especially in complex and dynamic traffic scenarios (Hu et al., 2024c).
135
+
136
+ Dona et al. (2024) propose a multi-agent cooperative framework that enhances the ego vehicle's field-of-view (FOV) by integrating complementary visual perspectives through inter-vehicle dialogues mediated by onboard LLMs, significantly expanding the ego vehicle's environmental comprehension. However, in complex road scenarios, reliance on a single LLM can lead to erroneous interpretations and hallucinatory predictions when processing complex traffic situations. To address this limitation, V-HOI MLCR (Zhang et al., 2024a) introduces a collaborative debate framework among different LLMs for video-based Human-Object Interaction (HOI) detection tasks. This framework first implements a Cross-Agent Reasoning scheme, assigning distinct roles to various agents within an LLM to conduct reasoning from multiple perspectives. Subsequently, a cyclic debate mechanism is employed to evaluate and aggregate responses from multiple agents, culminating in the final outcome.
137
+
138
+ # 5.2 Collaborative Decision-Making
139
+
140
+ After obtaining environmental information, the ADS performs three core functions: route planning, trajectory optimization, and real-time decision-making. In complex traffic scenarios such as roundabout navigation and lane merging, LLM-based multi-agent systems enable coordinated motion planning through three key mechanisms: (1) real-time intention sharing between agents, (2) adaptive communication protocols, and (3) dynamic negotiation frameworks. This collaborative architecture allows ADSs to precisely coordinate their trajectories, maneuver strategies, and environmental interactions while maintaining operational safety.
141
+
142
+ LanguageMPC (Sha et al., 2023) uses LLMs to perform scenario analysis and decision-making. Additionally, it introduces a multi-vehicle control method where distributed LLMs govern individual vehicle operations, while a central LLM facilitates multi-vehicle communication and coordination. AgentsCoDriver (Hu et al., 2024a) presents a comprehensive LLM-based multi-vehicle collaborative decision-making framework with life-long learning capabilities, moving the field towards practical applications. This framework consists of five modules: the observation module, cognitive memory module, and reasoning engine support the high-level decision-making process for AD; the communication module enables negotiation and collaboration among vehicles; and the reinforcement reflection module reflects on the output and
143
+
144
+ decision-making process. Similarly, AgentsCoMerge (Hu et al., 2024b) combines vision-based and text-based scene understanding to gather essential environmental information and incorporates a hierarchical planning module to allow agents to make informed decisions and effectively plan trajectories. Instead of directly interacting with each other, agents in KoMA (Jiang et al., 2024a) analyze and infer the intentions of surrounding vehicles via an interaction module to enhance decision-making. It also introduces a shared memory module to store successful driving experiences and a ranking-based reflection module to review them.
145
+
146
+ # 5.3 Collaborative Cloud-Edge Deployment
147
+
148
+ Although many innovative studies have explored the application of LLM-based multi-agent ADSs, significant technical challenges remain in deploying LLMs locally on autonomous vehicles due to their huge computational resource requirements (Sun et al., 2024a). To address these issues, Tang et al. (2024) apply remote LLMs to assist connected autonomous vehicles, which communicate among themselves and with the LLMs via vehicle-to-everything technologies. Moreover, this study evaluates LLMs' comprehension of driving theory and skills in a manner akin to human driver tests. However, remote LLM deployment can introduce inference latency, posing risks in emergency scenarios. To further improve system efficiency, Chen et al. (2024a) introduce a novel edge-cloud collaborative ADS with drift detection capabilities, using small LLMs on edge devices and GPT-4 in the cloud to process motion planning data and complex inference tasks, respectively.
149
+
150
+ # 5.4 Collaborative Assistance-Tools
151
+
152
+ The long-term data accumulation in both industry and academia has enabled great success in highway driving and automatic parking (Liu et al., 2024b). However, collecting real-world data remains costly, especially for multi-agents or customized scenarios. Additionally, the uncontrollable nature of real scenarios makes it challenging to capture certain corner cases. To address these issues, many LLM-based studies focus on simulating multi-agent ADS, offering a cost-effective alternative to real-world data collection. For example, ChatSim (Wei et al., 2024) provides editable photo-realistic 3D driving scenario simulations via natural language commands and external digital assets. The system leverages multiple LLM agents with specialized
153
+
154
+ Table 2: Single-agent and multi-agent autonomous driving datasets.
155
+
156
+ <table><tr><td>Datasets</td><td>Dataset Type</td><td>Sensor Type</td><td>Tasks</td></tr><tr><td>KITTI (Geiger et al., 2012)</td><td>Single-agent</td><td>Camera, LiDAR</td><td>2D/3D detection, tracking, depth estimation</td></tr><tr><td>nuScenes (Geiger et al., 2020)</td><td>Single-agent</td><td>Cameras, LiDAR, Radars</td><td>3D detection, tracking, trajectory forecasting</td></tr><tr><td>BDD100K (Yu et al., 2020)</td><td>Single-agent</td><td>Camera</td><td>Object detection, lane detection, segmentation</td></tr><tr><td>Waymo (Sun et al., 2020)</td><td>Single-agent</td><td>Camera, LiDAR, Radars</td><td>2D/3D detection, tracking, domain adaptation</td></tr><tr><td>BDD-X (Kim et al., 2018)</td><td>Single-agent</td><td>BDD</td><td>Object detection, driving scenario captioning</td></tr><tr><td>nuScenes-QA (Qian et al., 2024)</td><td>Single-agent</td><td>nuScenes</td><td>3D detection, tracking, visual QA</td></tr><tr><td>DriveLM (Sima et al., 2025)</td><td>Single-agent</td><td>nuScenes, Waymo</td><td>Multi-modal planning, question answering</td></tr><tr><td>DAIR-V2X (Yu et al., 2022)</td><td>Multi-agent</td><td>Camera, LiDAR (multi-vehicle)</td><td>Cooperative perception, tracking</td></tr><tr><td>TUMTraf-V2X (Zimmer et al., 2024)</td><td>Multi-agent</td><td>Multi-vehicle camera, LiDAR</td><td>Cooperative perception, multi-agent tracking</td></tr><tr><td>V2V4Real (Xu et al., 2023)</td><td>Multi-agent</td><td>Multi-vehicle camera, LiDAR</td><td>Cooperative detection, tracking</td></tr><tr><td>V2XSet (Xu et al., 2022)</td><td>Multi-agent</td><td>Multi-vehicle camera, LiDAR</td><td>Multi-agent detection, tracking</td></tr></table>
157
+
158
+ roles to decompose complex commands into specific editing tasks, introducing the novel McNeRF and McLight methods that generate customized, high-quality output. HumanSim (Zhou et al., 2024a) integrates LLMs to simulate human-like driving behaviors in multi-agent systems via pre-defined driver characters. By employing navigation strategies, HumanSim facilitates behavior-level control of vehicle movements, making it easier to generate corner cases in multi-agent environments. In addition, ALGPT (Zhou et al., 2024c) uses a multi-agent cooperative framework for open-vocabulary, multimodal auto-annotation in autonomous driving. It introduces a Standard Operating Procedure to define agent roles and share documentation, enhancing interaction effectiveness. ALGPT also builds specialized knowledge bases for each agent using CoT and In-Context Learning (Brown et al., 2020).
159
+
160
+ # 6 Datasets and Benchmark
161
+
162
+ We organize recent open-source work to foster research on advanced ADSs. Mainstream ADS datasets are summarized in Table 2.
163
+
164
+ Single-Agent Autonomous Driving Data. Single-agent datasets are obtained from a single reference agent, which can be the ego vehicle or roadside infrastructure, using various sensors. Mainstream single-agent autonomous driving datasets like KITTI (Geiger et al., 2012), nuScenes (Geiger et al., 2020), and Waymo (Sun et al., 2020) provide comprehensive multimodal sensor data, enabling researchers to develop and benchmark algorithms for multiple tasks such as object detection, object tracking, and object segmentation. In addition to these foundational datasets, newer ones like BDD-X (Kim et al., 2018), DriveLM (Sima et al., 2025), and nuScenes-QA (Qian et al., 2024) introduce action descriptions, detailed captions, and question-answer pairs that can be used to interact with LLMs. Combining language information with visual data
165
+
166
+ can enrich semantic and contextual understanding, promote a deeper understanding of driving scenarios, and improve the safety and interaction capabilities of autonomous vehicles.
167
+
168
+ Multi-Agent Autonomous Driving Data. Beyond single-vehicle-view datasets, integrating the viewpoints of additional traffic elements, such as drivers, other vehicles, and infrastructure, also brings advantages to AD systems. Multi-agent autonomous driving datasets, such as DAIR-V2X (Yu et al., 2022), V2XSet (Xu et al., 2022), V2V4Real (Xu et al., 2023), and TUMTraf-V2X (Zimmer et al., 2024), typically include data from multiple vehicles or infrastructure sensors, capturing the interactions and dependencies between different agents as well as additional knowledge about the environment. These datasets are essential for researching and developing cooperative perception, prediction, and planning strategies that enable vehicles to overcome the limitations of single-agent datasets, such as a limited field of view (FOV) and occlusion.
169
+
170
+ Benchmarks. Several benchmarks are particularly well-suited for evaluating collaborative decision-making in autonomous driving. The INTERACTION dataset (Zhan et al., 2019) includes a variety of real-world interactive scenarios, such as roundabouts and lane merging. It provides vehicle trajectories that enable an assessment of cooperative maneuvering and negotiation behaviors. Another important benchmark is the Waymo Open Motion Dataset (Ettinger et al., 2021), which is explicitly designed for interactive multi-agent motion prediction and planning. It features challenging scenarios, including merges and unprotected left turns, along with detailed annotations of interactive agents. In addition, the SMARTS benchmark (Zhou et al., 2021) offers standardized scenarios for multi-agent autonomous driving research, particularly focusing on ramp merging and navigating unsignalized intersections. This work allows for direct comparisons
171
+
172
+ of algorithms in cooperative traffic management tasks. These benchmarks provide comprehensive test bases for evaluating the coordination, safety, and adaptability of LLM-based multi-agent ADSs.
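+
+ These benchmarks are typically compared with trajectory-level metrics such as minimum average displacement error (minADE) and collision rate. As a minimal, benchmark-agnostic sketch (the array shapes and the function name are illustrative assumptions, not part of any official evaluation toolkit), minADE can be computed as follows:
+
+ ```python
+ import numpy as np
+
+ def min_ade(predictions: np.ndarray, ground_truth: np.ndarray) -> float:
+     """Minimum Average Displacement Error over K predicted trajectory modes.
+
+     predictions : (K, T, 2) array of K candidate trajectories over T timesteps.
+     ground_truth: (T, 2) array of the observed future trajectory.
+     """
+     # Per-mode, per-timestep Euclidean distance to the ground truth.
+     dists = np.linalg.norm(predictions - ground_truth[None, :, :], axis=-1)  # (K, T)
+     return float(dists.mean(axis=-1).min())
+
+ # Toy usage: two predicted modes over a 3-step horizon.
+ gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
+ preds = np.array([
+     [[0.0, 0.5], [1.0, 0.5], [2.0, 0.5]],   # offset mode, ADE = 0.5
+     [[0.0, 0.1], [1.0, 0.1], [2.0, 0.1]],   # closer mode, ADE = 0.1
+ ])
+ print(min_ade(preds, gt))  # 0.1
+ ```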
173
+
174
+ # 7 Challenges and Future Directions
175
+
176
+ This section explores key open challenges and potential opportunities for future research.
177
+
178
+ ① Hallucination, Safety & Trustworthiness. Hallucination refers to LLMs generating outputs that are factually incorrect or nonsensical (Huang et al., 2023). In complex driving scenarios, hallucinations from a single driving agent in an LLM-based multi-agent ADS can be accepted and further propagated by other agents in the network via inter-agent communication, potentially leading to serious accidents. Detecting agent-level hallucinations and managing inter-agent information flow are key to improving system safety and trust (Fan et al., 2024). Recent advances in spatiotemporal traffic analysis (Zhang et al., 2024d; Jiang et al., 2024b) further support real-time condition assessment, improving vehicle-road interaction and the overall safety of ADSs.
179
+
180
+ ② Legal, Security & Privacy. As agents autonomously exchange and process information within a multi-agent ADS, the distribution of legal liability between individual users and manufacturers becomes ambiguous, particularly in cases involving system failures or collisions. In addition, vulnerable communication methods and strict user privacy requirements place high demands on cryptographic protocols and data management. These interrelated concerns collectively represent critical directions for future research and regulatory initiatives.
181
+
182
+ ③ Multi-Modality Ability. In current multi-agent systems, agents primarily use LLMs for scene understanding and decision-making. Perception outputs are converted into text via manual prompts or interpreters, then processed by LLMs to generate decisions. This pipeline is limited by perception performance and may cause information loss (Gao et al., 2023). Integrating language understanding with multimodal data fusion offers a promising direction for future multimodal multi-agent ADSs.
183
+
184
+ ④ Real-World Deployment & Scalability. LLM-based multi-agent ADS can scale up by adding more agents to handle increasingly complex driving scenarios. However, more LLM agents increase the demand for computing resources, while their interactions impose strict requirements on communication efficiency, which is critical for real-time
185
+
186
+ decision-making (Huang et al., 2024b). Therefore, under limited computing resources, it is crucial to develop a system architecture that supports distributed computing and efficient communication, as well as agents capable of adapting to various real-world environments and tasks, to optimize multiagent ADS within resource constraints.
187
+
188
+ ⑤ Human-Agent Interaction. Current multi-agent ADS struggle to communicate intentions to human road users, relying on static signals that are inadequate for complex scenarios. Developing LLM-powered adaptive interfaces that generate context-appropriate, human-understandable communications while maintaining safety and trust presents a key deployment challenge (Xia et al., 2025).
189
+
190
+ # 8 Conclusion
191
+
192
+ This paper systematically reviews LLM-based multi-agent ADSs and traces their evolution from single-agent to multi-agent systems. We detail their core components, including agent environments and profiles, inter-agent interaction, and agent-human communication. Existing studies are categorized by interaction types and applications. We further compile public datasets and open-source implementations, and discuss challenges and future directions. We hope this review will inspire the NLP community to explore more practical and impactful applications in LLM-based multi-agent ADS.
193
+
194
+ # Limitations
195
+
196
+ Despite being a survey, this work still has several limitations. ① Emerging Research and Limited Data. As LLM-based multi-agent ADS is an emerging field, the current body of research is still growing. While this may limit the breadth of our classification, we have aimed to provide a representative and forward-looking overview based on the most relevant and recent work. ② Some Unverified Work. Given the novelty of this topic, some referenced works are from unreviewed arXiv preprints. We include them to reflect the latest progress and ideas, while acknowledging that their findings may require further validation through peer review. ③ Limited Discussion on Real-world Applications. Although industrial adoption of LLM-based multi-agent ADS is underway, public documentation remains limited. As a result, this review focuses on academic contributions, and real-world deployments are left for future investigation.
197
+
198
+ # References
199
+
200
+ Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv:2303.08774.
201
+ Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. In Proc. of NeurIPS, 33:1877-1901.
202
+ Jiao Chen, Suyan Dai, Fangfang Chen, Zuohong Lv, and Jianhua Tang. 2024a. Edge-cloud collaborative motion planning for autonomous driving with large language models. arXiv:2408.09972.
203
+ Li Chen, Penghao Wu, Kashyap Chitta, Bernhard Jaeger, Andreas Geiger, and Hongyang Li. 2024b. End-to-end autonomous driving: Challenges and frontiers. IEEE TPAMI, 46(12):10164-10183.
204
+ Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. 2024c. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. In Proc. of ICRA, pages 14093-14100. IEEE.
205
+ Pei Chen, Shuai Zhang, and Boran Han. 2024d. Comm: Collaborative multi-agent, multi-reasoning-path prompting for complex problem solving. In Proc. of NAACL-HLT (Findings), pages 1720-1738. Association for Computational Linguistics.
206
+ Hsu-kuang Chiu, Ryo Hachiuma, Chien-Yi Wang, Stephen F Smith, Yu-Chiang Frank Wang, and MinHung Chen. 2025. V2v-llm: Vehicle-to-vehicle cooperative autonomous driving with multi-modal large language models. arXiv preprint arXiv:2502.09980.
207
+ C Cui, Z Yang, Y Zhou, Y Ma, J Lu, L Li, Y Chen, J Panchal, and Z Wang. 2023. Personalized autonomous driving with large language models: Field experiments. arXiv:2312.09397.
208
+ Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran Wang. 2024a. Drive as you speak: Enabling human-like interaction with large language models in autonomous vehicles. In Proc. of WACV, pages 902-909.
209
+ Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran Wang. 2024b. Receive, reason, and react: Drive as you say, with large language models in autonomous vehicles. IEEE ITS Mag, 16(4):81-94.
210
+ Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, Yang Zhou, Kaizhao Liang, Jintai Chen, Juanwu Lu, Zichong Yang, Kuei-Da Liao, et al. 2024c. A survey on multimodal large language models for autonomous driving. In Proc. of WACV, pages 958-979.
211
+
212
+ Malsha Ashani Mahawatta Dona, Beatriz Cabrero-Daniel, Yinan Yu, and Christian Berger. 2024. Tapping in a remote vehicle's onboard llm to complement the ego vehicle's field-of-view. arXiv:2408.10794.
213
+ Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. 2017. Carla: An open urban driving simulator. In Proc. of CoRL, pages 1-16. PMLR.
214
+ Zane Durante, Qiuyuan Huang, Naoki Wake, Ran Gong, Jae Sung Park, Bidipta Sarkar, Rohan Taori, Yusuke Noda, Demetri Terzopoulos, Yejin Choi, et al. 2024. Agent ai: Surveying the horizons of multimodal interaction. arXiv:2401.03568.
215
+ Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. 2021. Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proc. of ICCV, pages 9710-9719.
216
+ Jiaqi Fan, Jianhua Wu, Hongqing Chu, Quanbo Ge, and Bingzhao Gao. 2024. Hallucination elimination and semantic enhancement framework for vision-language models in traffic scenarios. arXiv:2412.07518.
217
+ Shiyu Fang, Jiaqi Liu, Mingyu Ding, Yiming Cui, Chen Lv, Peng Hang, and Jian Sun. 2024. Towards interactive and learnable cooperative driving automation: a large language model-driven decision-making framework. arXiv:2409.12812.
218
+ Shiyu Fang, Jiaqi Liu, Chengkai Xu, Chen Lv, Peng Hang, and Jian Sun. 2025. Interact, instruct to improve: A llm-driven parallel actor-reasoner framework for enhancing autonomous vehicle interactions. arXiv preprint arXiv:2503.00502.
219
+ Xueyang Feng, Zhiyuan Chen, Yujia Qin, Yankai Lin, Xu Chen, Zhiyuan Liu, and Ji-Rong Wen. 2024. Large language model-based human-agent collaboration for complex task solving. In Proc. of EMNLP (Findings), pages 1336-1357. Association for Computational Linguistics.
220
+ Sonda Fourati, Wael Jaafar, Noura Baccar, and Safwan Alfattani. 2024. Xlm for autonomous driving systems: A comprehensive review. arXiv:2409.10484.
221
+ Daocheng Fu, Xin Li, Licheng Wen, Min Dou, Pinlong Cai, Botian Shi, and Yu Qiao. 2024. Drive like a human: Rethinking autonomous driving with large language models. In Proc. of WACVW, pages 910-919.
222
+ Peng Gao, Jiaming Han, Renrui Zhang, Ziyi Lin, Shijie Geng, Aojun Zhou, Wei Zhang, Pan Lu, Conghui He, Xiangyu Yue, et al. 2023. Llama-adapter v2: Parameter-efficient visual instruction model. arXiv:2304.15010.
223
+
224
+ Andreas Geiger, Philip Lenz, and Raquel Urtasun. 2020. nuscenes: A multimodal dataset for autonomous driving. In Proc. of CVPR, pages 11621-11631.
225
+ Andreas Geiger, Philip Lenz, et al. 2012. Are we ready for autonomous driving? the kitti vision benchmark suite. In Proc. of CVPR, pages 3354-3361.
226
+ Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V. Chawla, Olaf Wiest, and Xiangliang Zhang. 2024. Large language model based multi-agents: A survey of progress and challenges. In Proc. of IJCAI, pages 8048-8057. ijcai.org.
227
+ Yushan Han, Hui Zhang, Huifang Li, Yi Jin, Congyan Lang, and Yidong Li. 2023. Collaborative perception in autonomous driving: Methods, datasets, and challenges. IEEE ITS Mag, 15(6):131-151.
228
+ Xinmeng Hou, Wuqi Wang, Long Yang, Hao Lin, Jinglun Feng, Haigen Min, and Xiangmo Zhao. 2025. Driveagent: Multi-agent structured reasoning with llm and multimodal sensor fusion for autonomous driving. arXiv preprint arXiv:2505.02123.
229
+ Senkang Hu, Zhengru Fang, Yiqin Deng, Xianhao Chen, and Yuguang Fang. 2021. Collaborative autonomous driving—a survey of solution approaches and future challenges. Sensors, 21(11):3783.
230
+ Senkang Hu, Zhengru Fang, Zihan Fang, Yiqin Deng, Xianhao Chen, and Yuguang Fang. 2024a. Agentscodriver: Large language model empowered collaborative driving with lifelong learning. arXiv:2404.06345.
231
+ Senkang Hu, Zhengru Fang, Zihan Fang, Yiqin Deng, Xianhao Chen, Yuguang Fang, and Sam Kwong. 2024b. Agentscomerge: Large language model empowered collaborative decision making for ramp merging. arXiv:2408.03624.
232
+ Senkang Hu, Zhengru Fang, et al. 2024c. Collaborative perception for connected and autonomous driving: Challenges, possible solutions and opportunities. arXiv:2401.01544.
233
+ Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2023. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv:2311.05232.
234
+ Yidong Huang, Jacob Sansom, Ziqiao Ma, Felix Gervits, and Joyce Chai. 2024a. Drivlme: Enhancing llm-based autonomous driving agents with embodied and social experiences. In Proc. of IROS, pages 3153-3160. IEEE.
235
+ Yizhou Huang, Yihua Cheng, and Kezhi Wang. 2024b. Efficient driving behavior narration and reasoning on edge device using large language models. arXiv:2409.20364.
236
+
237
+ Kemou Jiang, Xuan Cai, Zhiyong Cui, Aoyong Li, Yi long Ren, Haiyang Yu, Hao Yang, Daocheng Fu, Licheng Wen, and Pinlong Cai. 2024a. Koma: Knowledge-driven multi-agent framework for autonomous driving with large language models. IEEE TIV, pages 1-15.
238
+ Yushan Jiang, Zijie Pan, Xikun Zhang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024b. Empowering time series analysis with large language models: A survey. arXiv preprint arXiv:2402.03182.
239
+ Ye Jin, Ruoxuan Yang, Zhijie Yi, Xiaoxi Shen, Huiling Peng, Xiaoan Liu, Jingli Qin, Jiayang Li, Jintao Xie, Peizhong Gao, et al. 2024. Surrealdriver: Designing llm-powered generative driver agent framework based on human drivers' driving-thinking data. In Proc. of IROS, pages 966-971. IEEE.
240
+ Jinkyu Kim, Anna Rohrbach, Trevor Darrell, John Canny, and Zeynep Akata. 2018. Textual explanations for self-driving vehicles. In Proc. of ECCV, pages 563-578.
241
+ Jiwei Li, Alexander H Miller, Sumit Chopra, Marc'Aurelio Ranzato, and Jason Weston. 2017. Dialogue learning with human-in-the-loop. In Proc. of ICLR.
242
+ Xin Li, Yeqi Bai, Pinlong Cai, Licheng Wen, Daocheng Fu, Bo Zhang, Xuemeng Yang, Xinyu Cai, Tao Ma, Jianfei Guo, et al. 2023. Towards knowledge-driven autonomous driving. arXiv:2312.04316.
243
+ Xinyi Li, Sai Wang, Siqi Zeng, Yu Wu, and Yi Yang. 2024. A survey on llm-based multi-agent systems: workflow, infrastructure, and challenges. *Vicinagearth*, 1(1):9.
244
+ Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. 2024. Encouraging divergent thinking in large language models through multi-agent debate. In Proc. of EMNLP, pages 17889-17904. Association for Computational Linguistics.
245
+ Jiaqi Liu, Chengkai Xu, Peng Hang, Jian Sun, Mingyu Ding, Wei Zhan, and Masayoshi Tomizuka. 2024a. Language-driven policy distillation for cooperative driving in multi-agent reinforcement learning. arXiv:2410.24152.
246
+ Mingyu Liu, Ekim Yurtsever, Jonathan Fossaert, Xingcheng Zhou, Walter Zimmer, Yuning Cui, Bare Luka Zagar, and Alois C Knoll. 2024b. A survey on autonomous driving datasets: Statistics, annotation quality, and a future outlook. IEEE TIV, pages 1-29.
247
+ Han Lu, Xiaosong Jia, Yichen Xie, Wenlong Liao, Xiaokang Yang, and Junchi Yan. 2024. Activead: Planning-oriented active learning for end-to-end autonomous driving. arXiv:2403.02877.
248
+
249
+ Yunsheng Ma, Xu Cao, Wenqian Ye, Can Cui, Kai Mei, and Ziran Wang. 2024. Learning autonomous driving tasks via human feedbacks with large language models. In Proc. of EMNLP (Findings), pages 4985-4995.
250
+ Vagul Mahadevan, Shangtong Zhang, and Rohan Chandra. 2025. Gamechat: Multi-llm dialogue for safe, agile, and socially optimal multi-agent navigation in constrained environments. arXiv preprint arXiv:2503.12333.
251
+ Jiageng Mao, Yuxi Qian, Junjie Ye, Hang Zhao, and Yue Wang. 2023. Gpt-driver: Learning to drive with gpt. arXiv:2310.01415.
252
+ Jiageng Mao, Junjie Ye, Yuxi Qian, Marco Pavone, and Yue Wang. 2024. A language agent for autonomous driving. In Proc. of COLM.
253
+ Xenia Ohmer, Marko Duda, and Elia Bruni. 2022. Emergence of hierarchical reference systems in multiagent communication. In Proc. of COLING, pages 5689-5706. International Committee on Computational Linguistics.
254
+ Mingxing Peng, Xusen Guo, Xianda Chen, Meixin Zhu, Kehua Chen, Xuesong Wang, Yinhai Wang, et al. 2024. Lc-llm: Explainable lane-change intention and trajectory predictions with large language models. arXiv:2403.18344.
255
+ Tianwen Qian, Jingjing Chen, Linhai Zhuo, Yang Jiao, and Yu-Gang Jiang. 2024. Nuscenes-qa: A multimodal visual question answering benchmark for autonomous driving scenario. In Proc. of AAAI, pages 4542-4550.
256
+ Axel Sauer, Nikolay Savinov, and Andreas Geiger. 2018. Conditional affordance learning for driving in urban environments. In Proc. of CoRL, pages 237-252. PMLR.
257
+ Hao Sha, Yao Mu, Yuxuan Jiang, Li Chen, Chenfeng Xu, Ping Luo, Shengbo Eben Li, Masayoshi Tomizuka, Wei Zhan, and Mingyu Ding. 2023. Languagempc: Large language models as decision makers for autonomous driving. arXiv:2310.03026.
258
+ Shaoshuai Shi, Li Jiang, Dengxin Dai, and Bernt Schiele. 2022. Motion transformer with global intention localization and local movement refinement. In Proc. of NeurIPS, 35:6531-6543.
259
+ Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens Beiwenger, Ping Luo, Andreas Geiger, and Hongyang Li. 2025. Drivelm: Driving with graph visual question answering. In Proc. of ECCV, pages 256-274.
260
+ Hao Sun, Jiayi Wu, Hengyi Cai, Xiaochi Wei, Yue Feng, Bo Wang, Shuaiqiang Wang, Yan Zhang, and Dawei Yin. 2024a. Adaswitch: Adaptive switching between small and large agents for effective cloud-local collaborative learning. In Proc. of EMNLP, pages 8052-8062. Association for Computational Linguistics.
261
+
262
+ Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. 2020. Scalability in perception for autonomous driving: Waymo open dataset. In Proc. of CVPR, pages 2446-2454.
263
+ Yuan Sun, Navid Salami Pargoo, Peter Jin, and Jorge Ortiz. 2024b. Optimizing autonomous driving for safety: A human-centric approach with llm-enhanced rlhf. In Companion of the 2024 ACM International Joint Conference on Pervasive and Ubiquitous Computing, pages 76-80.
264
+ Zuoyin Tang, Jianhua He, Dashuai Pe, Kezhong Liu, Tao Gao, and Jiawei Zheng. 2024. Test large language models on driving theory knowledge and skills for connected autonomous vehicles. In Proc. of MobiArch, pages 1-6.
265
+ Kailin Tong and Selim Solmaz. 2024. Connectgpt: Connect large language models with connected and automated vehicles. In Proc. of IEEE IV, pages 581-588.
266
+ Wenwen Tong, Chonghao Sima, Tai Wang, Li Chen, Silei Wu, Hanming Deng, Yi Gu, Lewei Lu, Ping Luo, Dahua Lin, et al. 2023. Scene as occupancy. In Proc. of ICCV, pages 8406-8415.
267
+ Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. 2024a. A survey on large language model based autonomous agents. FCS, 18(6):186345.
268
+ Lening Wang, Yilong Ren, Han Jiang, Pinlong Cai, Daocheng Fu, Tianqi Wang, Zhiyong Cui, Haiyang Yu, Xuesong Wang, Hanchu Zhou, et al. 2024b. Accidentgt: A v2x environmental perception multimodal large model for accident analysis and prevention. In Proc. of IEEE IV, pages 472-477. IEEE.
269
+ Shiyi Wang, Yuxuan Zhu, Zhiheng Li, Yutong Wang, Li Li, and Zhengbing He. 2023. Chatgpt as your vehicle co-pilot: An initial attempt. IEEE TIV, 8(12):4706-4721.
270
+ Yue Wang, Vitor Campagnolo Guizilini, Tianyuan Zhang, Yilun Wang, Hang Zhao, and Justin Solomon. 2021. DETR3d: 3d object detection from multi-view images via 3d-to-2d queries. In Proc. of CoRL.
271
+ Yuxi Wei, Zi Wang, Yifan Lu, Chenxin Xu, Changxing Liu, Hao Zhao, Siheng Chen, and Yanfeng Wang. 2024. Editable scene simulation for autonomous driving via collaborative llm-agents. In Proc. of CVPR, pages 15077-15087.
272
+ Licheng Wen, Daocheng Fu, Xin Li, Xinyu Cai, Tao MA, Pinlong Cai, Min Dou, Botian Shi, Liang He, and Yu Qiao. 2024. Dilu: A knowledge-driven approach to autonomous driving with large language models. In Proc. of ICLR.
273
+
274
+ Ding Xia, Xinyue Gui, Fan Gao, Dongyuan Li, Mark Colley, and Takeo Igarashi. 2025. Automating ehmi action design with llms for automated vehicle communication. arXiv preprint arXiv:2505.20711.
275
+ Chengkai Xu, Jiaqi Liu, Shiyu Fang, Yiming Cui, Dong Chen, Peng Hang, and Jian Sun. 2025. Tell-drive: Enhancing autonomous driving with teacher llm-guided deep reinforcement learning. arXiv preprint arXiv:2502.01387.
276
+ Runsheng Xu, Xin Xia, Jinlong Li, Hanzhao Li, Shuo Zhang, Zhengzhong Tu, Zonglin Meng, et al. 2023. V2v4real: A real-world large-scale dataset for vehicle-to-vehicle cooperative perception. In Proc. of CVPR, pages 13712-13722.
277
+ Runsheng Xu, Hao Xiang, Zhengzhong Tu, Xin Xia, Ming-Hsuan Yang, and Jiaqi Ma. 2022. V2x-vit: Vehicle-to-everything cooperative perception with vision transformer. In Proc. of ECCV, pages 107-124.
278
+ Zhenhua Xu, Yujia Zhang, Enze Xie, Zhen Zhao, Yong Guo, Kwan-Yee K Wong, Zhenguo Li, and Hengshuang Zhao. 2024. Drivegpt4: Interpretable end-to-end autonomous driving via large language model. IEEE RA-L.
279
+ Zijiang Yan, Hao Zhou, Hina Tabassum, and Xue Liu. 2025. Hybrid llm-ddqn based joint optimization of v2i communication and autonomous driving. IEEE Wireless Communications Letters.
280
+ Zhenjie Yang, Xiaosong Jia, Hongyang Li, and Junchi Yan. 2023. Llm4drive: A survey of large language models for autonomous driving. In NeurIPS 2024 Workshop on Open-World Agents.
281
+ Huaiyuan Yao, Longchao Da, Vishnu Nandam, Justin Turnau, Zhiwei Liu, Linsey Pang, and Hua Wei. 2024. Comal: Collaborative multi-agent large language models for mixed-autonomy traffic. arXiv:2410.14368.
282
+ Junjie Ye, Xuanting Chen, Nuo Xu, Can Zu, Zekai Shao, Shichun Liu, Yuhan Cui, Zeyang Zhou, Chao Gong, Yang Shen, et al. 2023. A comprehensive capability analysis of gpt-3 and gpt-3.5 series models. arXiv:2303.10420.
283
+ Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. 2020. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In Proc. of CVPR, pages 2636-2645.
284
+ Haibao Yu, Yizhen Luo, Mao Shu, Yiyi Huo, Zebang Yang, Yifeng Shi, Zhenglong Guo, Hanyu Li, Xing Hu, Jirui Yuan, et al. 2022. Dair-v2x: A large-scale dataset for vehicle-infrastructure cooperative 3d object detection. In Proc. of CVPR, pages 21361-21370.
285
+
286
+ Ekim Yurtsever, Jacob Lambert, Alexander Carballo, and Kazuya Takeda. 2020. A survey of autonomous driving: Common practices and emerging technologies. IEEE access, pages 58443-58469.
287
+ Wei Zhan, Liting Sun, Di Wang, Haojie Shi, Aubrey Clausse, Maximilian Naumann, Julius Kummerle, Hendrik Konigshof, Christoph Stiller, Arnaud de La Fortelle, et al. 2019. Interaction dataset: An international, adversarial and cooperative motion dataset in interactive driving scenarios with semantic maps. arXiv preprint arXiv:1910.03088.
288
+ Hang Zhang, Wenxiao Zhang, Haoxuan Qu, and Jun Liu. 2024a. Enhancing human-centered dynamic scene understanding via multiple llms collaborated reasoning. arXiv:2403.10107.
289
+ Miao Zhang, Zhenlong Fang, Tianyi Wang, Shuai Lu, Xueqian Wang, and Tianyu Shi. 2025. Ccma: A framework for cascading cooperative multi-agent in autonomous driving merging using large language models. Expert Systems with Applications, page 127717.
290
+ Ruiqi Zhang, Jing Hou, Florian Walter, Shangding Gu, Jiayi Guan, Florian Röhrbein, Yali Du, Panpan Cai, Guang Chen, and Alois Knoll. 2024b. Multi-agent reinforcement learning for autonomous driving: A survey. arXiv:2408.09675.
291
+ Zaibin Zhang, Shiyu Tang, Yuanhang Zhang, Talas Fu, Yifan Wang, Yang Liu, Dong Wang, Jing Shao, Lijun Wang, and Huchuan Lu. 2024c. Ad-h: Autonomous driving with hierarchical agents. arXiv:2406.03474.
292
+ Zijian Zhang, Yujie Sun, Zepu Wang, Yuqi Nie, Xiaobo Ma, Peng Sun, and Ruolin Li. 2024d. Large language models for mobility in transportation systems: A survey on forecasting tasks. arXiv preprint arXiv:2405.02357.
293
+ Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv:2303.18223.
294
+ Lingfeng Zhou, Mohan Jiang, and Dequan Wang. 2024a. Humansim: Human-like multi-agent novel driving simulation for corner case generation. In ECCV 2024 Workshop on MPCC-AD.
295
+ Ming Zhou, Jun Luo, Julian Villella, Yaodong Yang, David Rusu, Jiayu Miao, Weinan Zhang, Montgomery Alban, Iman Fadakar, Zheng Chen, et al. 2021. Smarts: An open-source scalable multi-agent rl training school for autonomous driving. In Proc. of CoRL, pages 264-285. PMLR.
296
+ Xingcheng Zhou, Mingyu Liu, Ekim Yurtsever, Bare Luka Zagar, Walter Zimmer, Hu Cao, and Alois C Knoll. 2024b. Vision language models in autonomous driving: A survey and outlook. IEEE TIV, pages 1-20.
297
+
298
+ Yijie Zhou, Xianhui Cheng, Qiming Zhang, Lei Wang, Wenchao Ding, Xiangyang Xue, Chunbo Luo, and Jian Pu. 2024c. Algpt: Multi-agent cooperative framework for open-vocabulary multi-modal auto-annotating in autonomous driving. IEEE TIV, pages 1-15.
299
+ Walter Zimmer, Gerhard Arya Wardana, Suren Sritharan, Xingcheng Zhou, Rui Song, and Alois C Knoll. 2024. Tumtraf v2x cooperative perception dataset. In Proc. of CVPR, pages 22668-22677.
300
+ Henry Peng Zou, Wei-Chieh Huang, Yaozu Wu, Yankai Chen, Chunyu Miao, Hoang Nguyen, Yue Zhou, Weizhi Zhang, Liancheng Fang, Langzhou He, Yangning Li, Dongyuan Li, Renhe Jiang, Xue Liu, and Philip S. Yu. 2025a. Llm-based human-agent collaboration and interaction systems: A survey. Preprint, arXiv:2505.00753.
301
+ Henry Peng Zou, Wei-Chieh Huang, Yaozu Wu, Chunyu Miao, Dongyuan Li, Aiwei Liu, Yue Zhou, Yankai Chen, Weizhi Zhang, Yangning Li, et al. 2025b. A call for collaborative intelligence: Why human-agent systems should precede ai autonomy. arXiv preprint arXiv:2506.09420.
302
+
303
+ # A Data-driven Autonomous Driving System
304
+
305
+ Traditional ADSs rely on data-driven approaches, which are categorized into modular and end-to-end frameworks (Chen et al., 2024b). Modular systems break the entire autonomous driving process into separate components, such as the perception, prediction, and planning modules. The perception module is responsible for obtaining information about the vehicle's surrounding environment, aiming to identify and locate important traffic elements such as obstacles, pedestrians, and vehicles near the autonomous vehicle, usually through tasks such as object detection (Wang et al., 2021) and occupancy prediction (Tong et al., 2023). The prediction module estimates the future motions of surrounding traffic participants based on the information provided by the perception module, usually through tasks such as trajectory prediction and motion prediction (Shi et al., 2022). The planning module aims to derive safe and comfortable driving routes and decisions from the results of perception and prediction (Sauer et al., 2018). Each module is individually developed and then integrated onboard the vehicle to achieve safe and efficient autonomous driving. Although modular methods have achieved remarkable results in many driving scenarios, the stacked design of multiple modules can lose key information during transmission and introduce redundant computation. Furthermore, because the optimization objectives of the modules are not consistent with one another, a modular system may accumulate errors, which can negatively impact the vehicle's overall decision-making performance. End-to-end systems instead integrate the entire driving process into a single neural network and directly optimize the whole pipeline from sensor inputs to driving actions (Chen et al., 2024b). However, this approach introduces the "black box" problem: a lack of transparency in the decision-making process that complicates interpretation.
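+
+ To make the modular vs. end-to-end contrast concrete, the following is a minimal, hypothetical sketch of the two designs; the class and method names are illustrative assumptions, not an actual ADS implementation.
+
+ ```python
+ from dataclasses import dataclass
+ from typing import List, Tuple
+
+ @dataclass
+ class Detection:            # what the perception module hands to prediction
+     x: float
+     y: float
+     vx: float
+     vy: float
+
+ @dataclass
+ class PredictedTrack:       # what the prediction module hands to planning
+     future_xy: List[Tuple[float, float]]
+
+ class ModularADS:
+     """Perception -> prediction -> planning, each developed and optimized
+     separately. Only what each interface exposes is passed downstream, which
+     is where information loss and error accumulation can occur."""
+
+     def drive(self, sensor_frame) -> str:
+         detections = self.perceive(sensor_frame)   # e.g., 3D object detection
+         tracks = self.predict(detections)          # e.g., trajectory prediction
+         return self.plan(tracks)                   # e.g., "keep lane", "brake"
+
+     def perceive(self, frame) -> List[Detection]: ...
+     def predict(self, dets: List[Detection]) -> List[PredictedTrack]: ...
+     def plan(self, tracks: List[PredictedTrack]) -> str: ...
+
+ class EndToEndADS:
+     """A single network maps raw sensor input directly to a driving action.
+     Jointly optimized, but opaque: the 'black box' interpretability issue."""
+
+     def drive(self, sensor_frame) -> str:
+         return self.policy_network(sensor_frame)
+
+     def policy_network(self, frame) -> str: ...
+ ```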
306
+
307
+ # B LLMs in Autonomous Driving System
308
+
309
+ As shown in Figure 6 and Figure 7, LLMs, with their powerful open-world cognitive and reasoning capabilities, have shown significant potential in ADSs (Yang et al., 2023; Li et al., 2023). LC-LLM (Peng et al., 2024) is an explainable lane change prediction model that leverages LLMs to process driving scenario information as natural
310
+
311
+ language prompts. By incorporating CoT reasoning and supervised finetuning, it not only predicts lane change intentions and trajectories but also provides transparent and reliable explanations for its predictions. GPT-Driver (Mao et al., 2023) regards the motion planning task as a language modeling problem, using a fine-tuned GPT-3.5 model (Ye et al., 2023) to generate driving trajectories. DriveGPT4 (Xu et al., 2024) introduces an interpretable end-to-end autonomous driving system that uses multimodal LLMs to process multi-frame video inputs and textual queries, enabling vehicle action interpretation and low-level control prediction. By employing a visual instruction tuning dataset and mix-finetuning strategy, it provides a novel approach to directly map sensory inputs to actions, achieving superior performance in autonomous driving tasks. Driving with LLM (Chen et al., 2024c) integrates vectorized numeric data with pre-trained LLMs to improve context understanding in driving scenarios and enhances the interpretability of driving decisions.
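+
+ A recurring pattern in these works (e.g., GPT-Driver treating planning as language modeling, LC-LLM serializing the scenario into a prompt) is to convert structured scene state into text, query an LLM, and parse the answer. The sketch below illustrates only the prompt-construction side; the query_llm function is a placeholder for whatever model backend is used, and the field names are assumptions for the example, not any paper's actual format.
+
+ ```python
+ def scene_to_prompt(ego: dict, others: list) -> str:
+     """Serialize structured perception output into a natural-language prompt,
+     in the spirit of GPT-Driver / LC-LLM style prompting (illustrative only)."""
+     lines = [f"You are driving at {ego['speed']:.1f} m/s in lane {ego['lane']}."]
+     for i, v in enumerate(others, start=1):
+         lines.append(
+             f"Vehicle {i} is in lane {v['lane']}, {v['gap']:.0f} m ahead, "
+             f"moving at {v['speed']:.1f} m/s."
+         )
+     lines.append(
+         "Choose one action: accelerate, maintain, decelerate, change lane. "
+         "Reason step by step, then state the final answer on the last line."
+     )
+     return "\n".join(lines)
+
+ def query_llm(prompt: str) -> str:
+     """Placeholder for an LLM call (local or hosted); assumed, not a real API."""
+     raise NotImplementedError
+
+ ego = {"speed": 25.0, "lane": 3}
+ others = [{"lane": 3, "gap": 40.0, "speed": 25.0}]
+ print(scene_to_prompt(ego, others))
+ # decision = query_llm(scene_to_prompt(ego, others))  # last line parsed as the action
+ ```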
312
+
313
+ # C LLM-enhanced Multi-Agent ADSs
314
+
315
+ To highlight the application of LLMs and other NLP technologies in multi-agent ADSs, we provide Table 4. The table organizes existing research along two dimensions, environment & profile characteristics and interaction mode, and lists the LLM(s) used by each approach. Our goal is to help readers quickly grasp the landscape of this cross-domain research and better understand how LLM capabilities are being adapted to complex ADS scenarios.
316
+
317
+ # D Real-World Multi-Agent LLM Systems in Autonomous Driving
318
+
319
+ NVIDIA DriveOS LLM Integration. NVIDIA has released the DriveOS LLM SDK, which allows multiple AI agents (for perception, planning, and user interaction) to run on edge computing hardware with the ability to perform LLM inference. For example, a car can have a local LLM-based agent for real-time driving decisions or V2X message interpretation, offloading heavy computational tasks to optimized hardware. This lightweight onboard agent works in tandem with powerful cloud-based AI. In this conceptual
320
+
321
+ multi-agent setup, the onboard LLM can handle immediate tasks and natural language commands while querying the cloud-based LLM for complex planning or traffic knowledge.
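+
+ The edge-cloud split described here can be caricatured as a routing decision. The sketch below is purely conceptual; the DriveOS SDK's actual interfaces are not documented in this survey, so the function, task names, and thresholds are assumptions for illustration.
+
+ ```python
+ def route_request(task: str, latency_budget_ms: float, complexity: float) -> str:
+     """Hypothetical policy for splitting work between an onboard LLM and a
+     cloud LLM: latency-bound or safety-critical requests stay on the edge,
+     while knowledge-heavy or long-horizon planning goes to the cloud."""
+     safety_critical = {"emergency_brake", "collision_check", "v2x_message_parse"}
+     if task in safety_critical or latency_budget_ms < 100:
+         return "onboard_llm"
+     if complexity > 0.7:   # e.g., multi-intersection route replanning
+         return "cloud_llm"
+     return "onboard_llm"
+
+ print(route_request("v2x_message_parse", latency_budget_ms=50, complexity=0.2))   # onboard_llm
+ print(route_request("route_replanning", latency_budget_ms=2000, complexity=0.9))  # cloud_llm
+ ```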
322
+
323
+ LLM in the Cabin and Beyond. Mercedes-Benz is collaborating with NVIDIA to develop multimodal LLMs that can interpret sensor data and driver preferences to assist in driving decisions. LLM-based intelligent agents can act as "digital copilots", monitoring the surrounding environment and providing maneuvering recommendations.
324
+
325
+ # Scenario Description
326
+
327
+ You are driving on a highway road with 8 lanes, and you are currently driving in the rightmost lane.
328
+
329
+ Your current speed is $25.0~\mathrm{m/s}$, acceleration is $0.0~\mathrm{m/s}^2$, and lane position is $360~\mathrm{m}$.
330
+
331
+ There are other vehicles around: Veh1 is driving on the lane to your left and is ahead of you. The speed of it is $21.2~\mathrm{m/s}$, acceleration is $0.2~\mathrm{m/s}^2$, and lane position is $372~\mathrm{m}$.
332
+
333
+ Veh2 is driving on the same lane as you and is ahead of you. The speed of it is $25.0 \, \text{m/s}$ , acceleration is $-0.1 \, \text{m/s}^2$ , and lane position is $400 \, \text{m}$ .
334
+
335
+ ![](images/2d129053198ff75223d1ef98b3c0264b670b787a7fa91898938fd7bc284d6227.jpg)
336
+ Figure 6: An example of an LLM-based single-agent ADS (Wen et al., 2024).
337
+
338
+ ![](images/2015b388b51264731bad5fa5a46017cbd005e008aad22b47b60daf6f213d7cf9.jpg)
339
+
340
+ LLMs as the reason module of agents
341
+
342
+ ![](images/3802e9cc564c66bca266a3f3b74202a17de12f3fd4dce3cb4d28167cd1b41337.jpg)
343
+
344
+ # Agent Profile
345
+
346
+ You are a driving agent with proficient driving skills. The autonomous vehicle you drive runs very smoothly without sudden braking or acceleration. Your task is to transport various passengers to the required location and provide as safe and comfortable a riding experience as possible.
347
+
348
+ # Reasoning Output
349
+
350
+ I should first check if I can accelerate, then maintain current speed, finally decelerate. I can also try to change lanes but with caution.
351
+
352
+ At first, I need to observe veh1. The distance between me and veh1 is $400 - 360 = 40~\mathrm{m}$, and the difference in speed is $25.00 - 25.00 = 0.00~\mathrm{m/s}$.
353
+
354
+ Veh1 is traveling $40~\mathrm{m}$ ahead of me and its speed is the same as mine. This distance is safe, but my speed is already at the desired level, so I should not accelerate.
355
+
356
+ Since I cannot accelerate, I want to know if I can maintain my current speed. I should observe car veh1.
357
+
358
+ Final Answer: maintain current speed
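+
+ The gap and relative-speed arithmetic in the reasoning output above can be written out explicitly. The sketch below mirrors that check; the time-headway threshold is an illustrative assumption, not a value taken from the figure.
+
+ ```python
+ def longitudinal_decision(ego_pos: float, ego_speed: float,
+                           lead_pos: float, lead_speed: float,
+                           desired_speed: float = 25.0,
+                           min_headway_s: float = 1.5) -> str:
+     """Mirror the reasoning above: compute the gap and speed difference to the
+     lead vehicle, then pick accelerate / maintain / decelerate."""
+     gap = lead_pos - ego_pos                # 400 - 360 = 40 m
+     closing_speed = ego_speed - lead_speed  # 25.0 - 25.0 = 0.0 m/s
+     headway = gap / ego_speed if ego_speed > 0 else float("inf")  # 1.6 s
+
+     if headway < min_headway_s and closing_speed >= 0:
+         return "decelerate"
+     if ego_speed < desired_speed and closing_speed <= 0:
+         return "accelerate"
+     return "maintain current speed"
+
+ print(longitudinal_decision(ego_pos=360.0, ego_speed=25.0,
+                             lead_pos=400.0, lead_speed=25.0))
+ # maintain current speed
+ ```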
359
+
360
+ # Agent Profile & Scenario Description
361
+
362
+ ![](images/af62c7bccb9ec256ee1ad539bf2ffff6a1ef6acd7f5faa46cf4e13ccd28f0776.jpg)
363
+
364
+ ![](images/dd35f048aa54f29bc39f32acdeb3fd13d64cfb8f8bf1b68394ad97d524a762f2.jpg)
365
+
366
+ ![](images/46ebe836a6a5ef6990aeb012edf55390cf9a7ee9e6c451e3f85bc4661831223d.jpg)
367
+
368
+ # Veh1's reasoning process and decision
369
+
370
+ Veh1's message: Veh2, I am turning right, my speed is $2.03~\mathrm{m/s}$, and we should keep a safe distance to avoid collision.
371
+
372
+ ![](images/e9ea7d074ca6a5af9c3a6cae4e48b7d3da21599e30d7dda6000015a4d07b2e20.jpg)
373
+ communication
374
+
375
+ Veh2's message: Veh1, since you are turning right and in the intersection area, I will decelerate speed to keep a safe distance with you.
376
+
377
+ # Agent Profile
378
+
379
+ You are an intelligent driving agent with skilled driving skills that can provide a safe and comfortable driving experience.
380
+
381
+ ![](images/5d765a39b6f84058a6725ade8eff332ad6ee5ba02b8c7f17a044827e45b93df6.jpg)
382
+ Figure 7: The communication among multiple agents in an LLM-based multi-agent system (Hu et al., 2024a).
383
+
384
+ # Scenario Description
385
+
386
+ You are driving through a congested intersection at a speed of $25~\mathrm{m/s}$ and an acceleration of $0.0~\mathrm{m/s}^2$. Veh2 is also driving through the intersection at a speed of $10~\mathrm{m/s}$ and an acceleration of $0.5~\mathrm{m/s}^2$.
387
+
388
+ # Veh2's reasoning process and decision:
389
+
390
+ My position and speed: I am $30~\mathrm{m}$ away from the center of the intersection and my current speed is $6.34~\mathrm{m/s}$. Intersection condition: Veh1 is in the intersection area and the intersection area is not clear.
391
+
392
+ Message from the other vehicle: Veh1 said that it is turning right, its speed is $3.06~\mathrm{m/s}$, and I should keep a safe distance.
393
+
394
+ I am going to approach the intersection, Veh1 is in the intersection area and turning right, and I should decelerate to keep a safe distance from it.
395
+
396
+ Final Answer: Decelerate
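+
+ The exchange above amounts to one vehicle broadcasting an intent message and the other conditioning its decision on it. Below is a toy version of that pattern; the message fields and the yielding rule are assumptions made for illustration, not the protocol used in the cited work.
+
+ ```python
+ from dataclasses import dataclass
+ from typing import Optional
+
+ @dataclass
+ class IntentMessage:
+     sender: str
+     maneuver: str       # e.g., "turning right"
+     speed_mps: float
+
+ def decide_at_intersection(my_speed: float, intersection_clear: bool,
+                            msg: Optional[IntentMessage]) -> str:
+     """Toy version of Veh2's reasoning: if another vehicle announces it is
+     occupying the intersection, yield by decelerating."""
+     if msg is not None and not intersection_clear:
+         return "decelerate"   # keep a safe distance from the announced maneuver
+     return "maintain"
+
+ veh1_msg = IntentMessage(sender="Veh1", maneuver="turning right", speed_mps=3.06)
+ print(decide_at_intersection(my_speed=6.34, intersection_clear=False, msg=veh1_msg))
+ # decelerate
+ ```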
397
+
398
+ Table 3: Comparison of Interaction Modes and System Structures in LLM-Based Multi-Agent ADSs.
399
+
400
+ <table><tr><td>Dimension</td><td>Advantage</td><td>Limitation</td></tr><tr><td colspan="3">Mode</td></tr><tr><td>Co-operative</td><td>Enhances traffic flow efficiency and reduces collision risk by sharing agent intents and aligning actions.</td><td>Unexpected selfish behavior from uncooperative agents can propagate unsafe plans to the entire fleet.</td></tr><tr><td>Competitive</td><td>Can lead to more assertive and individually optimized behaviors in contested scenarios, such as securing a lane change in dense traffic.</td><td>Risks escalating conflicts and reducing overall traffic system stability if not properly regulated, potentially leading to gridlock or unsafe maneuvers.</td></tr><tr><td>Debate</td><td>LLM-based driving agents critique each other&#x27;s plans, surfacing hazards and converging on safer, more optimal strategies before execution.</td><td>Can lead to significant communication overhead and decision delay, which is a problem for real-time driving decisions.</td></tr><tr><td colspan="3">Structure</td></tr><tr><td>Centralised</td><td>Enables strong global coordination and optimized system-wide decisions for traffic management due to a comprehensive overview.</td><td>Single-point failure and uplink delays can endanger all participating vehicles.</td></tr><tr><td>Decentralised</td><td>Offers high robustness and scalability as individual agent failures have limited systemic impact, allowing for agile responses to local traffic conditions.</td><td>Lacks a global picture; local optima (e.g., platoon break-ups) can degrade overall traffic efficiency and safety.</td></tr><tr><td>Hierarchical</td><td>Layered clusters (vehicle → platoon → cloud) scale to city-wide fleets while containing message volume within each tier.</td><td>Can introduce communication delays between layers and may suffer from inflexibility if the hierarchy is too rigid to adapt to highly dynamic situations.</td></tr><tr><td>Shared Message Pool</td><td>Allows flexible, asynchronous information sharing, reducing direct communication burdens and enabling opportunistic coordination.</td><td>Contention and information overload risk stale or conflicting data, demanding strict access control.</td></tr></table>
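+
+ To make the "Shared Message Pool" row in Table 3 concrete, the sketch below shows a minimal pool in which agents publish and read asynchronously, and stale entries are dropped, which is one simple way to limit the stale or conflicting-data risk noted in the table. Class and method names are illustrative assumptions, not a reference implementation from the surveyed systems.
+
+ ```python
+ import time
+
+ class SharedMessagePool:
+     """Minimal asynchronous message pool: agents post observations or intents,
+     and others read them opportunistically; entries older than max_age_s are
+     ignored to reduce the risk of acting on stale information."""
+
+     def __init__(self, max_age_s: float = 1.0):
+         self.max_age_s = max_age_s
+         self._pool = {}   # agent_id -> (timestamp, payload)
+
+     def publish(self, agent_id: str, payload: dict) -> None:
+         self._pool[agent_id] = (time.time(), payload)
+
+     def read(self, exclude: str = "") -> dict:
+         now = time.time()
+         return {aid: payload
+                 for aid, (ts, payload) in self._pool.items()
+                 if now - ts <= self.max_age_s and aid != exclude}
+
+ pool = SharedMessagePool(max_age_s=0.5)
+ pool.publish("veh1", {"intent": "merge_left", "speed": 21.2})
+ pool.publish("veh2", {"intent": "keep_lane", "speed": 25.0})
+ print(pool.read(exclude="veh2"))   # only veh1's fresh message
+ ```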
401
+
402
+ Table 4: Comparative Summary of LLM-Based Multi-Agent ADS Research.
403
+
404
+ <table><tr><td>Paper</td><td>Date</td><td>Environment</td><td>Profile-Method</td><td>Profile-Setting</td><td>Structure</td><td>Mode</td><td>Human-Feedback</td><td>LLM Model</td></tr><tr><td>LanguageMPC (Sha et al., 2023)</td><td>2023/10</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Centralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-3.5</td></tr><tr><td>AgentsCoDriver (Hu et al., 2024a)</td><td>2024/04</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents</td><td>Decentralized</td><td>Cooperative</td><td>-</td><td>GPT-3.5-turbo</td></tr><tr><td>KoMA (Jiang et al., 2024a)</td><td>2024/07</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents</td><td>Shared Message pool</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4</td></tr><tr><td>AgentsCoMerge (Hu et al., 2024b)</td><td>2024/08</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents</td><td>Decentralized, Hierarchical</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT/Claude/Gemini Series</td></tr><tr><td>CoDrivingLLM (Fang et al., 2024)</td><td>2024/09</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents</td><td>Centralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4o</td></tr><tr><td>CoMAL (Yao et al., 2024)</td><td>2024/10</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Shared Message pool</td><td>Cooperative</td><td>Instructor Paradigm</td><td>Instructor Paradigm</td><td>GPT-4o-mini, Qwen-72B, Qwen-32B, Qwen-7B</td></tr><tr><td>Complement-Vehicle&#x27;s-FOV (Dona et al., 2024)</td><td>2024/08</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Infrastructure agents, Human</td><td>Decentralized, Hierarchical, Centralized</td><td>Cooperative</td><td>Instructor Paradigm, Partnership Paradigm</td><td>GPT-4V, GPT-4o</td></tr><tr><td>CAV-LLM-Driving-Assistant (Tang et al., 2024)</td><td>2024/11</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4V, GPT-4o</td></tr><tr><td>EC-Drive (Chen et al., 2024a)</td><td>2024/08</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Infrastructure agents</td><td>Hierarchical</td><td>Cooperative</td><td>Instructor Paradigm</td><td>LLaMA-Adapter (7B), GPT-4</td></tr><tr><td>ChatSim (Wei et al., 2024)</td><td>2024/02</td><td>Simulation</td><td>Pre-defined, Model-generated</td><td>Human, Assistant agents</td><td>Hierarchical, Centralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4</td></tr><tr><td>ALGPT (Zhou et al., 2024c)</td><td>2024/01</td><td>Simulation</td><td>Pre-defined, Model-generated</td><td>Assistant agents</td><td>Hierarchical</td><td>Cooperative</td><td>-</td><td>GPT series</td></tr><tr><td>AD-H (Zhang et al., 2024c)</td><td>2024/06</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Hierarchical</td><td>Cooperative</td><td>Instructor Paradigm</td><td>LLaVA-7B-V1.5</td></tr><tr><td>SurrealDriver (Jin et al., 2024)</td><td>2023/09</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Infrastructure agents, Human</td><td>Hierarchical</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT series, Llama, PaLM</td></tr><tr><td>LDPD (Liu et al., 2024a)</td><td>2024/10</td><td>Simulation</td><td>Model-generated</td><td>Vehicle agents</td><td>Hierarchical, Centralized</td><td>Cooperative</td><td>-</td><td>GPT-4o</td></tr><tr><td>V-HOI MLCR (Zhang et al., 
2024a)</td><td>2024/03</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Hierarchical</td><td>Cooperative, Debate</td><td>Instructor Paradigm</td><td>GPT-4, GPT-3.5</td></tr><tr><td>Co-Pilot (Wang et al., 2023)</td><td>2023</td><td>Physics</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-3.5-turbo-0301</td></tr><tr><td>PPE (Ma et al., 2024)</td><td>2024</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4-turbo-preview and GPT-3.5-turbo</td></tr><tr><td>Drive-as-You-Speak (Cui et al., 2024a)</td><td>2023/09</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm, Partnership Paradigm</td><td>GPT-4</td></tr><tr><td>Reason-and-React (Cui et al., 2024b)</td><td>2023/10</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm, Partnership Paradigm</td><td>GPT-4</td></tr><tr><td>DriVLMe (Huang et al., 2024a)</td><td>2024/06</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>Vicuna-7B + LoRA</td></tr><tr><td>AccidentGPT (Wang et al., 2024b)</td><td>2024/06</td><td>Physics</td><td>Pre-defined</td><td>Vehicle agents, Infrastructure agents, Human</td><td>Hierarchical, Centralized, Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4</td></tr><tr><td>ConnectGPT (Tong and Solmaz, 2024)</td><td>2024/06</td><td>Physics</td><td>Pre-defined</td><td>Vehicle agents, Infrastructure agents, Human</td><td>Hierarchical, Centralized, Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4</td></tr><tr><td>DriveAgent (Hou et al., 2025)</td><td>2025/05</td><td>Physics</td><td>Pre-defined</td><td>Assistant agents</td><td>Decentralized</td><td>Cooperative</td><td>-</td><td>LLaMA-3.2-Vision</td></tr><tr><td>CCMA (Zhang et al., 2025)</td><td>2025</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Assistant agents</td><td>Hierarchical, Decentralized</td><td>Cooperative</td><td>-</td><td>GLM-4v-9B</td></tr><tr><td>V2V-LLM (Chiu et al., 2025)</td><td>2025/02</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents</td><td>Decentralized</td><td>Cooperative</td><td>-</td><td>LLaVA-v1.5-7b</td></tr><tr><td>IITI (Fang et al., 2025)</td><td>2025/03</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>Llama3</td></tr><tr><td>Tell-drive (Xu et al., 2025)</td><td>2025/02</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents</td><td>Hierarchical, Decentralized</td><td>Cooperative</td><td>-</td><td>GPT-4o-min</td></tr><tr><td>Human-RLHF (Sun et al., 2024b)</td><td>2024/06</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4o</td></tr><tr><td>GameChat (Mahadevan et al., 2025)</td><td>2025/03</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Human</td><td>Decentralized</td><td>Cooperative</td><td>Instructor Paradigm</td><td>GPT-4o-mini</td></tr><tr><td>hybrid LLM-DDQN (Yan et al., 2025)</td><td>2024/10</td><td>Simulation</td><td>Pre-defined</td><td>Vehicle agents, Infrastructure 
agents</td><td>Decentralized, Hierarchical</td><td>Cooperative</td><td>-</td><td>GPT-3.5, Llama3.1-8B, Llama3.1-70B</td></tr></table>
2502.16xxx/2502.16804/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fb4f9e6d47838d177c4ff986d51f4d13e178ebc736bd518d5470e985b256d7c
3
+ size 1003697
2502.16xxx/2502.16804/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_content_list.json ADDED
@@ -0,0 +1,436 @@
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Dataset Descriptor",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 147,
8
+ 91,
9
+ 302,
10
+ 107
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "PulseBat: A field-accessible dataset for second-life battery diagnostics from realistic histories using multidimensional rapid pulse test",
17
+ "bbox": [
18
+ 147,
19
+ 118,
20
+ 850,
21
+ 162
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Author List",
28
+ "text_level": 1,
29
+ "bbox": [
30
+ 147,
31
+ 202,
32
+ 247,
33
+ 217
34
+ ],
35
+ "page_idx": 0
36
+ },
37
+ {
38
+ "type": "text",
39
+ "text": "Shengyu Tao $^{1}$ , Guangyuan Ma $^{1}$ , Huixiong Yang $^{2}$ , Minyan Lu $^{2}$ , Guodan Wei $^{1}$ , Guangmin Zhou $^{1,*}$ , Xuan Zhang $^{1,*}$",
40
+ "bbox": [
41
+ 147,
42
+ 229,
43
+ 850,
44
+ 274
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Author Affiliations",
51
+ "text_level": 1,
52
+ "bbox": [
53
+ 147,
54
+ 313,
55
+ 302,
56
+ 328
57
+ ],
58
+ "page_idx": 0
59
+ },
60
+ {
61
+ "type": "list",
62
+ "sub_type": "text",
63
+ "list_items": [
64
+ "1. Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China",
65
+ "2. Xiamen Lijing New Energy Technology Co., Ltd., Xiamen, China"
66
+ ],
67
+ "bbox": [
68
+ 144,
69
+ 341,
70
+ 813,
71
+ 382
72
+ ],
73
+ "page_idx": 0
74
+ },
75
+ {
76
+ "type": "text",
77
+ "text": "Significance Statement",
78
+ "text_level": 1,
79
+ "bbox": [
80
+ 147,
81
+ 414,
82
+ 332,
83
+ 431
84
+ ],
85
+ "page_idx": 0
86
+ },
87
+ {
88
+ "type": "text",
89
+ "text": "As electric vehicles (EVs) approach the end of their operational life, their batteries retain significant economic value and present promising opportunities for second-life use and material recycling. This is particularly compelling for Global South and other underdeveloped regions, where reliable energy storage is vital to addressing critical challenges posed by weak and even nonexistent power grid and energy infrastructures. However, despite this potential, widespread adoption has been hindered by critical uncertainties surrounding the technical performance, safety, and recertification of second-life batteries. In cases where they have been redeployed, mismatches between estimated and actual performance often render batteries technically unsuitable or hazardous, turning them into liabilities for communities they were intended to benefit. This considerable misalignment exacerbates energy access disparities and undermines the broader vision of energy justice, highlighting an urgent need for robust and scalable solutions to unlock the potential. In the PulseBat Dataset, the authors tested 464 retired lithium-ion batteries, covering 3 cathode material types, 6 historical usages, 3 physical formats, and 6 capacity designs. The pulse test experiments were performed repeatedly for each second-life battery with 10 pulse width, 10 pulse magnitude, multiple state-of-charge, and state-of-health conditions, e.g., from 0.37 to 1.03 (larger than the nominal capacity due to manufacturing inconsistencies). The PulseBat Dataset recorded these test conditions and the voltage response as well as the temperature signals that were subject to the injected pulse current, which could be used as a valuable data resource for critical diagnostics tasks such as state-of-charge estimation, state-of-health estimation, cathode material type identification, open-circuit voltage reconstruction, thermal management, and beyond. Part of the PulseBat Dataset was used in Nature Communications publications that addressed the state-of-health estimation problem under randomly distributed state-of-charge conditions<sup>1</sup>.",
90
+ "bbox": [
91
+ 147,
92
+ 439,
93
+ 852,
94
+ 843
95
+ ],
96
+ "page_idx": 0
97
+ },
98
+ {
99
+ "type": "text",
100
+ "text": "Correspondence",
101
+ "text_level": 1,
102
+ "bbox": [
103
+ 147,
104
+ 870,
105
+ 282,
106
+ 885
107
+ ],
108
+ "page_idx": 0
109
+ },
110
+ {
111
+ "type": "text",
112
+ "text": "Xuan Zhang: xuanzhang@sz.tsinghua.edu.cn;Guangmin Zhou: guangminzhou@sz.tsinghua.edu.cn",
113
+ "bbox": [
114
+ 147,
115
+ 897,
116
+ 850,
117
+ 912
118
+ ],
119
+ "page_idx": 0
120
+ },
121
+ {
122
+ "type": "text",
123
+ "text": "1 Overview",
124
+ "text_level": 1,
125
+ "bbox": [
126
+ 147,
127
+ 91,
128
+ 263,
129
+ 105
130
+ ],
131
+ "page_idx": 1
132
+ },
133
+ {
134
+ "type": "table",
135
+ "img_path": "images/ba3932483939970c18d1e06c7d5b3fbba7b038d3620f7fa1ef8ea61809000014.jpg",
136
+ "table_caption": [
137
+ "The overview of second-life batteries tested is summarized below:"
138
+ ],
139
+ "table_footnote": [],
140
+ "table_body": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>History</td><td>Quantity</td><td>State of health Range (Median, Std.)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>Accelerated aging</td><td>67</td><td>0.61-0.92 (0.83, 0.07)</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>HEV1</td><td>95</td><td>0.51-0.95 (0.88, 0.11)</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>BEV1</td><td>52</td><td>0.75-1.01 (0.99, 0.05)</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>HEV2</td><td>56</td><td>0.74-0.96 (0.85, 0.05)</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>PHEV1</td><td>96</td><td>0.37-0.95 (0.80, 0.12)</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>HEV3</td><td>98</td><td>0.78-1.03 (0.94, 0.07)</td></tr></table>",
141
+ "bbox": [
142
+ 183,
143
+ 137,
144
+ 848,
145
+ 341
146
+ ],
147
+ "page_idx": 1
148
+ },
149
+ {
150
+ "type": "text",
151
+ "text": "where, NMC, LMO, and LFP stand for lithium nickel manganese cobalt oxide, lithium manganese oxide, and lithium iron phosphate, respectively. HEV, BEV, and PHEV stand for hybrid, battery, and plug-in hybrid electric vehicles, respectively. Q stands for the nominal capacity rated by the manufacturer. State-of-health value can be larger than 1 due to inconsistencies from the manufacturing process.",
152
+ "bbox": [
153
+ 179,
154
+ 343,
155
+ 850,
156
+ 434
157
+ ],
158
+ "page_idx": 1
159
+ },
160
+ {
161
+ "type": "text",
162
+ "text": "2 Experimental procedures",
163
+ "text_level": 1,
164
+ "bbox": [
165
+ 144,
166
+ 439,
167
+ 389,
168
+ 457
169
+ ],
170
+ "page_idx": 1
171
+ },
172
+ {
173
+ "type": "text",
174
+ "text": "Tests were performed with BAT-NEEFLCT-05300-V010, NEBULA, Co, Ltd, and the air conditioner temperature was set at $25^{\\circ}\\mathrm{C}$ .",
175
+ "bbox": [
176
+ 179,
177
+ 468,
178
+ 850,
179
+ 513
180
+ ],
181
+ "page_idx": 1
182
+ },
183
+ {
184
+ "type": "text",
185
+ "text": "2.1 Step 1: Capacity Calibration",
186
+ "text_level": 1,
187
+ "bbox": [
188
+ 181,
189
+ 524,
190
+ 463,
191
+ 541
192
+ ],
193
+ "page_idx": 1
194
+ },
195
+ {
196
+ "type": "text",
197
+ "text": "We use the widely adopted constant current (CC) discharge method as the gold standard for determining the capacity of retired batteries. Even considering the different initial state of charge (SOC) distributions of retired batteries, we use a unified method of first constant current constant voltage (CCCV) charging and then CC discharging to determine the capacity of retired batteries.",
198
+ "bbox": [
199
+ 179,
200
+ 552,
201
+ 852,
202
+ 680
203
+ ],
204
+ "page_idx": 1
205
+ },
206
+ {
207
+ "type": "text",
208
+ "text": "First, the retired batteries are charged to the upper cut-off voltage using a 1C constant current, then charged using constant voltage until the current drops to $0.05\\mathrm{C}$ . The batteries are then discharged to the lower cut-off voltage using a 1C constant current. We use the actual discharge capacity as the calibrated (true) battery capacity and then let the battery rest for 20 minutes before SOC conditioning and pulse injection. The term C refers to the C-rate, determined by the current value required by a 1-hour full charge or discharge of a battery. The sampling frequency during Step 1 is $1\\mathrm{Hz}$ . The cut-off conditions of Step 1 are listed in the table below:",
209
+ "bbox": [
210
+ 179,
211
+ 690,
212
+ 853,
213
+ 902
214
+ ],
215
+ "page_idx": 1
216
+ },
217
+ {
218
+ "type": "table",
219
+ "img_path": "images/58c22c8ce19117ff73cabcaf737287874e1fa5e04150b487484999cb8210fdec.jpg",
220
+ "table_caption": [],
221
+ "table_footnote": [],
222
+ "table_body": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>2.0/4.2</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.5/3.65</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.7/4.2</td></tr></table>",
223
+ "bbox": [
224
+ 183,
225
+ 83,
226
+ 850,
227
+ 287
228
+ ],
229
+ "page_idx": 2
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "2.2 Step 2: SOC Conditioning",
234
+ "text_level": 1,
235
+ "bbox": [
236
+ 184,
237
+ 319,
238
+ 440,
239
+ 337
240
+ ],
241
+ "page_idx": 2
242
+ },
243
+ {
244
+ "type": "text",
245
+ "text": "SOC conditioning refers to adjusting the battery SOC to a desired level. The battery is at its zero SOC when the capacity calibration is finished. When a $5\\%$ SOC is desired, we use a 1C constant current for 3 minutes to adjust the calibrated battery to a $5\\%$ SOC level. The battery is then left to stand for 10 minutes to rest, expecting the battery to return to a steady state in preparation for subsequent pulse injection. Notice that SOC here is defined as the ratio of charged or dischargeable capacity to the nominal capacity. The sampling frequency during Step 2 is $1\\mathrm{Hz}$ .",
246
+ "bbox": [
247
+ 183,
248
+ 348,
249
+ 850,
250
+ 531
251
+ ],
252
+ "page_idx": 2
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "2.3 Step 3: Pulse Injection",
257
+ "text_level": 1,
258
+ "bbox": [
259
+ 184,
260
+ 542,
261
+ 411,
262
+ 558
263
+ ],
264
+ "page_idx": 2
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "The pulse width and pulse resting time are shown in the following table; that is, for each pulse width and resting time (each row of the table), we consecutively perform pulse injection with pulse amplitude being 0.5-1-1.5-2-2.5(C) in order, including positive and negative pulse injections. Note that positive and negative pulses alternate to cancel the equivalent energy injection; thus, the stored energy inside the battery does not change. Take pulse injection at $5\\%$ SOC as an example; at the 30ms pulse width, we inject 0.5C positive current pulse, then let the battery rest for 450ms, and then inject 0.5C negative current pulse, then again let the battery rest for 450ms. Still at $5\\%$ SOC, other remaining pulses with other amplitudes follow the rest time of the previous pulses. Repetitive experiments are performed until the remaining pulse widths are exhausted. Then, we charge the retired battery with a constant current of 1C for another 3 minutes to $10\\%$ SOC (refer to Step 1 for details), followed by the same procedure as explained above.",
269
+ "bbox": [
270
+ 183,
271
+ 570,
272
+ 850,
273
+ 892
274
+ ],
275
+ "page_idx": 2
276
+ },
277
+ {
278
+ "type": "table",
279
+ "img_path": "images/36fbea104e3eb148f8f32796e127eaf1c7f71421e018a616e4fe957decfbeaec.jpg",
280
+ "table_caption": [],
281
+ "table_footnote": [
282
+ "Repeat Step 2 and Step 3 until the SOC conditioning region is exhausted. The sampling frequency during Step 3 is $100\\mathrm{Hz}$ ."
283
+ ],
284
+ "table_body": "<table><tr><td>Pulse width (ms)</td><td>Pulse rest time (ms)</td><td>Pulse magnitude (±C)</td></tr><tr><td>30</td><td>450</td><td></td></tr><tr><td>50</td><td>750</td><td></td></tr><tr><td>70</td><td>1,050</td><td></td></tr><tr><td>100</td><td>1,500</td><td></td></tr><tr><td>300</td><td>4,500</td><td></td></tr><tr><td>500</td><td>7,500</td><td>0.5-1-1.5-2-2.5</td></tr><tr><td>700</td><td>10,500</td><td></td></tr><tr><td>1,000</td><td>15,000</td><td></td></tr><tr><td>3,000</td><td>45,000</td><td></td></tr><tr><td>5,000</td><td>75,000</td><td></td></tr></table>",
285
+ "bbox": [
286
+ 179,
287
+ 80,
288
+ 843,
289
+ 393
290
+ ],
291
+ "page_idx": 3
292
+ },
293
+ {
294
+ "type": "text",
295
+ "text": "2.4 SOC Conditioning Range Determination",
296
+ "text_level": 1,
297
+ "bbox": [
298
+ 181,
299
+ 439,
300
+ 556,
301
+ 457
302
+ ],
303
+ "page_idx": 3
304
+ },
305
+ {
306
+ "type": "text",
307
+ "text": "The range of SOC conditioning is determined by a calibrated SOH of the retired battery. Specifically, the upper bound of the SOC conditioning region is lower than the calibrated minimal SOH value of the retired battery by 0.05. For instance, when the retired battery has a previously calibrated SOH between 0.50 and 0.55, then the SOC conditioning region will be $5\\%$ to $45\\%$ , with a grain of $5\\%$ . Detailed information is shown in the table below.",
308
+ "bbox": [
309
+ 179,
310
+ 468,
311
+ 852,
312
+ 596
313
+ ],
314
+ "page_idx": 3
315
+ },
316
+ {
317
+ "type": "table",
318
+ "img_path": "images/77f51ad5fa3b397fa7f544694906a82532cc8990605a6227bc59dd2f8a0789fc.jpg",
319
+ "table_caption": [],
320
+ "table_footnote": [],
321
+ "table_body": "<table><tr><td>State-of-health</td><td>State-of-charge (%), with a resolution of 5%</td></tr><tr><td>&gt;0.95</td><td>[5,90]</td></tr><tr><td>0.90-0.95</td><td>[5,85]</td></tr><tr><td>0.85-0.90</td><td>[5,80]</td></tr><tr><td>0.80-0.85</td><td>[5,75]</td></tr><tr><td>0.75-0.80</td><td>[5,70]</td></tr><tr><td>0.70-0.75</td><td>[5,65]</td></tr><tr><td>0.65-0.70</td><td>[5,60]</td></tr><tr><td>0.60-0.65</td><td>[5,55]</td></tr><tr><td>0.55-0.60</td><td>[5,50]</td></tr><tr><td>0.50-0.55</td><td>[5,45]</td></tr><tr><td>0.45-0.50</td><td>[5,40]</td></tr><tr><td>0.40-0.45</td><td>[5,35]</td></tr><tr><td>0.35-0.40</td><td>[5,30]</td></tr><tr><td>&lt;0.35</td><td>Not Found</td></tr></table>",
322
+ "bbox": [
323
+ 181,
324
+ 600,
325
+ 848,
326
+ 885
327
+ ],
328
+ "page_idx": 3
329
+ },
330
+ {
331
+ "type": "text",
332
+ "text": "2.5 Voltage Protection",
333
+ "text_level": 1,
334
+ "bbox": [
335
+ 181,
336
+ 91,
337
+ 381,
338
+ 107
339
+ ],
340
+ "page_idx": 4
341
+ },
342
+ {
343
+ "type": "text",
344
+ "text": "If the oscillation voltage during pulse injection exceeds the protection range, the current charging or discharging work step will be immediately terminated for a physical security check. If the security check is passed, no time will be made up for the already terminated work step, but the remaining work steps in the test procedure will be continued. In our test, voltage is mainly possible to exceed the protection range during charging, and no cases below the protection range during discharge have been found. The specific protection voltage parameters are consistent with those in the following table.",
345
+ "bbox": [
346
+ 179,
347
+ 118,
348
+ 853,
349
+ 303
350
+ ],
351
+ "page_idx": 4
352
+ },
353
+ {
354
+ "type": "table",
355
+ "img_path": "images/00b1392f18da82cdbe5720077c8cb6e2013a800e2b27f5294e1499e1c5e251b2.jpg",
356
+ "table_caption": [],
357
+ "table_footnote": [],
358
+ "table_body": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>1.95/4.3</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.65/4.3</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.45/3.7</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.65/4.25</td></tr></table>",
359
+ "bbox": [
360
+ 183,
361
+ 306,
362
+ 848,
363
+ 508
364
+ ],
365
+ "page_idx": 4
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "2.6 SOC Deviation",
370
+ "text_level": 1,
371
+ "bbox": [
372
+ 181,
373
+ 514,
374
+ 354,
375
+ 530
376
+ ],
377
+ "page_idx": 4
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "The unequal charged and discharged capacity in adjacent positive and negative pulses with the same pulse intensity and planned pulse width caused by voltage protection will lead to an accumulative deviation in SOC to subsequent pulse tests. This SOC deviation is usually very slight due to the extremely short pulse width with no more than 5s. The voltage may exceed the protection range when the tested SOC is close to the SOH value of the battery. In Nature Communications publication<sup>1</sup>, we only used data from $5 - 50\\%$ SOC. Considering that the SOH of the vast majority of batteries is above 0.6, the SOC deviation used can be ignored for simplicity. However, if readers want to use data at a higher SOC level, they need to pay attention to this SOC deviation issue to avoid introducing unnecessary errors.",
382
+ "bbox": [
383
+ 179,
384
+ 542,
385
+ 852,
386
+ 783
387
+ ],
388
+ "page_idx": 4
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "3 Accessibility",
393
+ "text_level": 1,
394
+ "bbox": [
395
+ 146,
396
+ 793,
397
+ 285,
398
+ 809
399
+ ],
400
+ "page_idx": 4
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "The raw data and the manipulation code are accessible at this link. The readers should cite the Nature Communications publication<sup>1</sup> and this data descriptor article when using the data.",
405
+ "bbox": [
406
+ 179,
407
+ 820,
408
+ 850,
409
+ 866
410
+ ],
411
+ "page_idx": 4
412
+ },
413
+ {
414
+ "type": "text",
415
+ "text": "Reference",
416
+ "text_level": 1,
417
+ "bbox": [
418
+ 147,
419
+ 87,
420
+ 230,
421
+ 101
422
+ ],
423
+ "page_idx": 5
424
+ },
425
+ {
426
+ "type": "ref_text",
427
+ "text": "1 Tao, S. et al. Generative learning assisted state-of-health estimation for sustainable battery recycling with random retirement conditions. Nature Communications 15, 10154 (2024). https://doi.org:10.1038/s41467-024-54454-0",
428
+ "bbox": [
429
+ 147,
430
+ 105,
431
+ 820,
432
+ 159
433
+ ],
434
+ "page_idx": 5
435
+ }
436
+ ]
2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_model.json ADDED
@@ -0,0 +1,454 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.149,
7
+ 0.092,
8
+ 0.303,
9
+ 0.108
10
+ ],
11
+ "angle": 0,
12
+ "content": "Dataset Descriptor"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.148,
18
+ 0.12,
19
+ 0.851,
20
+ 0.163
21
+ ],
22
+ "angle": 0,
23
+ "content": "PulseBat: A field-accessible dataset for second-life battery diagnostics from realistic histories using multidimensional rapid pulse test"
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.149,
29
+ 0.203,
30
+ 0.248,
31
+ 0.218
32
+ ],
33
+ "angle": 0,
34
+ "content": "Author List"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.148,
40
+ 0.23,
41
+ 0.851,
42
+ 0.275
43
+ ],
44
+ "angle": 0,
45
+ "content": "Shengyu Tao\\(^{1}\\), Guangyuan Ma\\(^{1}\\), Huixiong Yang\\(^{2}\\), Minyan Lu\\(^{2}\\), Guodan Wei\\(^{1}\\), Guangmin Zhou\\(^{1,*}\\), Xuan Zhang\\(^{1,*}\\)"
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.149,
51
+ 0.314,
52
+ 0.303,
53
+ 0.329
54
+ ],
55
+ "angle": 0,
56
+ "content": "Author Affiliations"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.147,
62
+ 0.342,
63
+ 0.814,
64
+ 0.356
65
+ ],
66
+ "angle": 0,
67
+ "content": "1. Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.146,
73
+ 0.37,
74
+ 0.648,
75
+ 0.384
76
+ ],
77
+ "angle": 0,
78
+ "content": "2. Xiamen Lijing New Energy Technology Co., Ltd., Xiamen, China"
79
+ },
80
+ {
81
+ "type": "list",
82
+ "bbox": [
83
+ 0.146,
84
+ 0.342,
85
+ 0.814,
86
+ 0.384
87
+ ],
88
+ "angle": 0,
89
+ "content": null
90
+ },
91
+ {
92
+ "type": "title",
93
+ "bbox": [
94
+ 0.149,
95
+ 0.416,
96
+ 0.334,
97
+ 0.432
98
+ ],
99
+ "angle": 0,
100
+ "content": "Significance Statement"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.148,
106
+ 0.44,
107
+ 0.853,
108
+ 0.844
109
+ ],
110
+ "angle": 0,
111
+ "content": "As electric vehicles (EVs) approach the end of their operational life, their batteries retain significant economic value and present promising opportunities for second-life use and material recycling. This is particularly compelling for Global South and other underdeveloped regions, where reliable energy storage is vital to addressing critical challenges posed by weak and even nonexistent power grid and energy infrastructures. However, despite this potential, widespread adoption has been hindered by critical uncertainties surrounding the technical performance, safety, and recertification of second-life batteries. In cases where they have been redeployed, mismatches between estimated and actual performance often render batteries technically unsuitable or hazardous, turning them into liabilities for communities they were intended to benefit. This considerable misalignment exacerbates energy access disparities and undermines the broader vision of energy justice, highlighting an urgent need for robust and scalable solutions to unlock the potential. In the PulseBat Dataset, the authors tested 464 retired lithium-ion batteries, covering 3 cathode material types, 6 historical usages, 3 physical formats, and 6 capacity designs. The pulse test experiments were performed repeatedly for each second-life battery with 10 pulse width, 10 pulse magnitude, multiple state-of-charge, and state-of-health conditions, e.g., from 0.37 to 1.03 (larger than the nominal capacity due to manufacturing inconsistencies). The PulseBat Dataset recorded these test conditions and the voltage response as well as the temperature signals that were subject to the injected pulse current, which could be used as a valuable data resource for critical diagnostics tasks such as state-of-charge estimation, state-of-health estimation, cathode material type identification, open-circuit voltage reconstruction, thermal management, and beyond. Part of the PulseBat Dataset was used in Nature Communications publications that addressed the state-of-health estimation problem under randomly distributed state-of-charge conditions<sup>1</sup>."
112
+ },
113
+ {
114
+ "type": "title",
115
+ "bbox": [
116
+ 0.149,
117
+ 0.871,
118
+ 0.283,
119
+ 0.886
120
+ ],
121
+ "angle": 0,
122
+ "content": "Correspondence"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.148,
128
+ 0.898,
129
+ 0.851,
130
+ 0.913
131
+ ],
132
+ "angle": 0,
133
+ "content": "Xuan Zhang: xuanzhang@sz.tsinghua.edu.cn;Guangmin Zhou: guangminzhou@sz.tsinghua.edu.cn"
134
+ }
135
+ ],
136
+ [
137
+ {
138
+ "type": "title",
139
+ "bbox": [
140
+ 0.148,
141
+ 0.092,
142
+ 0.265,
143
+ 0.106
144
+ ],
145
+ "angle": 0,
146
+ "content": "1 Overview"
147
+ },
148
+ {
149
+ "type": "table_caption",
150
+ "bbox": [
151
+ 0.182,
152
+ 0.12,
153
+ 0.679,
154
+ 0.135
155
+ ],
156
+ "angle": 0,
157
+ "content": "The overview of second-life batteries tested is summarized below:"
158
+ },
159
+ {
160
+ "type": "table",
161
+ "bbox": [
162
+ 0.184,
163
+ 0.139,
164
+ 0.849,
165
+ 0.342
166
+ ],
167
+ "angle": 0,
168
+ "content": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>History</td><td>Quantity</td><td>State of health Range (Median, Std.)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>Accelerated aging</td><td>67</td><td>0.61-0.92 (0.83, 0.07)</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>HEV1</td><td>95</td><td>0.51-0.95 (0.88, 0.11)</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>BEV1</td><td>52</td><td>0.75-1.01 (0.99, 0.05)</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>HEV2</td><td>56</td><td>0.74-0.96 (0.85, 0.05)</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>PHEV1</td><td>96</td><td>0.37-0.95 (0.80, 0.12)</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>HEV3</td><td>98</td><td>0.78-1.03 (0.94, 0.07)</td></tr></table>"
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.18,
174
+ 0.344,
175
+ 0.851,
176
+ 0.435
177
+ ],
178
+ "angle": 0,
179
+ "content": "where, NMC, LMO, and LFP stand for lithium nickel manganese cobalt oxide, lithium manganese oxide, and lithium iron phosphate, respectively. HEV, BEV, and PHEV stand for hybrid, battery, and plug-in hybrid electric vehicles, respectively. Q stands for the nominal capacity rated by the manufacturer. State-of-health value can be larger than 1 due to inconsistencies from the manufacturing process."
180
+ },
181
+ {
182
+ "type": "title",
183
+ "bbox": [
184
+ 0.146,
185
+ 0.441,
186
+ 0.391,
187
+ 0.458
188
+ ],
189
+ "angle": 0,
190
+ "content": "2 Experimental procedures"
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.181,
196
+ 0.469,
197
+ 0.851,
198
+ 0.514
199
+ ],
200
+ "angle": 0,
201
+ "content": "Tests were performed with BAT-NEEFLCT-05300-V010, NEBULA, Co, Ltd, and the air conditioner temperature was set at \\(25^{\\circ}\\mathrm{C}\\)."
202
+ },
203
+ {
204
+ "type": "title",
205
+ "bbox": [
206
+ 0.182,
207
+ 0.525,
208
+ 0.464,
209
+ 0.542
210
+ ],
211
+ "angle": 0,
212
+ "content": "2.1 Step 1: Capacity Calibration"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.181,
218
+ 0.553,
219
+ 0.853,
220
+ 0.681
221
+ ],
222
+ "angle": 0,
223
+ "content": "We use the widely adopted constant current (CC) discharge method as the gold standard for determining the capacity of retired batteries. Even considering the different initial state of charge (SOC) distributions of retired batteries, we use a unified method of first constant current constant voltage (CCCV) charging and then CC discharging to determine the capacity of retired batteries."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.181,
229
+ 0.692,
230
+ 0.854,
231
+ 0.903
232
+ ],
233
+ "angle": 0,
234
+ "content": "First, the retired batteries are charged to the upper cut-off voltage using a 1C constant current, then charged using constant voltage until the current drops to \\(0.05\\mathrm{C}\\). The batteries are then discharged to the lower cut-off voltage using a 1C constant current. We use the actual discharge capacity as the calibrated (true) battery capacity and then let the battery rest for 20 minutes before SOC conditioning and pulse injection. The term C refers to the C-rate, determined by the current value required by a 1-hour full charge or discharge of a battery. The sampling frequency during Step 1 is \\(1\\mathrm{Hz}\\). The cut-off conditions of Step 1 are listed in the table below:"
235
+ }
236
+ ],
237
+ [
238
+ {
239
+ "type": "table",
240
+ "bbox": [
241
+ 0.184,
242
+ 0.084,
243
+ 0.851,
244
+ 0.288
245
+ ],
246
+ "angle": 0,
247
+ "content": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>2.0/4.2</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.5/3.65</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.7/4.2</td></tr></table>"
248
+ },
249
+ {
250
+ "type": "title",
251
+ "bbox": [
252
+ 0.185,
253
+ 0.321,
254
+ 0.442,
255
+ 0.338
256
+ ],
257
+ "angle": 0,
258
+ "content": "2.2 Step 2: SOC Conditioning"
259
+ },
260
+ {
261
+ "type": "text",
262
+ "bbox": [
263
+ 0.184,
264
+ 0.349,
265
+ 0.851,
266
+ 0.532
267
+ ],
268
+ "angle": 0,
269
+ "content": "SOC conditioning refers to adjusting the battery SOC to a desired level. The battery is at its zero SOC when the capacity calibration is finished. When a \\(5\\%\\) SOC is desired, we use a 1C constant current for 3 minutes to adjust the calibrated battery to a \\(5\\%\\) SOC level. The battery is then left to stand for 10 minutes to rest, expecting the battery to return to a steady state in preparation for subsequent pulse injection. Notice that SOC here is defined as the ratio of charged or dischargeable capacity to the nominal capacity. The sampling frequency during Step 2 is \\(1\\mathrm{Hz}\\)."
270
+ },
271
+ {
272
+ "type": "title",
273
+ "bbox": [
274
+ 0.185,
275
+ 0.543,
276
+ 0.412,
277
+ 0.56
278
+ ],
279
+ "angle": 0,
280
+ "content": "2.3 Step 3: Pulse Injection"
281
+ },
282
+ {
283
+ "type": "text",
284
+ "bbox": [
285
+ 0.184,
286
+ 0.571,
287
+ 0.851,
288
+ 0.894
289
+ ],
290
+ "angle": 0,
291
+ "content": "The pulse width and pulse resting time are shown in the following table; that is, for each pulse width and resting time (each row of the table), we consecutively perform pulse injection with pulse amplitude being 0.5-1-1.5-2-2.5(C) in order, including positive and negative pulse injections. Note that positive and negative pulses alternate to cancel the equivalent energy injection; thus, the stored energy inside the battery does not change. Take pulse injection at \\(5\\%\\) SOC as an example; at the 30ms pulse width, we inject 0.5C positive current pulse, then let the battery rest for 450ms, and then inject 0.5C negative current pulse, then again let the battery rest for 450ms. Still at \\(5\\%\\) SOC, other remaining pulses with other amplitudes follow the rest time of the previous pulses. Repetitive experiments are performed until the remaining pulse widths are exhausted. Then, we charge the retired battery with a constant current of 1C for another 3 minutes to \\(10\\%\\) SOC (refer to Step 1 for details), followed by the same procedure as explained above."
292
+ }
293
+ ],
294
+ [
295
+ {
296
+ "type": "table",
297
+ "bbox": [
298
+ 0.18,
299
+ 0.082,
300
+ 0.845,
301
+ 0.394
302
+ ],
303
+ "angle": 0,
304
+ "content": "<table><tr><td>Pulse width (ms)</td><td>Pulse rest time (ms)</td><td>Pulse magnitude (±C)</td></tr><tr><td>30</td><td>450</td><td></td></tr><tr><td>50</td><td>750</td><td></td></tr><tr><td>70</td><td>1,050</td><td></td></tr><tr><td>100</td><td>1,500</td><td></td></tr><tr><td>300</td><td>4,500</td><td></td></tr><tr><td>500</td><td>7,500</td><td>0.5-1-1.5-2-2.5</td></tr><tr><td>700</td><td>10,500</td><td></td></tr><tr><td>1,000</td><td>15,000</td><td></td></tr><tr><td>3,000</td><td>45,000</td><td></td></tr><tr><td>5,000</td><td>75,000</td><td></td></tr></table>"
305
+ },
306
+ {
307
+ "type": "table_footnote",
308
+ "bbox": [
309
+ 0.182,
310
+ 0.4,
311
+ 0.85,
312
+ 0.434
313
+ ],
314
+ "angle": 0,
315
+ "content": "Repeat Step 2 and Step 3 until the SOC conditioning region is exhausted. The sampling frequency during Step 3 is \\(100\\mathrm{Hz}\\)."
316
+ },
317
+ {
318
+ "type": "title",
319
+ "bbox": [
320
+ 0.182,
321
+ 0.441,
322
+ 0.557,
323
+ 0.458
324
+ ],
325
+ "angle": 0,
326
+ "content": "2.4 SOC Conditioning Range Determination"
327
+ },
328
+ {
329
+ "type": "text",
330
+ "bbox": [
331
+ 0.181,
332
+ 0.469,
333
+ 0.853,
334
+ 0.598
335
+ ],
336
+ "angle": 0,
337
+ "content": "The range of SOC conditioning is determined by a calibrated SOH of the retired battery. Specifically, the upper bound of the SOC conditioning region is lower than the calibrated minimal SOH value of the retired battery by 0.05. For instance, when the retired battery has a previously calibrated SOH between 0.50 and 0.55, then the SOC conditioning region will be \\(5\\%\\) to \\(45\\%\\), with a grain of \\(5\\%\\). Detailed information is shown in the table below."
338
+ },
339
+ {
340
+ "type": "table",
341
+ "bbox": [
342
+ 0.182,
343
+ 0.601,
344
+ 0.85,
345
+ 0.887
346
+ ],
347
+ "angle": 0,
348
+ "content": "<table><tr><td>State-of-health</td><td>State-of-charge (%), with a resolution of 5%</td></tr><tr><td>&gt;0.95</td><td>[5,90]</td></tr><tr><td>0.90-0.95</td><td>[5,85]</td></tr><tr><td>0.85-0.90</td><td>[5,80]</td></tr><tr><td>0.80-0.85</td><td>[5,75]</td></tr><tr><td>0.75-0.80</td><td>[5,70]</td></tr><tr><td>0.70-0.75</td><td>[5,65]</td></tr><tr><td>0.65-0.70</td><td>[5,60]</td></tr><tr><td>0.60-0.65</td><td>[5,55]</td></tr><tr><td>0.55-0.60</td><td>[5,50]</td></tr><tr><td>0.50-0.55</td><td>[5,45]</td></tr><tr><td>0.45-0.50</td><td>[5,40]</td></tr><tr><td>0.40-0.45</td><td>[5,35]</td></tr><tr><td>0.35-0.40</td><td>[5,30]</td></tr><tr><td>&lt;0.35</td><td>Not Found</td></tr></table>"
349
+ }
350
+ ],
351
+ [
352
+ {
353
+ "type": "title",
354
+ "bbox": [
355
+ 0.182,
356
+ 0.092,
357
+ 0.383,
358
+ 0.108
359
+ ],
360
+ "angle": 0,
361
+ "content": "2.5 Voltage Protection"
362
+ },
363
+ {
364
+ "type": "text",
365
+ "bbox": [
366
+ 0.181,
367
+ 0.119,
368
+ 0.854,
369
+ 0.304
370
+ ],
371
+ "angle": 0,
372
+ "content": "If the oscillation voltage during pulse injection exceeds the protection range, the current charging or discharging work step will be immediately terminated for a physical security check. If the security check is passed, no time will be made up for the already terminated work step, but the remaining work steps in the test procedure will be continued. In our test, voltage is mainly possible to exceed the protection range during charging, and no cases below the protection range during discharge have been found. The specific protection voltage parameters are consistent with those in the following table."
373
+ },
374
+ {
375
+ "type": "table",
376
+ "bbox": [
377
+ 0.184,
378
+ 0.307,
379
+ 0.85,
380
+ 0.51
381
+ ],
382
+ "angle": 0,
383
+ "content": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>1.95/4.3</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.65/4.3</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.45/3.7</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.65/4.25</td></tr></table>"
384
+ },
385
+ {
386
+ "type": "title",
387
+ "bbox": [
388
+ 0.182,
389
+ 0.516,
390
+ 0.356,
391
+ 0.531
392
+ ],
393
+ "angle": 0,
394
+ "content": "2.6 SOC Deviation"
395
+ },
396
+ {
397
+ "type": "text",
398
+ "bbox": [
399
+ 0.181,
400
+ 0.543,
401
+ 0.853,
402
+ 0.784
403
+ ],
404
+ "angle": 0,
405
+ "content": "The unequal charged and discharged capacity in adjacent positive and negative pulses with the same pulse intensity and planned pulse width caused by voltage protection will lead to an accumulative deviation in SOC to subsequent pulse tests. This SOC deviation is usually very slight due to the extremely short pulse width with no more than 5s. The voltage may exceed the protection range when the tested SOC is close to the SOH value of the battery. In Nature Communications publication<sup>1</sup>, we only used data from \\(5 - 50\\%\\) SOC. Considering that the SOH of the vast majority of batteries is above 0.6, the SOC deviation used can be ignored for simplicity. However, if readers want to use data at a higher SOC level, they need to pay attention to this SOC deviation issue to avoid introducing unnecessary errors."
406
+ },
407
+ {
408
+ "type": "title",
409
+ "bbox": [
410
+ 0.147,
411
+ 0.794,
412
+ 0.287,
413
+ 0.81
414
+ ],
415
+ "angle": 0,
416
+ "content": "3 Accessibility"
417
+ },
418
+ {
419
+ "type": "text",
420
+ "bbox": [
421
+ 0.181,
422
+ 0.821,
423
+ 0.851,
424
+ 0.867
425
+ ],
426
+ "angle": 0,
427
+ "content": "The raw data and the manipulation code are accessible at this link. The readers should cite the Nature Communications publication<sup>1</sup> and this data descriptor article when using the data."
428
+ }
429
+ ],
430
+ [
431
+ {
432
+ "type": "title",
433
+ "bbox": [
434
+ 0.148,
435
+ 0.088,
436
+ 0.231,
437
+ 0.102
438
+ ],
439
+ "angle": 0,
440
+ "content": "Reference"
441
+ },
442
+ {
443
+ "type": "ref_text",
444
+ "bbox": [
445
+ 0.148,
446
+ 0.106,
447
+ 0.821,
448
+ 0.16
449
+ ],
450
+ "angle": 0,
451
+ "content": "1 Tao, S. et al. Generative learning assisted state-of-health estimation for sustainable battery recycling with random retirement conditions. Nature Communications 15, 10154 (2024). https://doi.org:10.1038/s41467-024-54454-0"
452
+ }
453
+ ]
454
+ ]
2502.16xxx/2502.16848/3c7e6965-458b-46fc-9ef7-cb73aa51696e_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:445a540997a1f9ba136948da46c2cdc4728a75e472c0e6cb144f1a7ff39e2745
3
+ size 249678
2502.16xxx/2502.16848/full.md ADDED
@@ -0,0 +1,76 @@
1
+ # Dataset Descriptor
2
+
3
+ PulseBat: A field-accessible dataset for second-life battery diagnostics from realistic histories using multidimensional rapid pulse test
4
+
5
+ # Author List
6
+
7
+ Shengyu Tao $^{1}$ , Guangyuan Ma $^{1}$ , Huixiong Yang $^{2}$ , Minyan Lu $^{2}$ , Guodan Wei $^{1}$ , Guangmin Zhou $^{1,*}$ , Xuan Zhang $^{1,*}$
8
+
9
+ # Author Affiliations
10
+
11
+ 1. Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China
12
+ 2. Xiamen Lijing New Energy Technology Co., Ltd., Xiamen, China
13
+
14
+ # Significance Statement
15
+
16
+ As electric vehicles (EVs) approach the end of their operational life, their batteries retain significant economic value and present promising opportunities for second-life use and material recycling. This is particularly compelling for Global South and other underdeveloped regions, where reliable energy storage is vital to addressing critical challenges posed by weak and even nonexistent power grid and energy infrastructures. However, despite this potential, widespread adoption has been hindered by critical uncertainties surrounding the technical performance, safety, and recertification of second-life batteries. In cases where they have been redeployed, mismatches between estimated and actual performance often render batteries technically unsuitable or hazardous, turning them into liabilities for communities they were intended to benefit. This considerable misalignment exacerbates energy access disparities and undermines the broader vision of energy justice, highlighting an urgent need for robust and scalable solutions to unlock the potential. In the PulseBat Dataset, the authors tested 464 retired lithium-ion batteries, covering 3 cathode material types, 6 historical usages, 3 physical formats, and 6 capacity designs. The pulse test experiments were performed repeatedly for each second-life battery with 10 pulse width, 10 pulse magnitude, multiple state-of-charge, and state-of-health conditions, e.g., from 0.37 to 1.03 (larger than the nominal capacity due to manufacturing inconsistencies). The PulseBat Dataset recorded these test conditions and the voltage response as well as the temperature signals that were subject to the injected pulse current, which could be used as a valuable data resource for critical diagnostics tasks such as state-of-charge estimation, state-of-health estimation, cathode material type identification, open-circuit voltage reconstruction, thermal management, and beyond. Part of the PulseBat Dataset was used in Nature Communications publications that addressed the state-of-health estimation problem under randomly distributed state-of-charge conditions<sup>1</sup>.
17
+
18
+ # Correspondence
19
+
20
+ Xuan Zhang: xuanzhang@sz.tsinghua.edu.cn; Guangmin Zhou: guangminzhou@sz.tsinghua.edu.cn
21
+
22
+ # 1 Overview
23
+
24
+ The overview of second-life batteries tested is summarized below:
25
+
26
+ <table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>History</td><td>Quantity</td><td>State of health Range (Median, Std.)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>Accelerated aging</td><td>67</td><td>0.61-0.92 (0.83, 0.07)</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>HEV1</td><td>95</td><td>0.51-0.95 (0.88, 0.11)</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>BEV1</td><td>52</td><td>0.75-1.01 (0.99, 0.05)</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>HEV2</td><td>56</td><td>0.74-0.96 (0.85, 0.05)</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>PHEV1</td><td>96</td><td>0.37-0.95 (0.80, 0.12)</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>HEV3</td><td>98</td><td>0.78-1.03 (0.94, 0.07)</td></tr></table>
27
+
28
+ where NMC, LMO, and LFP stand for lithium nickel manganese cobalt oxide, lithium manganese oxide, and lithium iron phosphate, respectively. HEV, BEV, and PHEV stand for hybrid, battery, and plug-in hybrid electric vehicles, respectively. Q stands for the nominal capacity rated by the manufacturer. The state-of-health value can be larger than 1 due to inconsistencies in the manufacturing process.
29
+
30
+ # 2 Experimental procedures
31
+
32
+ Tests were performed with BAT-NEEFLCT-05300-V010 (NEBULA Co., Ltd.), and the air conditioner temperature was set at $25^{\circ}\mathrm{C}$.
33
+
34
+ # 2.1 Step 1: Capacity Calibration
35
+
36
+ We use the widely adopted constant current (CC) discharge method as the gold standard for determining the capacity of retired batteries. Even considering the different initial state of charge (SOC) distributions of retired batteries, we use a unified method of first constant current constant voltage (CCCV) charging and then CC discharging to determine the capacity of retired batteries.
37
+
38
+ First, the retired batteries are charged to the upper cut-off voltage using a 1C constant current, then charged using constant voltage until the current drops to $0.05\mathrm{C}$ . The batteries are then discharged to the lower cut-off voltage using a 1C constant current. We use the actual discharge capacity as the calibrated (true) battery capacity and then let the battery rest for 20 minutes before SOC conditioning and pulse injection. The term C refers to the C-rate, determined by the current value required by a 1-hour full charge or discharge of a battery. The sampling frequency during Step 1 is $1\mathrm{Hz}$ . The cut-off conditions of Step 1 are listed in the table below:
39
+
40
+ <table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>2.0/4.2</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.5/3.65</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.7/4.2</td></tr></table>
41
+
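+ For readers who want to reproduce the Step 1 calibration from the logged 1 Hz data, a minimal coulomb-counting sketch follows; the array names (`time_s`, `current_a`) and the sign convention (discharge current positive) are assumptions made for illustration, not the dataset's actual schema.
+
+ ```python
+ import numpy as np
+
+ def discharge_capacity_ah(time_s, current_a):
+     """Integrate the CC discharge current (assumed positive, in A) over time (s)
+     to obtain the calibrated capacity in Ah, as described in Step 1."""
+     t = np.asarray(time_s, dtype=float)
+     i = np.asarray(current_a, dtype=float)
+     ampere_seconds = np.sum(0.5 * (i[1:] + i[:-1]) * np.diff(t))  # trapezoidal rule
+     return ampere_seconds / 3600.0
+
+ def state_of_health(calibrated_capacity_ah, nominal_capacity_ah):
+     """SOH as the ratio of the calibrated capacity to the manufacturer-rated capacity Q."""
+     return calibrated_capacity_ah / nominal_capacity_ah
+ ```
+
+ For example, a 2.1 Ah NMC cell that sustains its 1C (2.1 A) discharge for 54 minutes yields roughly 1.89 Ah, i.e. an SOH of about 0.90.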
42
+ # 2.2 Step 2: SOC Conditioning
43
+
44
+ SOC conditioning refers to adjusting the battery SOC to a desired level. The battery is at its zero SOC when the capacity calibration is finished. When a $5\%$ SOC is desired, we use a 1C constant current for 3 minutes to adjust the calibrated battery to a $5\%$ SOC level. The battery is then left to stand for 10 minutes to rest, expecting the battery to return to a steady state in preparation for subsequent pulse injection. Notice that SOC here is defined as the ratio of charged or dischargeable capacity to the nominal capacity. The sampling frequency during Step 2 is $1\mathrm{Hz}$ .
45
+
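+ The 1C-for-3-minutes step follows directly from this SOC definition: 1C moves one nominal capacity per hour, so 3 minutes of 1C current shifts the SOC by 3/60 = 5% of nominal. A trivial sketch of that arithmetic (illustrative only):
+
+ ```python
+ def conditioning_time_minutes(delta_soc, c_rate=1.0):
+     """Minutes of constant-current charging needed to shift the SOC by `delta_soc`
+     (expressed as a fraction of nominal capacity) at the given C-rate."""
+     return 60.0 * delta_soc / c_rate
+
+ print(conditioning_time_minutes(0.05))  # each 5% step at 1C -> 3.0 minutes
+ ```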
46
+ # 2.3 Step 3: Pulse Injection
47
+
48
+ The pulse widths and pulse resting times are shown in the following table; for each pulse width and resting time (each row of the table), we consecutively perform pulse injections with pulse amplitudes of 0.5-1-1.5-2-2.5 (C) in order, including both positive and negative pulses. Note that positive and negative pulses alternate to cancel the equivalent energy injection; thus, the stored energy inside the battery does not change. Take pulse injection at $5\%$ SOC as an example: at the 30 ms pulse width, we inject a 0.5C positive current pulse, let the battery rest for 450 ms, then inject a 0.5C negative current pulse, and again let the battery rest for 450 ms. Still at $5\%$ SOC, the remaining pulses with the other amplitudes follow in the same manner after the rest period of the previous pulse. This is repeated until all pulse widths are exhausted. Then, we charge the retired battery with a constant current of 1C for another 3 minutes to reach $10\%$ SOC (refer to Step 2 for details), followed by the same procedure as explained above.
49
+
50
+ <table><tr><td>Pulse width (ms)</td><td>Pulse rest time (ms)</td><td>Pulse magnitude (±C)</td></tr><tr><td>30</td><td>450</td><td></td></tr><tr><td>50</td><td>750</td><td></td></tr><tr><td>70</td><td>1,050</td><td></td></tr><tr><td>100</td><td>1,500</td><td></td></tr><tr><td>300</td><td>4,500</td><td></td></tr><tr><td>500</td><td>7,500</td><td>0.5-1-1.5-2-2.5</td></tr><tr><td>700</td><td>10,500</td><td></td></tr><tr><td>1,000</td><td>15,000</td><td></td></tr><tr><td>3,000</td><td>45,000</td><td></td></tr><tr><td>5,000</td><td>75,000</td><td></td></tr></table>
51
+
52
+ Repeat Step 2 and Step 3 until the SOC conditioning region is exhausted. The sampling frequency during Step 3 is $100\mathrm{Hz}$ .
53
+
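+ To make the nesting of the protocol explicit (pulse widths outermost, then the fixed amplitude sequence, each amplitude applied as an alternating positive/negative pair), a schedule-generation sketch is given below. It only restates the timing logic of the table and is not the script that drives the tester.
+
+ ```python
+ PULSE_WIDTHS_MS = [30, 50, 70, 100, 300, 500, 700, 1000, 3000, 5000]
+ AMPLITUDES_C = [0.5, 1.0, 1.5, 2.0, 2.5]
+
+ def pulse_schedule():
+     """Yield (width_ms, rest_ms, signed_amplitude_c) in the order described in Step 3.
+     Each amplitude is applied as a positive pulse, a rest, a negative pulse and
+     another rest, so the net injected charge over a pair is zero."""
+     for width_ms in PULSE_WIDTHS_MS:
+         rest_ms = 15 * width_ms  # the table's rest time is always 15x the pulse width
+         for amplitude_c in AMPLITUDES_C:
+             yield (width_ms, rest_ms, +amplitude_c)
+             yield (width_ms, rest_ms, -amplitude_c)
+ ```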
54
+ # 2.4 SOC Conditioning Range Determination
55
+
56
+ The range of SOC conditioning is determined by the calibrated SOH of the retired battery. Specifically, the upper bound of the SOC conditioning region is 0.05 lower than the calibrated minimal SOH value of the retired battery. For instance, when the retired battery has a previously calibrated SOH between 0.50 and 0.55, the SOC conditioning region will be $5\%$ to $45\%$, with a granularity of $5\%$. Detailed information is shown in the table below.
57
+
58
+ <table><tr><td>State-of-health</td><td>State-of-charge (%), with a resolution of 5%</td></tr><tr><td>&gt;0.95</td><td>[5,90]</td></tr><tr><td>0.90-0.95</td><td>[5,85]</td></tr><tr><td>0.85-0.90</td><td>[5,80]</td></tr><tr><td>0.80-0.85</td><td>[5,75]</td></tr><tr><td>0.75-0.80</td><td>[5,70]</td></tr><tr><td>0.70-0.75</td><td>[5,65]</td></tr><tr><td>0.65-0.70</td><td>[5,60]</td></tr><tr><td>0.60-0.65</td><td>[5,55]</td></tr><tr><td>0.55-0.60</td><td>[5,50]</td></tr><tr><td>0.50-0.55</td><td>[5,45]</td></tr><tr><td>0.45-0.50</td><td>[5,40]</td></tr><tr><td>0.40-0.45</td><td>[5,35]</td></tr><tr><td>0.35-0.40</td><td>[5,30]</td></tr><tr><td>&lt;0.35</td><td>Not Found</td></tr></table>
59
+
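+ The table can be summarized by a simple rule: the SOC grid always starts at 5%, and its upper bound is the calibrated SOH floored to the 5% grid minus five percentage points, capped at 90%. The sketch below encodes this reading of the table (the rounding convention is our interpretation, so treat it as an assumption):
+
+ ```python
+ def soc_upper_bound_percent(soh):
+     """Upper SOC bound (in %) of the conditioning grid per the table in Section 2.4.
+     Batteries with SOH below 0.35 are not conditioned."""
+     if soh < 0.35:
+         return None
+     floored = int(soh * 100) // 5 * 5   # e.g. SOH 0.93 -> 90
+     return min(floored - 5, 90)         # e.g. SOH 0.93 -> 85% upper bound
+
+ def soc_grid_percent(soh):
+     """5%-resolution conditioning points, e.g. SOH 0.53 -> [5, 10, ..., 45]."""
+     upper = soc_upper_bound_percent(soh)
+     return [] if upper is None else list(range(5, upper + 1, 5))
+ ```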
60
+ # 2.5 Voltage Protection
61
+
62
+ If the oscillation voltage during pulse injection exceeds the protection range, the current charging or discharging work step is immediately terminated for a physical safety check. If the check is passed, no time is made up for the terminated work step, but the remaining work steps in the test procedure are continued. In our tests, the voltage exceeds the protection range mainly during charging; no case of the voltage falling below the protection range during discharging has been observed. The specific protection voltage parameters are listed in the following table.
63
+
64
+ <table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>1.95/4.3</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.65/4.3</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.45/3.7</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.65/4.25</td></tr></table>
65
+
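+ When screening the raw logs, the protection window can be applied as a simple validity filter. The dictionary below merely restates the table, keyed by nominal capacity in Ah (the keying choice is ours, for illustration):
+
+ ```python
+ # (discharge floor, charge ceiling) in volts, keyed by nominal capacity Q in Ah.
+ PROTECTION_V = {
+     2.1: (1.95, 4.30),   # NMC, cylinder
+     10.0: (2.65, 4.25),  # LMO, pouch
+     21.0: (2.65, 4.30),  # NMC, pouch
+     35.0: (2.45, 3.70),  # LFP, prismatic
+     25.0: (2.65, 4.25),  # LMO, pouch (batch 2)
+     26.0: (2.65, 4.25),  # LMO, pouch (batch 2)
+ }
+
+ def within_protection(voltage_v, q_ah):
+     """True if a sampled voltage stays inside the protection window for this cell type."""
+     low, high = PROTECTION_V[q_ah]
+     return low <= voltage_v <= high
+ ```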
66
+ # 2.6 SOC Deviation
67
+
68
+ When voltage protection truncates a pulse, the charged and discharged capacities of adjacent positive and negative pulses with the same pulse magnitude and planned pulse width become unequal, which introduces an accumulative SOC deviation into subsequent pulse tests. This SOC deviation is usually very slight because the pulse widths are extremely short (no more than 5 s). The voltage may exceed the protection range when the tested SOC is close to the SOH value of the battery. In the Nature Communications publication<sup>1</sup>, we only used data from $5 - 50\%$ SOC; considering that the SOH of the vast majority of batteries is above 0.6, the SOC deviation can be ignored there for simplicity. However, readers who want to use data at higher SOC levels need to pay attention to this SOC deviation to avoid introducing unnecessary errors.
69
+
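+ Users who work above 50% SOC can quantify this deviation directly by coulomb-counting the recorded 100 Hz pulse current rather than trusting the planned SOC label. A minimal sketch (signed current with charging positive; array names are again illustrative assumptions):
+
+ ```python
+ import numpy as np
+
+ def soc_deviation_percent(time_s, current_a, nominal_capacity_ah):
+     """Net charge actually injected over a pulse sequence, as % of nominal capacity.
+     A non-zero value is the accumulated SOC deviation left behind by truncated
+     (voltage-protected) pulses."""
+     t = np.asarray(time_s, dtype=float)
+     i = np.asarray(current_a, dtype=float)
+     net_ah = np.sum(0.5 * (i[1:] + i[:-1]) * np.diff(t)) / 3600.0  # trapezoidal rule
+     return 100.0 * net_ah / nominal_capacity_ah
+ ```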
70
+ # 3 Accessibility
71
+
72
+ The raw data and the manipulation code are accessible at this link. The readers should cite the Nature Communications publication<sup>1</sup> and this data descriptor article when using the data.
73
+
74
+ # Reference
75
+
76
+ 1 Tao, S. et al. Generative learning assisted state-of-health estimation for sustainable battery recycling with random retirement conditions. Nature Communications 15, 10154 (2024). https://doi.org/10.1038/s41467-024-54454-0
2502.16xxx/2502.16848/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9dc8cc3a5d7123e653f4d8c8dc782ca9883c18d5800ffc48e83bffb8e518d1e7
3
+ size 157712
2502.16xxx/2502.16848/layout.json ADDED
@@ -0,0 +1,1842 @@
1
+ {
2
+ "pdf_info": [
3
+ {
4
+ "para_blocks": [
5
+ {
6
+ "bbox": [
7
+ 88,
8
+ 77,
9
+ 180,
10
+ 90
11
+ ],
12
+ "type": "title",
13
+ "angle": 0,
14
+ "lines": [
15
+ {
16
+ "bbox": [
17
+ 88,
18
+ 77,
19
+ 180,
20
+ 90
21
+ ],
22
+ "spans": [
23
+ {
24
+ "bbox": [
25
+ 88,
26
+ 77,
27
+ 180,
28
+ 90
29
+ ],
30
+ "type": "text",
31
+ "content": "Dataset Descriptor"
32
+ }
33
+ ]
34
+ }
35
+ ],
36
+ "index": 0
37
+ },
38
+ {
39
+ "bbox": [
40
+ 88,
41
+ 100,
42
+ 506,
43
+ 137
44
+ ],
45
+ "type": "text",
46
+ "angle": 0,
47
+ "lines": [
48
+ {
49
+ "bbox": [
50
+ 88,
51
+ 100,
52
+ 506,
53
+ 137
54
+ ],
55
+ "spans": [
56
+ {
57
+ "bbox": [
58
+ 88,
59
+ 100,
60
+ 506,
61
+ 137
62
+ ],
63
+ "type": "text",
64
+ "content": "PulseBat: A field-accessible dataset for second-life battery diagnostics from realistic histories using multidimensional rapid pulse test"
65
+ }
66
+ ]
67
+ }
68
+ ],
69
+ "index": 1
70
+ },
71
+ {
72
+ "bbox": [
73
+ 88,
74
+ 170,
75
+ 147,
76
+ 183
77
+ ],
78
+ "type": "title",
79
+ "angle": 0,
80
+ "lines": [
81
+ {
82
+ "bbox": [
83
+ 88,
84
+ 170,
85
+ 147,
86
+ 183
87
+ ],
88
+ "spans": [
89
+ {
90
+ "bbox": [
91
+ 88,
92
+ 170,
93
+ 147,
94
+ 183
95
+ ],
96
+ "type": "text",
97
+ "content": "Author List"
98
+ }
99
+ ]
100
+ }
101
+ ],
102
+ "index": 2
103
+ },
104
+ {
105
+ "bbox": [
106
+ 88,
107
+ 193,
108
+ 506,
109
+ 231
110
+ ],
111
+ "type": "text",
112
+ "angle": 0,
113
+ "lines": [
114
+ {
115
+ "bbox": [
116
+ 88,
117
+ 193,
118
+ 506,
119
+ 231
120
+ ],
121
+ "spans": [
122
+ {
123
+ "bbox": [
124
+ 88,
125
+ 193,
126
+ 506,
127
+ 231
128
+ ],
129
+ "type": "text",
130
+ "content": "Shengyu Tao"
131
+ },
132
+ {
133
+ "bbox": [
134
+ 88,
135
+ 193,
136
+ 506,
137
+ 231
138
+ ],
139
+ "type": "inline_equation",
140
+ "content": "^{1}"
141
+ },
142
+ {
143
+ "bbox": [
144
+ 88,
145
+ 193,
146
+ 506,
147
+ 231
148
+ ],
149
+ "type": "text",
150
+ "content": ", Guangyuan Ma"
151
+ },
152
+ {
153
+ "bbox": [
154
+ 88,
155
+ 193,
156
+ 506,
157
+ 231
158
+ ],
159
+ "type": "inline_equation",
160
+ "content": "^{1}"
161
+ },
162
+ {
163
+ "bbox": [
164
+ 88,
165
+ 193,
166
+ 506,
167
+ 231
168
+ ],
169
+ "type": "text",
170
+ "content": ", Huixiong Yang"
171
+ },
172
+ {
173
+ "bbox": [
174
+ 88,
175
+ 193,
176
+ 506,
177
+ 231
178
+ ],
179
+ "type": "inline_equation",
180
+ "content": "^{2}"
181
+ },
182
+ {
183
+ "bbox": [
184
+ 88,
185
+ 193,
186
+ 506,
187
+ 231
188
+ ],
189
+ "type": "text",
190
+ "content": ", Minyan Lu"
191
+ },
192
+ {
193
+ "bbox": [
194
+ 88,
195
+ 193,
196
+ 506,
197
+ 231
198
+ ],
199
+ "type": "inline_equation",
200
+ "content": "^{2}"
201
+ },
202
+ {
203
+ "bbox": [
204
+ 88,
205
+ 193,
206
+ 506,
207
+ 231
208
+ ],
209
+ "type": "text",
210
+ "content": ", Guodan Wei"
211
+ },
212
+ {
213
+ "bbox": [
214
+ 88,
215
+ 193,
216
+ 506,
217
+ 231
218
+ ],
219
+ "type": "inline_equation",
220
+ "content": "^{1}"
221
+ },
222
+ {
223
+ "bbox": [
224
+ 88,
225
+ 193,
226
+ 506,
227
+ 231
228
+ ],
229
+ "type": "text",
230
+ "content": ", Guangmin Zhou"
231
+ },
232
+ {
233
+ "bbox": [
234
+ 88,
235
+ 193,
236
+ 506,
237
+ 231
238
+ ],
239
+ "type": "inline_equation",
240
+ "content": "^{1,*}"
241
+ },
242
+ {
243
+ "bbox": [
244
+ 88,
245
+ 193,
246
+ 506,
247
+ 231
248
+ ],
249
+ "type": "text",
250
+ "content": ", Xuan Zhang"
251
+ },
252
+ {
253
+ "bbox": [
254
+ 88,
255
+ 193,
256
+ 506,
257
+ 231
258
+ ],
259
+ "type": "inline_equation",
260
+ "content": "^{1,*}"
261
+ }
262
+ ]
263
+ }
264
+ ],
265
+ "index": 3
266
+ },
267
+ {
268
+ "bbox": [
269
+ 88,
270
+ 264,
271
+ 180,
272
+ 276
273
+ ],
274
+ "type": "title",
275
+ "angle": 0,
276
+ "lines": [
277
+ {
278
+ "bbox": [
279
+ 88,
280
+ 264,
281
+ 180,
282
+ 276
283
+ ],
284
+ "spans": [
285
+ {
286
+ "bbox": [
287
+ 88,
288
+ 264,
289
+ 180,
290
+ 276
291
+ ],
292
+ "type": "text",
293
+ "content": "Author Affiliations"
294
+ }
295
+ ]
296
+ }
297
+ ],
298
+ "index": 4
299
+ },
300
+ {
301
+ "bbox": [
302
+ 86,
303
+ 287,
304
+ 484,
305
+ 322
306
+ ],
307
+ "type": "list",
308
+ "angle": 0,
309
+ "index": 7,
310
+ "blocks": [
311
+ {
312
+ "bbox": [
313
+ 87,
314
+ 287,
315
+ 484,
316
+ 299
317
+ ],
318
+ "type": "text",
319
+ "angle": 0,
320
+ "lines": [
321
+ {
322
+ "bbox": [
323
+ 87,
324
+ 287,
325
+ 484,
326
+ 299
327
+ ],
328
+ "spans": [
329
+ {
330
+ "bbox": [
331
+ 87,
332
+ 287,
333
+ 484,
334
+ 299
335
+ ],
336
+ "type": "text",
337
+ "content": "1. Tsinghua Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"
338
+ }
339
+ ]
340
+ }
341
+ ],
342
+ "index": 5
343
+ },
344
+ {
345
+ "bbox": [
346
+ 86,
347
+ 311,
348
+ 385,
349
+ 322
350
+ ],
351
+ "type": "text",
352
+ "angle": 0,
353
+ "lines": [
354
+ {
355
+ "bbox": [
356
+ 86,
357
+ 311,
358
+ 385,
359
+ 322
360
+ ],
361
+ "spans": [
362
+ {
363
+ "bbox": [
364
+ 86,
365
+ 311,
366
+ 385,
367
+ 322
368
+ ],
369
+ "type": "text",
370
+ "content": "2. Xiamen Lijing New Energy Technology Co., Ltd., Xiamen, China"
371
+ }
372
+ ]
373
+ }
374
+ ],
375
+ "index": 6
376
+ }
377
+ ],
378
+ "sub_type": "text"
379
+ },
380
+ {
381
+ "bbox": [
382
+ 88,
383
+ 349,
384
+ 198,
385
+ 363
386
+ ],
387
+ "type": "title",
388
+ "angle": 0,
389
+ "lines": [
390
+ {
391
+ "bbox": [
392
+ 88,
393
+ 349,
394
+ 198,
395
+ 363
396
+ ],
397
+ "spans": [
398
+ {
399
+ "bbox": [
400
+ 88,
401
+ 349,
402
+ 198,
403
+ 363
404
+ ],
405
+ "type": "text",
406
+ "content": "Significance Statement"
407
+ }
408
+ ]
409
+ }
410
+ ],
411
+ "index": 8
412
+ },
413
+ {
414
+ "bbox": [
415
+ 88,
416
+ 370,
417
+ 507,
418
+ 709
419
+ ],
420
+ "type": "text",
421
+ "angle": 0,
422
+ "lines": [
423
+ {
424
+ "bbox": [
425
+ 88,
426
+ 370,
427
+ 507,
428
+ 709
429
+ ],
430
+ "spans": [
431
+ {
432
+ "bbox": [
433
+ 88,
434
+ 370,
435
+ 507,
436
+ 709
437
+ ],
438
+ "type": "text",
439
+ "content": "As electric vehicles (EVs) approach the end of their operational life, their batteries retain significant economic value and present promising opportunities for second-life use and material recycling. This is particularly compelling for Global South and other underdeveloped regions, where reliable energy storage is vital to addressing critical challenges posed by weak and even nonexistent power grid and energy infrastructures. However, despite this potential, widespread adoption has been hindered by critical uncertainties surrounding the technical performance, safety, and recertification of second-life batteries. In cases where they have been redeployed, mismatches between estimated and actual performance often render batteries technically unsuitable or hazardous, turning them into liabilities for communities they were intended to benefit. This considerable misalignment exacerbates energy access disparities and undermines the broader vision of energy justice, highlighting an urgent need for robust and scalable solutions to unlock the potential. In the PulseBat Dataset, the authors tested 464 retired lithium-ion batteries, covering 3 cathode material types, 6 historical usages, 3 physical formats, and 6 capacity designs. The pulse test experiments were performed repeatedly for each second-life battery with 10 pulse width, 10 pulse magnitude, multiple state-of-charge, and state-of-health conditions, e.g., from 0.37 to 1.03 (larger than the nominal capacity due to manufacturing inconsistencies). The PulseBat Dataset recorded these test conditions and the voltage response as well as the temperature signals that were subject to the injected pulse current, which could be used as a valuable data resource for critical diagnostics tasks such as state-of-charge estimation, state-of-health estimation, cathode material type identification, open-circuit voltage reconstruction, thermal management, and beyond. Part of the PulseBat Dataset was used in Nature Communications publications that addressed the state-of-health estimation problem under randomly distributed state-of-charge conditions<sup>1</sup>."
440
+ }
441
+ ]
442
+ }
443
+ ],
444
+ "index": 9
445
+ },
446
+ {
447
+ "bbox": [
448
+ 88,
449
+ 732,
450
+ 168,
451
+ 745
452
+ ],
453
+ "type": "title",
454
+ "angle": 0,
455
+ "lines": [
456
+ {
457
+ "bbox": [
458
+ 88,
459
+ 732,
460
+ 168,
461
+ 745
462
+ ],
463
+ "spans": [
464
+ {
465
+ "bbox": [
466
+ 88,
467
+ 732,
468
+ 168,
469
+ 745
470
+ ],
471
+ "type": "text",
472
+ "content": "Correspondence"
473
+ }
474
+ ]
475
+ }
476
+ ],
477
+ "index": 10
478
+ },
479
+ {
480
+ "bbox": [
481
+ 88,
482
+ 755,
483
+ 506,
484
+ 767
485
+ ],
486
+ "type": "text",
487
+ "angle": 0,
488
+ "lines": [
489
+ {
490
+ "bbox": [
491
+ 88,
492
+ 755,
493
+ 506,
494
+ 767
495
+ ],
496
+ "spans": [
497
+ {
498
+ "bbox": [
499
+ 88,
500
+ 755,
501
+ 506,
502
+ 767
503
+ ],
504
+ "type": "text",
505
+ "content": "Xuan Zhang: xuanzhang@sz.tsinghua.edu.cn;Guangmin Zhou: guangminzhou@sz.tsinghua.edu.cn"
506
+ }
507
+ ]
508
+ }
509
+ ],
510
+ "index": 11
511
+ }
512
+ ],
513
+ "discarded_blocks": [],
514
+ "page_size": [
515
+ 595,
516
+ 841
517
+ ],
518
+ "page_idx": 0
519
+ },
520
+ {
521
+ "para_blocks": [
522
+ {
523
+ "bbox": [
524
+ 88,
525
+ 77,
526
+ 157,
527
+ 89
528
+ ],
529
+ "type": "title",
530
+ "angle": 0,
531
+ "lines": [
532
+ {
533
+ "bbox": [
534
+ 88,
535
+ 77,
536
+ 157,
537
+ 89
538
+ ],
539
+ "spans": [
540
+ {
541
+ "bbox": [
542
+ 88,
543
+ 77,
544
+ 157,
545
+ 89
546
+ ],
547
+ "type": "text",
548
+ "content": "1 Overview"
549
+ }
550
+ ]
551
+ }
552
+ ],
553
+ "index": 0
554
+ },
555
+ {
556
+ "type": "table",
557
+ "bbox": [
558
+ 109,
559
+ 116,
560
+ 505,
561
+ 287
562
+ ],
563
+ "blocks": [
564
+ {
565
+ "bbox": [
566
+ 108,
567
+ 100,
568
+ 404,
569
+ 113
570
+ ],
571
+ "lines": [
572
+ {
573
+ "bbox": [
574
+ 108,
575
+ 100,
576
+ 404,
577
+ 113
578
+ ],
579
+ "spans": [
580
+ {
581
+ "bbox": [
582
+ 108,
583
+ 100,
584
+ 404,
585
+ 113
586
+ ],
587
+ "type": "text",
588
+ "content": "The overview of second-life batteries tested is summarized below:"
589
+ }
590
+ ]
591
+ }
592
+ ],
593
+ "index": 1,
594
+ "angle": 0,
595
+ "type": "table_caption"
596
+ },
597
+ {
598
+ "bbox": [
599
+ 109,
600
+ 116,
601
+ 505,
602
+ 287
603
+ ],
604
+ "lines": [
605
+ {
606
+ "bbox": [
607
+ 109,
608
+ 116,
609
+ 505,
610
+ 287
611
+ ],
612
+ "spans": [
613
+ {
614
+ "bbox": [
615
+ 109,
616
+ 116,
617
+ 505,
618
+ 287
619
+ ],
620
+ "type": "table",
621
+ "html": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>History</td><td>Quantity</td><td>State of health Range (Median, Std.)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>Accelerated aging</td><td>67</td><td>0.61-0.92 (0.83, 0.07)</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>HEV1</td><td>95</td><td>0.51-0.95 (0.88, 0.11)</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>BEV1</td><td>52</td><td>0.75-1.01 (0.99, 0.05)</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>HEV2</td><td>56</td><td>0.74-0.96 (0.85, 0.05)</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>PHEV1</td><td>96</td><td>0.37-0.95 (0.80, 0.12)</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>HEV3</td><td>98</td><td>0.78-1.03 (0.94, 0.07)</td></tr></table>",
622
+ "image_path": "ba3932483939970c18d1e06c7d5b3fbba7b038d3620f7fa1ef8ea61809000014.jpg"
623
+ }
624
+ ]
625
+ }
626
+ ],
627
+ "index": 2,
628
+ "angle": 0,
629
+ "type": "table_body"
630
+ }
631
+ ],
632
+ "index": 2
633
+ },
634
+ {
635
+ "bbox": [
636
+ 107,
637
+ 289,
638
+ 506,
639
+ 365
640
+ ],
641
+ "type": "text",
642
+ "angle": 0,
643
+ "lines": [
644
+ {
645
+ "bbox": [
646
+ 107,
647
+ 289,
648
+ 506,
649
+ 365
650
+ ],
651
+ "spans": [
652
+ {
653
+ "bbox": [
654
+ 107,
655
+ 289,
656
+ 506,
657
+ 365
658
+ ],
659
+ "type": "text",
660
+ "content": "where, NMC, LMO, and LFP stand for lithium nickel manganese cobalt oxide, lithium manganese oxide, and lithium iron phosphate, respectively. HEV, BEV, and PHEV stand for hybrid, battery, and plug-in hybrid electric vehicles, respectively. Q stands for the nominal capacity rated by the manufacturer. State-of-health value can be larger than 1 due to inconsistencies from the manufacturing process."
661
+ }
662
+ ]
663
+ }
664
+ ],
665
+ "index": 3
666
+ },
667
+ {
668
+ "bbox": [
669
+ 86,
670
+ 370,
671
+ 232,
672
+ 385
673
+ ],
674
+ "type": "title",
675
+ "angle": 0,
676
+ "lines": [
677
+ {
678
+ "bbox": [
679
+ 86,
680
+ 370,
681
+ 232,
682
+ 385
683
+ ],
684
+ "spans": [
685
+ {
686
+ "bbox": [
687
+ 86,
688
+ 370,
689
+ 232,
690
+ 385
691
+ ],
692
+ "type": "text",
693
+ "content": "2 Experimental procedures"
694
+ }
695
+ ]
696
+ }
697
+ ],
698
+ "index": 4
699
+ },
700
+ {
701
+ "bbox": [
702
+ 107,
703
+ 394,
704
+ 506,
705
+ 432
706
+ ],
707
+ "type": "text",
708
+ "angle": 0,
709
+ "lines": [
710
+ {
711
+ "bbox": [
712
+ 107,
713
+ 394,
714
+ 506,
715
+ 432
716
+ ],
717
+ "spans": [
718
+ {
719
+ "bbox": [
720
+ 107,
721
+ 394,
722
+ 506,
723
+ 432
724
+ ],
725
+ "type": "text",
726
+ "content": "Tests were performed with BAT-NEEFLCT-05300-V010, NEBULA, Co, Ltd, and the air conditioner temperature was set at "
727
+ },
728
+ {
729
+ "bbox": [
730
+ 107,
731
+ 394,
732
+ 506,
733
+ 432
734
+ ],
735
+ "type": "inline_equation",
736
+ "content": "25^{\\circ}\\mathrm{C}"
737
+ },
738
+ {
739
+ "bbox": [
740
+ 107,
741
+ 394,
742
+ 506,
743
+ 432
744
+ ],
745
+ "type": "text",
746
+ "content": "."
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "index": 5
752
+ },
753
+ {
754
+ "bbox": [
755
+ 108,
756
+ 441,
757
+ 276,
758
+ 455
759
+ ],
760
+ "type": "title",
761
+ "angle": 0,
762
+ "lines": [
763
+ {
764
+ "bbox": [
765
+ 108,
766
+ 441,
767
+ 276,
768
+ 455
769
+ ],
770
+ "spans": [
771
+ {
772
+ "bbox": [
773
+ 108,
774
+ 441,
775
+ 276,
776
+ 455
777
+ ],
778
+ "type": "text",
779
+ "content": "2.1 Step 1: Capacity Calibration"
780
+ }
781
+ ]
782
+ }
783
+ ],
784
+ "index": 6
785
+ },
786
+ {
787
+ "bbox": [
788
+ 107,
789
+ 465,
790
+ 507,
791
+ 572
792
+ ],
793
+ "type": "text",
794
+ "angle": 0,
795
+ "lines": [
796
+ {
797
+ "bbox": [
798
+ 107,
799
+ 465,
800
+ 507,
801
+ 572
802
+ ],
803
+ "spans": [
804
+ {
805
+ "bbox": [
806
+ 107,
807
+ 465,
808
+ 507,
809
+ 572
810
+ ],
811
+ "type": "text",
812
+ "content": "We use the widely adopted constant current (CC) discharge method as the gold standard for determining the capacity of retired batteries. Even considering the different initial state of charge (SOC) distributions of retired batteries, we use a unified method of first constant current constant voltage (CCCV) charging and then CC discharging to determine the capacity of retired batteries."
813
+ }
814
+ ]
815
+ }
816
+ ],
817
+ "index": 7
818
+ },
819
+ {
820
+ "bbox": [
821
+ 107,
822
+ 581,
823
+ 508,
824
+ 759
825
+ ],
826
+ "type": "text",
827
+ "angle": 0,
828
+ "lines": [
829
+ {
830
+ "bbox": [
831
+ 107,
832
+ 581,
833
+ 508,
834
+ 759
835
+ ],
836
+ "spans": [
837
+ {
838
+ "bbox": [
839
+ 107,
840
+ 581,
841
+ 508,
842
+ 759
843
+ ],
844
+ "type": "text",
845
+ "content": "First, the retired batteries are charged to the upper cut-off voltage using a 1C constant current, then charged using constant voltage until the current drops to "
846
+ },
847
+ {
848
+ "bbox": [
849
+ 107,
850
+ 581,
851
+ 508,
852
+ 759
853
+ ],
854
+ "type": "inline_equation",
855
+ "content": "0.05\\mathrm{C}"
856
+ },
857
+ {
858
+ "bbox": [
859
+ 107,
860
+ 581,
861
+ 508,
862
+ 759
863
+ ],
864
+ "type": "text",
865
+ "content": ". The batteries are then discharged to the lower cut-off voltage using a 1C constant current. We use the actual discharge capacity as the calibrated (true) battery capacity and then let the battery rest for 20 minutes before SOC conditioning and pulse injection. The term C refers to the C-rate, determined by the current value required by a 1-hour full charge or discharge of a battery. The sampling frequency during Step 1 is "
866
+ },
867
+ {
868
+ "bbox": [
869
+ 107,
870
+ 581,
871
+ 508,
872
+ 759
873
+ ],
874
+ "type": "inline_equation",
875
+ "content": "1\\mathrm{Hz}"
876
+ },
877
+ {
878
+ "bbox": [
879
+ 107,
880
+ 581,
881
+ 508,
882
+ 759
883
+ ],
884
+ "type": "text",
885
+ "content": ". The cut-off conditions of Step 1 are listed in the table below:"
886
+ }
887
+ ]
888
+ }
889
+ ],
890
+ "index": 8
891
+ }
892
+ ],
893
+ "discarded_blocks": [],
894
+ "page_size": [
895
+ 595,
896
+ 841
897
+ ],
898
+ "page_idx": 1
899
+ },
900
+ {
901
+ "para_blocks": [
902
+ {
903
+ "type": "table",
904
+ "bbox": [
905
+ 109,
906
+ 70,
907
+ 506,
908
+ 242
909
+ ],
910
+ "blocks": [
911
+ {
912
+ "bbox": [
913
+ 109,
914
+ 70,
915
+ 506,
916
+ 242
917
+ ],
918
+ "lines": [
919
+ {
920
+ "bbox": [
921
+ 109,
922
+ 70,
923
+ 506,
924
+ 242
925
+ ],
926
+ "spans": [
927
+ {
928
+ "bbox": [
929
+ 109,
930
+ 70,
931
+ 506,
932
+ 242
933
+ ],
934
+ "type": "table",
935
+ "html": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>2.0/4.2</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.5/3.65</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.7/4.2</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.7/4.2</td></tr></table>",
936
+ "image_path": "58c22c8ce19117ff73cabcaf737287874e1fa5e04150b487484999cb8210fdec.jpg"
937
+ }
938
+ ]
939
+ }
940
+ ],
941
+ "index": 0,
942
+ "angle": 0,
943
+ "type": "table_body"
944
+ }
945
+ ],
946
+ "index": 0
947
+ },
948
+ {
949
+ "bbox": [
950
+ 110,
951
+ 269,
952
+ 262,
953
+ 284
954
+ ],
955
+ "type": "title",
956
+ "angle": 0,
957
+ "lines": [
958
+ {
959
+ "bbox": [
960
+ 110,
961
+ 269,
962
+ 262,
963
+ 284
964
+ ],
965
+ "spans": [
966
+ {
967
+ "bbox": [
968
+ 110,
969
+ 269,
970
+ 262,
971
+ 284
972
+ ],
973
+ "type": "text",
974
+ "content": "2.2 Step 2: SOC Conditioning"
975
+ }
976
+ ]
977
+ }
978
+ ],
979
+ "index": 1
980
+ },
981
+ {
982
+ "bbox": [
983
+ 109,
984
+ 293,
985
+ 506,
986
+ 447
987
+ ],
988
+ "type": "text",
989
+ "angle": 0,
990
+ "lines": [
991
+ {
992
+ "bbox": [
993
+ 109,
994
+ 293,
995
+ 506,
996
+ 447
997
+ ],
998
+ "spans": [
999
+ {
1000
+ "bbox": [
1001
+ 109,
1002
+ 293,
1003
+ 506,
1004
+ 447
1005
+ ],
1006
+ "type": "text",
1007
+ "content": "SOC conditioning refers to adjusting the battery SOC to a desired level. The battery is at its zero SOC when the capacity calibration is finished. When a "
1008
+ },
1009
+ {
1010
+ "bbox": [
1011
+ 109,
1012
+ 293,
1013
+ 506,
1014
+ 447
1015
+ ],
1016
+ "type": "inline_equation",
1017
+ "content": "5\\%"
1018
+ },
1019
+ {
1020
+ "bbox": [
1021
+ 109,
1022
+ 293,
1023
+ 506,
1024
+ 447
1025
+ ],
1026
+ "type": "text",
1027
+ "content": " SOC is desired, we use a 1C constant current for 3 minutes to adjust the calibrated battery to a "
1028
+ },
1029
+ {
1030
+ "bbox": [
1031
+ 109,
1032
+ 293,
1033
+ 506,
1034
+ 447
1035
+ ],
1036
+ "type": "inline_equation",
1037
+ "content": "5\\%"
1038
+ },
1039
+ {
1040
+ "bbox": [
1041
+ 109,
1042
+ 293,
1043
+ 506,
1044
+ 447
1045
+ ],
1046
+ "type": "text",
1047
+ "content": " SOC level. The battery is then left to stand for 10 minutes to rest, expecting the battery to return to a steady state in preparation for subsequent pulse injection. Notice that SOC here is defined as the ratio of charged or dischargeable capacity to the nominal capacity. The sampling frequency during Step 2 is "
1048
+ },
1049
+ {
1050
+ "bbox": [
1051
+ 109,
1052
+ 293,
1053
+ 506,
1054
+ 447
1055
+ ],
1056
+ "type": "inline_equation",
1057
+ "content": "1\\mathrm{Hz}"
1058
+ },
1059
+ {
1060
+ "bbox": [
1061
+ 109,
1062
+ 293,
1063
+ 506,
1064
+ 447
1065
+ ],
1066
+ "type": "text",
1067
+ "content": "."
1068
+ }
1069
+ ]
1070
+ }
1071
+ ],
1072
+ "index": 2
1073
+ },
1074
+ {
1075
+ "bbox": [
1076
+ 110,
1077
+ 456,
1078
+ 245,
1079
+ 470
1080
+ ],
1081
+ "type": "title",
1082
+ "angle": 0,
1083
+ "lines": [
1084
+ {
1085
+ "bbox": [
1086
+ 110,
1087
+ 456,
1088
+ 245,
1089
+ 470
1090
+ ],
1091
+ "spans": [
1092
+ {
1093
+ "bbox": [
1094
+ 110,
1095
+ 456,
1096
+ 245,
1097
+ 470
1098
+ ],
1099
+ "type": "text",
1100
+ "content": "2.3 Step 3: Pulse Injection"
1101
+ }
1102
+ ]
1103
+ }
1104
+ ],
1105
+ "index": 3
1106
+ },
1107
+ {
1108
+ "bbox": [
1109
+ 109,
1110
+ 480,
1111
+ 506,
1112
+ 751
1113
+ ],
1114
+ "type": "text",
1115
+ "angle": 0,
1116
+ "lines": [
1117
+ {
1118
+ "bbox": [
1119
+ 109,
1120
+ 480,
1121
+ 506,
1122
+ 751
1123
+ ],
1124
+ "spans": [
1125
+ {
1126
+ "bbox": [
1127
+ 109,
1128
+ 480,
1129
+ 506,
1130
+ 751
1131
+ ],
1132
+ "type": "text",
1133
+ "content": "The pulse width and pulse resting time are shown in the following table; that is, for each pulse width and resting time (each row of the table), we consecutively perform pulse injection with pulse amplitude being 0.5-1-1.5-2-2.5(C) in order, including positive and negative pulse injections. Note that positive and negative pulses alternate to cancel the equivalent energy injection; thus, the stored energy inside the battery does not change. Take pulse injection at "
1134
+ },
1135
+ {
1136
+ "bbox": [
1137
+ 109,
1138
+ 480,
1139
+ 506,
1140
+ 751
1141
+ ],
1142
+ "type": "inline_equation",
1143
+ "content": "5\\%"
1144
+ },
1145
+ {
1146
+ "bbox": [
1147
+ 109,
1148
+ 480,
1149
+ 506,
1150
+ 751
1151
+ ],
1152
+ "type": "text",
1153
+ "content": " SOC as an example; at the 30ms pulse width, we inject 0.5C positive current pulse, then let the battery rest for 450ms, and then inject 0.5C negative current pulse, then again let the battery rest for 450ms. Still at "
1154
+ },
1155
+ {
1156
+ "bbox": [
1157
+ 109,
1158
+ 480,
1159
+ 506,
1160
+ 751
1161
+ ],
1162
+ "type": "inline_equation",
1163
+ "content": "5\\%"
1164
+ },
1165
+ {
1166
+ "bbox": [
1167
+ 109,
1168
+ 480,
1169
+ 506,
1170
+ 751
1171
+ ],
1172
+ "type": "text",
1173
+ "content": " SOC, other remaining pulses with other amplitudes follow the rest time of the previous pulses. Repetitive experiments are performed until the remaining pulse widths are exhausted. Then, we charge the retired battery with a constant current of 1C for another 3 minutes to "
1174
+ },
1175
+ {
1176
+ "bbox": [
1177
+ 109,
1178
+ 480,
1179
+ 506,
1180
+ 751
1181
+ ],
1182
+ "type": "inline_equation",
1183
+ "content": "10\\%"
1184
+ },
1185
+ {
1186
+ "bbox": [
1187
+ 109,
1188
+ 480,
1189
+ 506,
1190
+ 751
1191
+ ],
1192
+ "type": "text",
1193
+ "content": " SOC (refer to Step 1 for details), followed by the same procedure as explained above."
1194
+ }
1195
+ ]
1196
+ }
1197
+ ],
1198
+ "index": 4
1199
+ }
1200
+ ],
1201
+ "discarded_blocks": [],
1202
+ "page_size": [
1203
+ 595,
1204
+ 841
1205
+ ],
1206
+ "page_idx": 2
1207
+ },
1208
+ {
1209
+ "para_blocks": [
1210
+ {
1211
+ "type": "table",
1212
+ "bbox": [
1213
+ 107,
1214
+ 68,
1215
+ 502,
1216
+ 331
1217
+ ],
1218
+ "blocks": [
1219
+ {
1220
+ "bbox": [
1221
+ 107,
1222
+ 68,
1223
+ 502,
1224
+ 331
1225
+ ],
1226
+ "lines": [
1227
+ {
1228
+ "bbox": [
1229
+ 107,
1230
+ 68,
1231
+ 502,
1232
+ 331
1233
+ ],
1234
+ "spans": [
1235
+ {
1236
+ "bbox": [
1237
+ 107,
1238
+ 68,
1239
+ 502,
1240
+ 331
1241
+ ],
1242
+ "type": "table",
1243
+ "html": "<table><tr><td>Pulse width (ms)</td><td>Pulse rest time (ms)</td><td>Pulse magnitude (±C)</td></tr><tr><td>30</td><td>450</td><td></td></tr><tr><td>50</td><td>750</td><td></td></tr><tr><td>70</td><td>1,050</td><td></td></tr><tr><td>100</td><td>1,500</td><td></td></tr><tr><td>300</td><td>4,500</td><td></td></tr><tr><td>500</td><td>7,500</td><td>0.5-1-1.5-2-2.5</td></tr><tr><td>700</td><td>10,500</td><td></td></tr><tr><td>1,000</td><td>15,000</td><td></td></tr><tr><td>3,000</td><td>45,000</td><td></td></tr><tr><td>5,000</td><td>75,000</td><td></td></tr></table>",
1244
+ "image_path": "36fbea104e3eb148f8f32796e127eaf1c7f71421e018a616e4fe957decfbeaec.jpg"
1245
+ }
1246
+ ]
1247
+ }
1248
+ ],
1249
+ "index": 0,
1250
+ "angle": 0,
1251
+ "type": "table_body"
1252
+ },
1253
+ {
1254
+ "bbox": [
1255
+ 108,
1256
+ 336,
1257
+ 505,
1258
+ 364
1259
+ ],
1260
+ "lines": [
1261
+ {
1262
+ "bbox": [
1263
+ 108,
1264
+ 336,
1265
+ 505,
1266
+ 364
1267
+ ],
1268
+ "spans": [
1269
+ {
1270
+ "bbox": [
1271
+ 108,
1272
+ 336,
1273
+ 505,
1274
+ 364
1275
+ ],
1276
+ "type": "text",
1277
+ "content": "Repeat Step 2 and Step 3 until the SOC conditioning region is exhausted. The sampling frequency during Step 3 is "
1278
+ },
1279
+ {
1280
+ "bbox": [
1281
+ 108,
1282
+ 336,
1283
+ 505,
1284
+ 364
1285
+ ],
1286
+ "type": "inline_equation",
1287
+ "content": "100\\mathrm{Hz}"
1288
+ },
1289
+ {
1290
+ "bbox": [
1291
+ 108,
1292
+ 336,
1293
+ 505,
1294
+ 364
1295
+ ],
1296
+ "type": "text",
1297
+ "content": "."
1298
+ }
1299
+ ]
1300
+ }
1301
+ ],
1302
+ "index": 1,
1303
+ "angle": 0,
1304
+ "type": "table_footnote"
1305
+ }
1306
+ ],
1307
+ "index": 0
1308
+ },
1309
+ {
1310
+ "bbox": [
1311
+ 108,
1312
+ 370,
1313
+ 331,
1314
+ 385
1315
+ ],
1316
+ "type": "title",
1317
+ "angle": 0,
1318
+ "lines": [
1319
+ {
1320
+ "bbox": [
1321
+ 108,
1322
+ 370,
1323
+ 331,
1324
+ 385
1325
+ ],
1326
+ "spans": [
1327
+ {
1328
+ "bbox": [
1329
+ 108,
1330
+ 370,
1331
+ 331,
1332
+ 385
1333
+ ],
1334
+ "type": "text",
1335
+ "content": "2.4 SOC Conditioning Range Determination"
1336
+ }
1337
+ ]
1338
+ }
1339
+ ],
1340
+ "index": 2
1341
+ },
1342
+ {
1343
+ "bbox": [
1344
+ 107,
1345
+ 394,
1346
+ 507,
1347
+ 502
1348
+ ],
1349
+ "type": "text",
1350
+ "angle": 0,
1351
+ "lines": [
1352
+ {
1353
+ "bbox": [
1354
+ 107,
1355
+ 394,
1356
+ 507,
1357
+ 502
1358
+ ],
1359
+ "spans": [
1360
+ {
1361
+ "bbox": [
1362
+ 107,
1363
+ 394,
1364
+ 507,
1365
+ 502
1366
+ ],
1367
+ "type": "text",
1368
+ "content": "The range of SOC conditioning is determined by a calibrated SOH of the retired battery. Specifically, the upper bound of the SOC conditioning region is lower than the calibrated minimal SOH value of the retired battery by 0.05. For instance, when the retired battery has a previously calibrated SOH between 0.50 and 0.55, then the SOC conditioning region will be "
1369
+ },
1370
+ {
1371
+ "bbox": [
1372
+ 107,
1373
+ 394,
1374
+ 507,
1375
+ 502
1376
+ ],
1377
+ "type": "inline_equation",
1378
+ "content": "5\\%"
1379
+ },
1380
+ {
1381
+ "bbox": [
1382
+ 107,
1383
+ 394,
1384
+ 507,
1385
+ 502
1386
+ ],
1387
+ "type": "text",
1388
+ "content": " to "
1389
+ },
1390
+ {
1391
+ "bbox": [
1392
+ 107,
1393
+ 394,
1394
+ 507,
1395
+ 502
1396
+ ],
1397
+ "type": "inline_equation",
1398
+ "content": "45\\%"
1399
+ },
1400
+ {
1401
+ "bbox": [
1402
+ 107,
1403
+ 394,
1404
+ 507,
1405
+ 502
1406
+ ],
1407
+ "type": "text",
1408
+ "content": ", with a grain of "
1409
+ },
1410
+ {
1411
+ "bbox": [
1412
+ 107,
1413
+ 394,
1414
+ 507,
1415
+ 502
1416
+ ],
1417
+ "type": "inline_equation",
1418
+ "content": "5\\%"
1419
+ },
1420
+ {
1421
+ "bbox": [
1422
+ 107,
1423
+ 394,
1424
+ 507,
1425
+ 502
1426
+ ],
1427
+ "type": "text",
1428
+ "content": ". Detailed information is shown in the table below."
1429
+ }
1430
+ ]
1431
+ }
1432
+ ],
1433
+ "index": 3
1434
+ },
1435
+ {
1436
+ "type": "table",
1437
+ "bbox": [
1438
+ 108,
1439
+ 505,
1440
+ 505,
1441
+ 745
1442
+ ],
1443
+ "blocks": [
1444
+ {
1445
+ "bbox": [
1446
+ 108,
1447
+ 505,
1448
+ 505,
1449
+ 745
1450
+ ],
1451
+ "lines": [
1452
+ {
1453
+ "bbox": [
1454
+ 108,
1455
+ 505,
1456
+ 505,
1457
+ 745
1458
+ ],
1459
+ "spans": [
1460
+ {
1461
+ "bbox": [
1462
+ 108,
1463
+ 505,
1464
+ 505,
1465
+ 745
1466
+ ],
1467
+ "type": "table",
1468
+ "html": "<table><tr><td>State-of-health</td><td>State-of-charge (%), with a resolution of 5%</td></tr><tr><td>&gt;0.95</td><td>[5,90]</td></tr><tr><td>0.90-0.95</td><td>[5,85]</td></tr><tr><td>0.85-0.90</td><td>[5,80]</td></tr><tr><td>0.80-0.85</td><td>[5,75]</td></tr><tr><td>0.75-0.80</td><td>[5,70]</td></tr><tr><td>0.70-0.75</td><td>[5,65]</td></tr><tr><td>0.65-0.70</td><td>[5,60]</td></tr><tr><td>0.60-0.65</td><td>[5,55]</td></tr><tr><td>0.55-0.60</td><td>[5,50]</td></tr><tr><td>0.50-0.55</td><td>[5,45]</td></tr><tr><td>0.45-0.50</td><td>[5,40]</td></tr><tr><td>0.40-0.45</td><td>[5,35]</td></tr><tr><td>0.35-0.40</td><td>[5,30]</td></tr><tr><td>&lt;0.35</td><td>Not Found</td></tr></table>",
1469
+ "image_path": "77f51ad5fa3b397fa7f544694906a82532cc8990605a6227bc59dd2f8a0789fc.jpg"
1470
+ }
1471
+ ]
1472
+ }
1473
+ ],
1474
+ "index": 4,
1475
+ "angle": 0,
1476
+ "type": "table_body"
1477
+ }
1478
+ ],
1479
+ "index": 4
1480
+ }
1481
+ ],
1482
+ "discarded_blocks": [],
1483
+ "page_size": [
1484
+ 595,
1485
+ 841
1486
+ ],
1487
+ "page_idx": 3
1488
+ },
1489
+ {
1490
+ "para_blocks": [
1491
+ {
1492
+ "bbox": [
1493
+ 108,
1494
+ 77,
1495
+ 227,
1496
+ 90
1497
+ ],
1498
+ "type": "title",
1499
+ "angle": 0,
1500
+ "lines": [
1501
+ {
1502
+ "bbox": [
1503
+ 108,
1504
+ 77,
1505
+ 227,
1506
+ 90
1507
+ ],
1508
+ "spans": [
1509
+ {
1510
+ "bbox": [
1511
+ 108,
1512
+ 77,
1513
+ 227,
1514
+ 90
1515
+ ],
1516
+ "type": "text",
1517
+ "content": "2.5 Voltage Protection"
1518
+ }
1519
+ ]
1520
+ }
1521
+ ],
1522
+ "index": 0
1523
+ },
1524
+ {
1525
+ "bbox": [
1526
+ 107,
1527
+ 100,
1528
+ 508,
1529
+ 255
1530
+ ],
1531
+ "type": "text",
1532
+ "angle": 0,
1533
+ "lines": [
1534
+ {
1535
+ "bbox": [
1536
+ 107,
1537
+ 100,
1538
+ 508,
1539
+ 255
1540
+ ],
1541
+ "spans": [
1542
+ {
1543
+ "bbox": [
1544
+ 107,
1545
+ 100,
1546
+ 508,
1547
+ 255
1548
+ ],
1549
+ "type": "text",
1550
+ "content": "If the oscillation voltage during pulse injection exceeds the protection range, the current charging or discharging work step will be immediately terminated for a physical security check. If the security check is passed, no time will be made up for the already terminated work step, but the remaining work steps in the test procedure will be continued. In our test, voltage is mainly possible to exceed the protection range during charging, and no cases below the protection range during discharge have been found. The specific protection voltage parameters are consistent with those in the following table."
1551
+ }
1552
+ ]
1553
+ }
1554
+ ],
1555
+ "index": 1
1556
+ },
1557
+ {
1558
+ "type": "table",
1559
+ "bbox": [
1560
+ 109,
1561
+ 258,
1562
+ 505,
1563
+ 428
1564
+ ],
1565
+ "blocks": [
1566
+ {
1567
+ "bbox": [
1568
+ 109,
1569
+ 258,
1570
+ 505,
1571
+ 428
1572
+ ],
1573
+ "lines": [
1574
+ {
1575
+ "bbox": [
1576
+ 109,
1577
+ 258,
1578
+ 505,
1579
+ 428
1580
+ ],
1581
+ "spans": [
1582
+ {
1583
+ "bbox": [
1584
+ 109,
1585
+ 258,
1586
+ 505,
1587
+ 428
1588
+ ],
1589
+ "type": "table",
1590
+ "html": "<table><tr><td>Batch</td><td>Material</td><td>Q (Ah)</td><td>Format</td><td>Cut-off voltage for discharging/charging (V)</td></tr><tr><td>1</td><td>NMC</td><td>2.1</td><td>Cylinder</td><td>1.95/4.3</td></tr><tr><td>1</td><td>LMO</td><td>10.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>1</td><td>NMC</td><td>21.0</td><td>Pouch</td><td>2.65/4.3</td></tr><tr><td>1</td><td>LFP</td><td>35.0</td><td>Prismatic</td><td>2.45/3.7</td></tr><tr><td>2</td><td>LMO</td><td>25.0</td><td>Pouch</td><td>2.65/4.25</td></tr><tr><td>2</td><td>LMO</td><td>26.0</td><td>Pouch</td><td>2.65/4.25</td></tr></table>",
1591
+ "image_path": "00b1392f18da82cdbe5720077c8cb6e2013a800e2b27f5294e1499e1c5e251b2.jpg"
1592
+ }
1593
+ ]
1594
+ }
1595
+ ],
1596
+ "index": 2,
1597
+ "angle": 0,
1598
+ "type": "table_body"
1599
+ }
1600
+ ],
1601
+ "index": 2
1602
+ },
1603
+ {
1604
+ "bbox": [
1605
+ 108,
1606
+ 433,
1607
+ 211,
1608
+ 446
1609
+ ],
1610
+ "type": "title",
1611
+ "angle": 0,
1612
+ "lines": [
1613
+ {
1614
+ "bbox": [
1615
+ 108,
1616
+ 433,
1617
+ 211,
1618
+ 446
1619
+ ],
1620
+ "spans": [
1621
+ {
1622
+ "bbox": [
1623
+ 108,
1624
+ 433,
1625
+ 211,
1626
+ 446
1627
+ ],
1628
+ "type": "text",
1629
+ "content": "2.6 SOC Deviation"
1630
+ }
1631
+ ]
1632
+ }
1633
+ ],
1634
+ "index": 3
1635
+ },
1636
+ {
1637
+ "bbox": [
1638
+ 107,
1639
+ 456,
1640
+ 507,
1641
+ 659
1642
+ ],
1643
+ "type": "text",
1644
+ "angle": 0,
1645
+ "lines": [
1646
+ {
1647
+ "bbox": [
1648
+ 107,
1649
+ 456,
1650
+ 507,
1651
+ 659
1652
+ ],
1653
+ "spans": [
1654
+ {
1655
+ "bbox": [
1656
+ 107,
1657
+ 456,
1658
+ 507,
1659
+ 659
1660
+ ],
1661
+ "type": "text",
1662
+ "content": "The unequal charged and discharged capacity in adjacent positive and negative pulses with the same pulse intensity and planned pulse width caused by voltage protection will lead to an accumulative deviation in SOC to subsequent pulse tests. This SOC deviation is usually very slight due to the extremely short pulse width with no more than 5s. The voltage may exceed the protection range when the tested SOC is close to the SOH value of the battery. In Nature Communications publication<sup>1</sup>, we only used data from "
1663
+ },
1664
+ {
1665
+ "bbox": [
1666
+ 107,
1667
+ 456,
1668
+ 507,
1669
+ 659
1670
+ ],
1671
+ "type": "inline_equation",
1672
+ "content": "5 - 50\\%"
1673
+ },
1674
+ {
1675
+ "bbox": [
1676
+ 107,
1677
+ 456,
1678
+ 507,
1679
+ 659
1680
+ ],
1681
+ "type": "text",
1682
+ "content": " SOC. Considering that the SOH of the vast majority of batteries is above 0.6, the SOC deviation used can be ignored for simplicity. However, if readers want to use data at a higher SOC level, they need to pay attention to this SOC deviation issue to avoid introducing unnecessary errors."
1683
+ }
1684
+ ]
1685
+ }
1686
+ ],
1687
+ "index": 4
1688
+ },
1689
+ {
1690
+ "bbox": [
1691
+ 87,
1692
+ 667,
1693
+ 170,
1694
+ 681
1695
+ ],
1696
+ "type": "title",
1697
+ "angle": 0,
1698
+ "lines": [
1699
+ {
1700
+ "bbox": [
1701
+ 87,
1702
+ 667,
1703
+ 170,
1704
+ 681
1705
+ ],
1706
+ "spans": [
1707
+ {
1708
+ "bbox": [
1709
+ 87,
1710
+ 667,
1711
+ 170,
1712
+ 681
1713
+ ],
1714
+ "type": "text",
1715
+ "content": "3 Accessibility"
1716
+ }
1717
+ ]
1718
+ }
1719
+ ],
1720
+ "index": 5
1721
+ },
1722
+ {
1723
+ "bbox": [
1724
+ 107,
1725
+ 690,
1726
+ 506,
1727
+ 729
1728
+ ],
1729
+ "type": "text",
1730
+ "angle": 0,
1731
+ "lines": [
1732
+ {
1733
+ "bbox": [
1734
+ 107,
1735
+ 690,
1736
+ 506,
1737
+ 729
1738
+ ],
1739
+ "spans": [
1740
+ {
1741
+ "bbox": [
1742
+ 107,
1743
+ 690,
1744
+ 506,
1745
+ 729
1746
+ ],
1747
+ "type": "text",
1748
+ "content": "The raw data and the manipulation code are accessible at this link. The readers should cite the Nature Communications publication<sup>1</sup> and this data descriptor article when using the data."
1749
+ }
1750
+ ]
1751
+ }
1752
+ ],
1753
+ "index": 6
1754
+ }
1755
+ ],
1756
+ "discarded_blocks": [],
1757
+ "page_size": [
1758
+ 595,
1759
+ 841
1760
+ ],
1761
+ "page_idx": 4
1762
+ },
1763
+ {
1764
+ "para_blocks": [
1765
+ {
1766
+ "bbox": [
1767
+ 88,
1768
+ 74,
1769
+ 137,
1770
+ 85
1771
+ ],
1772
+ "type": "title",
1773
+ "angle": 0,
1774
+ "lines": [
1775
+ {
1776
+ "bbox": [
1777
+ 88,
1778
+ 74,
1779
+ 137,
1780
+ 85
1781
+ ],
1782
+ "spans": [
1783
+ {
1784
+ "bbox": [
1785
+ 88,
1786
+ 74,
1787
+ 137,
1788
+ 85
1789
+ ],
1790
+ "type": "text",
1791
+ "content": "Reference"
1792
+ }
1793
+ ]
1794
+ }
1795
+ ],
1796
+ "index": 0
1797
+ },
1798
+ {
1799
+ "bbox": [
1800
+ 88,
1801
+ 89,
1802
+ 488,
1803
+ 134
1804
+ ],
1805
+ "type": "ref_text",
1806
+ "angle": 0,
1807
+ "lines": [
1808
+ {
1809
+ "bbox": [
1810
+ 88,
1811
+ 89,
1812
+ 488,
1813
+ 134
1814
+ ],
1815
+ "spans": [
1816
+ {
1817
+ "bbox": [
1818
+ 88,
1819
+ 89,
1820
+ 488,
1821
+ 134
1822
+ ],
1823
+ "type": "text",
1824
+ "content": "1 Tao, S. et al. Generative learning assisted state-of-health estimation for sustainable battery recycling with random retirement conditions. Nature Communications 15, 10154 (2024). https://doi.org:10.1038/s41467-024-54454-0"
1825
+ }
1826
+ ]
1827
+ }
1828
+ ],
1829
+ "index": 1
1830
+ }
1831
+ ],
1832
+ "discarded_blocks": [],
1833
+ "page_size": [
1834
+ 595,
1835
+ 841
1836
+ ],
1837
+ "page_idx": 5
1838
+ }
1839
+ ],
1840
+ "_backend": "vlm",
1841
+ "_version_name": "2.6.4"
1842
+ }
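The layout-style JSON that ends here nests each page's content under "para_blocks", every block under "lines", and every line under "spans" whose "type" is "text", "inline_equation", or "table" (with the table HTML kept in "html"). As a rough illustration of how that structure can be flattened back into readable text, here is a minimal Python sketch; the top-level "pdf_info" key and the "layout.json" filename in the commented usage are assumptions, since the beginning of the file is not shown in this diff.

```python
import json

def spans_to_text(spans):
    """Join the spans of one line; wrap inline equations in $...$, keep table HTML."""
    parts = []
    for span in spans:
        kind = span.get("type")
        if kind == "inline_equation":
            parts.append(f"${span.get('content', '')}$")
        elif kind == "table":
            parts.append(span.get("html", ""))
        else:  # plain "text" spans
            parts.append(span.get("content", ""))
    return "".join(parts)

def block_to_text(block):
    """Flatten one para_block (title, text, ref_text, or table) into a string."""
    if block.get("type") == "table":
        # Table blocks nest caption/body/footnote sub-blocks under "blocks".
        return "\n".join(block_to_text(sub) for sub in block.get("blocks", []))
    return " ".join(spans_to_text(line.get("spans", [])) for line in block.get("lines", []))

def pages_to_text(pages):
    """Reconstruct readable text from a list of page dicts holding 'para_blocks'."""
    chunks = []
    for page in pages:
        for block in page.get("para_blocks", []):
            text = block_to_text(block)
            if text.strip():
                chunks.append(text)
    return "\n\n".join(chunks)

# Assumed usage -- the key holding the page list is not visible in this excerpt:
# with open("layout.json", encoding="utf-8") as f:
#     doc = json.load(f)
# print(pages_to_text(doc.get("pdf_info", [])))
```

Spans are concatenated without extra separators because, as in the blocks above, a text span that precedes an inline equation already ends with a trailing space.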
2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_content_list.json ADDED
@@ -0,0 +1,888 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Toward Agentic AI: Generative Information Retrieval Inspired Intelligent Communications and Networking",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 91,
8
+ 70,
9
+ 903,
10
+ 175
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Ruichen Zhang, Shunpu Tang, Yinqiu Liu, Dusit Niyato, Fellow, IEEE, Zehui Xiong, Sumei Sun, Fellow, IEEE, Shiwen Mao, Fellow, IEEE, and Zhu Han, Fellow, IEEE",
17
+ "bbox": [
18
+ 179,
19
+ 181,
20
+ 815,
21
+ 215
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—The increasing complexity and scale of modern telecommunications networks demand intelligent automation to enhance efficiency, adaptability, and resilience. Agentic AI has emerged as a key paradigm for intelligent communications and networking, enabling AI-driven agents to perceive, reason, decide, and act within dynamic networking environments. However, effective decision-making in telecom applications, such as network planning, management, and resource allocation, requires integrating retrieval mechanisms that support multi-hop reasoning, historical cross-referencing, and compliance with evolving 3GPP standards. This article presents a forward-looking perspective on generative information retrieval-inspired intelligent communications and networking, emphasizing the role of knowledge acquisition, processing, and retrieval in agentic AI for telecom systems. We first provide a comprehensive review of generative information retrieval strategies, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and agentic contextual retrieval. We then analyze their advantages, limitations, and suitability for various networking scenarios. Next, we present a survey about their applications in communications and networking. Additionally, we introduce an agentic contextual retrieval framework to enhance telecom-specific planning by integrating multi-source retrieval, structured reasoning, and self-reflective validation. Experimental results demonstrate that our framework significantly improves answer accuracy, explanation consistency, and retrieval efficiency compared to traditional and semantic retrieval methods. Finally, we outline future research directions.",
28
+ "bbox": [
29
+ 73,
30
+ 266,
31
+ 491,
32
+ 619
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "I. INTRODUCTION",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 215,
42
+ 647,
43
+ 349,
44
+ 660
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "According to a Cisco report, the number of connected devices is expected to surpass 125 billion by $2030^{1}$ , requiring networking systems to process massive amounts of data while maintaining seamless interactions across diverse, heterogeneous infrastructures. To support this evolution, modern networks must incorporate intelligent decision-making",
51
+ "bbox": [
52
+ 73,
53
+ 667,
54
+ 490,
55
+ 758
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "list",
61
+ "sub_type": "text",
62
+ "list_items": [
63
+ "R. Zhang, S. Tang, Y. Liu, and D. Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: ruichen.zhang@ntu.edu.sg, n2409411h@e.ntu.edu.sg, yinqiu001@e.ntu.edu.sg, dniyato@ntu.edu.sg).",
64
+ "Z. Xiong is with the Computer Science and Design Pillar, University of Technology and Design, Singapore (e-mail: zehui_xiong@sutd.edu.sg).",
65
+ "S. Sun is with the Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore (e-mail: sunsm@i2r.a-star.edu.sg).",
66
+ "S. Mao is with the Department of Electrical and Computer Engineering, Auburn University, Auburn, AL 36849, USA (e-mail: smao@ieee.org).",
67
+ "Z. Han is with the University of Houston, Houston TX 77004, USA, and also with the Department of Computer Science and Engineering, Kyung Hee University, Seoul 446701, South Korea (e-mail: hanzhu22@gmail.com).",
68
+ "<sup>1</sup>https://blogs.cisco.com/industrial-iot/iot-is-creating-massive-growth-opportunities"
69
+ ],
70
+ "bbox": [
71
+ 73,
72
+ 771,
73
+ 491,
74
+ 943
75
+ ],
76
+ "page_idx": 0
77
+ },
78
+ {
79
+ "type": "text",
80
+ "text": "mechanisms that enable autonomous control, adaptive resource management, and real-time optimization [1]. Agentic AI has emerged as a promising paradigm for autonomous network intelligence, addressing the limitations of traditional rule-based and static AI architectures. Introduced by OpenAI $^2$ , DeepSeek $^3$ , and other research institutions, agentic AI refers to autonomous agents that can perceive, reason, act, and continuously learn from their environments, allowing them to dynamically optimize network configurations, manage resources, and mitigate failures in large-scale systems [2]. Unlike conventional AI, which operates on fixed rules or pretrained models, agentic AI leverages large language models (LLMs), generative AI-based decision-making, and multi-embodied AI agent collaboration to facilitate self-organizing, highly adaptive network architectures [3]. For example, in [4], the authors explored intent-based networking with agentic AI, where autonomous agents dynamically updated network management policies based on user-defined intents, achieving a $32\\%$ improvement in QoS requirements and a $40\\%$ reduction in manual intervention for network reconfiguration. Despite its potential, agentic AI faces critical limitations, particularly in handling large-scale network data, maintaining long-term memory, and retrieving historical insights for enhanced decision-making. Specifically, LLM-based agents often lack efficient information retrieval methods, resulting in hallucinations, context drift, and response inconsistency, which undermine their reliability in real-world networking applications.",
81
+ "bbox": [
82
+ 501,
83
+ 265,
84
+ 921,
85
+ 686
86
+ ],
87
+ "page_idx": 0
88
+ },
89
+ {
90
+ "type": "text",
91
+ "text": "To mitigate these limitations, generative information retrieval has been proposed as a fundamental enhancement for agentic AI-driven network intelligence [5]. Unlike traditional retrieval techniques, which rely on static keyword searches and limited contextual matching, generative information retrieval dynamically retrieves, synthesizes, and integrates multi-source knowledge, enabling memory-augmented, context-aware reasoning. For instance, in real-world networking applications, retrieval-augmented AI systems can access historical network logs, regulatory standards, and prior optimization strategies, allowing them to infer multi-hop dependencies across diverse network data sources [6]. This approach significantly enhances decision accuracy, adaptability, and long-term contextual understanding. An example of generative information retrieval in",
92
+ "bbox": [
93
+ 503,
94
+ 688,
95
+ 921,
96
+ 901
97
+ ],
98
+ "page_idx": 0
99
+ },
100
+ {
101
+ "type": "list",
102
+ "sub_type": "text",
103
+ "list_items": [
104
+ "$^{2}$ https://openai.com/",
105
+ "<sup>3</sup>https://www_deepseek.com/"
106
+ ],
107
+ "bbox": [
108
+ 517,
109
+ 917,
110
+ 673,
111
+ 944
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "page_number",
117
+ "text": "1",
118
+ "bbox": [
119
+ 911,
120
+ 30,
121
+ 919,
122
+ 40
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "aside_text",
128
+ "text": "arXiv:2502.16866v1 [cs.NI] 24 Feb 2025",
129
+ "bbox": [
130
+ 22,
131
+ 272,
132
+ 57,
133
+ 704
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "image",
139
+ "img_path": "images/fab36b19c36a4adc0f425bf9783ceb86800cc59ad2b4b095bbda03c986ecc738.jpg",
140
+ "image_caption": [
141
+ "Fig. 1. Overview of key retrieval strategies in networking. The figure highlights the methodologies, key components, and applications of different approaches, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and agentic contextual retrieval."
142
+ ],
143
+ "image_footnote": [],
144
+ "bbox": [
145
+ 86,
146
+ 75,
147
+ 915,
148
+ 272
149
+ ],
150
+ "page_idx": 1
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "practice is Meta AI's LlamaIndex<sup>4</sup>, which enables structured document retrieval for LLM-based applications. It allows AI agents to process and integrate domain-specific knowledge in real-time.",
155
+ "bbox": [
156
+ 73,
157
+ 335,
158
+ 491,
159
+ 395
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "Building on these foundations, this article provides a forward-looking perspective on agentic contextual retrieval and its role in enhancing information retrieval and decision-making within 3GPP-driven autonomous networking environments. Unlike conventional retrieval-augmented AI frameworks, the proposed approach integrates multi-source retrieval, structured reasoning, and self-reflective validation, thereby ensuring improved retrieval accuracy, contextual coherence, and decision consistency. To the best of our knowledge, this is the first work to explore the potential of agentic contextual retrieval for 3GPP-based telecommunications troubleshooting and real-time standard-compliant decision-making. The key contributions of this work are summarized as follows.",
166
+ "bbox": [
167
+ 73,
168
+ 396,
169
+ 491,
170
+ 590
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "Firstly, we summarize different retrieval strategies, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and demonstrate the most advanced agentic contextual retrieval. We analyze their applications in networking environments, identifying key challenges and the role of retrieval in enhancing network intelligence. Secondly, we provide a comprehensive review of retrieval-based methodologies in networking and communications, categorizing existing works based on their scenarios, proposed techniques, and publication timelines. This analysis highlights research trends and the evolving role of retrieval in intelligent communications and networking. Finally, we introduce an LLM-based framework that integrates agentic contextual retrieval to improve telecom-specific planning and decision-making. This framework incorporates multi-source knowledge retrieval, reasoning-based decision augmentation, and contextual adaptation, leading to substantial improvements in network optimization, fault diagnosis, and adaptive policy enforcement.",
177
+ "bbox": [
178
+ 73,
179
+ 590,
180
+ 491,
181
+ 864
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "II. DIFFERENT RETRIEVAL METHODS FOR NETWORKING",
188
+ "text_level": 1,
189
+ "bbox": [
190
+ 80,
191
+ 873,
192
+ 485,
193
+ 887
194
+ ],
195
+ "page_idx": 1
196
+ },
197
+ {
198
+ "type": "text",
199
+ "text": "In intelligent networking, retrieval systems help process vast amounts of unstructured data, optimize spectrum usage, and",
200
+ "bbox": [
201
+ 75,
202
+ 891,
203
+ 491,
204
+ 922
205
+ ],
206
+ "page_idx": 1
207
+ },
208
+ {
209
+ "type": "text",
210
+ "text": "support AI-based network controllers [5]. In edge intelligence, retrieval techniques facilitate distributed learning, enhance federated AI models, and provide real-time recommendations with minimal latency. As shown in Fig 1, retrieval methods have evolved from traditional keyword-based approaches to hybrid and context-aware techniques, each addressing specific challenges in networking environments.",
211
+ "bbox": [
212
+ 501,
213
+ 335,
214
+ 921,
215
+ 441
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "A. Traditional Information Retrieval",
222
+ "text_level": 1,
223
+ "bbox": [
224
+ 503,
225
+ 463,
226
+ 756,
227
+ 477
228
+ ],
229
+ "page_idx": 1
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "Traditional information retrieval is based on matching query terms with exact keywords in the dataset, often using simple yet effective algorithms such as Boolean matching or vector space models. These methods calculate document relevance by scoring terms according to their frequency within a document (i.e., term frequency, TF) and across the entire dataset (i.e., inverse document frequency, IDF). The resulting relevance scores rank documents based on their alignment with the query. This approach works well in structured datasets with clear and consistent keyword distributions, such as early library catalog systems or archival searches. However, it does not account for the semantic meaning of terms or the broader context in which the query occurs. To address such issues, for example, Salton et al. [7] proposed a foundational vector space model where documents and queries are represented as vectors in a multi-dimensional space. The similarity between these vectors is computed using cosine similarity, allowing for efficient ranking of documents based on query relevance. Experimental results demonstrated that the vector space model improved retrieval precision by $15\\%$ compared to basic Boolean retrieval methods. However, when applied to dynamic datasets such as network resource management logs, its reliance on exact matches caused about a $20\\%$ drop in recall for queries involving synonyms or contextually related terms. These limitations highlight the need for more adaptive retrieval methods in real-time scenarios.",
234
+ "bbox": [
235
+ 501,
236
+ 481,
237
+ 921,
238
+ 875
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "B. Hybrid Retrieval",
245
+ "text_level": 1,
246
+ "bbox": [
247
+ 504,
248
+ 895,
249
+ 643,
250
+ 909
251
+ ],
252
+ "page_idx": 1
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "Hybrid retrieval combines traditional retrieval methods, such as TF-IDF scoring, with semantic embeddings generated by",
257
+ "bbox": [
258
+ 503,
259
+ 914,
260
+ 921,
261
+ 945
262
+ ],
263
+ "page_idx": 1
264
+ },
265
+ {
266
+ "type": "page_number",
267
+ "text": "2",
268
+ "bbox": [
269
+ 911,
270
+ 31,
271
+ 919,
272
+ 40
273
+ ],
274
+ "page_idx": 1
275
+ },
276
+ {
277
+ "type": "footer",
278
+ "text": "<sup>4</sup>https://gpt-index.readthedocs.io/en/latest/",
279
+ "bbox": [
280
+ 86,
281
+ 930,
282
+ 316,
283
+ 944
284
+ ],
285
+ "page_idx": 1
286
+ },
287
+ {
288
+ "type": "table",
289
+ "img_path": "images/20d149503ce823579a1847ceb3d4fef4eaa11248e027ed4433ef82e4f90d87b4.jpg",
290
+ "table_caption": [
291
+ "TABLEI COMPARISON OF KEY RETRIEVAL STRATEGIES."
292
+ ],
293
+ "table_footnote": [],
294
+ "table_body": "<table><tr><td>Retrieval methods</td><td>Training strategies</td><td>Applicable network types</td><td>User demands</td><td>Agentic AI applications</td><td>Application examples</td></tr><tr><td>Traditional Information retrieval</td><td>●Based on explicit keyword matching or Boolean logic.</td><td>●Works well in relatively static networks or environments [6].</td><td>●Focused on delivering relevant results based on exact keyword matches</td><td>●Limited agent-based applications but can be used in simple chatbot systems.</td><td>●Elasticsearch (https://github.com/elastic/elasticsearch)●Apache Lucene (https://github.com/apache/lucene)</td></tr><tr><td>Hybrid retrieval</td><td>●Combines traditional keyword-based retrieval and machine learning models (e.g., TF-IDF and BERT).</td><td>●Works well in dynamic environments where content is constantly changing, and user preferences need to be understood [7].</td><td>●Users demand a more refined search experience where results are also tailored to personal preferences.</td><td>●Chatbots and recommendation systems use hybrid retrieval to suggest products, content, or responses.</td><td>●Recommendation System (https://github.com/lyst/lightfm)●Nixisearch (https://github.com/nixisearch/nixi research)</td></tr><tr><td>Semantic retrieval</td><td>●Uses deep learning (e.g., word embeddings like Word2Vec and BERT, etc.) to understand the meaning behind the query and the documents.</td><td>●Works well in environments where understanding context is important [8].</td><td>●Users demand results that understand the intent behind their queries rather than just keyword matches.</td><td>●Widely used in AI agents like virtual assistants (Google Assistant, Siri), or knowledge-based agents.</td><td>●Semantic Search Engine (https://github.com/deepset-al/haystack)●Txtai (https://github.com/neuml/txtai)</td></tr><tr><td>Knowledge-based retrieval</td><td>●Uses rule-based approaches and inference engines to retrieve relevant information based on predefined knowledge structures.</td><td>●Primarily used in static or semi-static networks, where domain knowledge remains relatively constant but is highly structured [9].</td><td>●Users expect highly accurate, factual, and structured information based on established knowledge.</td><td>●AI agents can act as expert consultants in areas like healthcare (e.g., IBM Watson) or legal systems.</td><td>●Knowledge Graph Search (https://github.com/neoj4/neoj4)●SciTDLR (https://github.com/allena/scitdlr)</td></tr><tr><td>Agentic contextual retrieval</td><td>●Methods like Reinforcement Learning (RLHF) for adaptive retrieval or meta-learning for fast adaption.</td><td>●Works well in multi-agent and dynamic environments where context is constantly evolving [10].</td><td>●Users expect adaptive and personalized retrieval based on evolving queries.</td><td>●Used in autonomous AI assistants (ChatGPT Agents, Claude, Google Gemini).</td><td>●AI-Powered Coding Assistants (https://github.com/features/copilot)●ModelScope-Agent (https://github.com/modelscope/odelscope-agent)</td></tr></table>",
295
+ "bbox": [
296
+ 99,
297
+ 108,
298
+ 893,
299
+ 327
300
+ ],
301
+ "page_idx": 2
302
+ },
303
+ {
304
+ "type": "text",
305
+ "text": "pre-trained deep learning models such as BERT or GPT. This hybrid approach addresses the limitations of traditional methods by incorporating contextual understanding while maintaining computational efficiency. In hybrid retrieval, the process typically contains two stages: a coarse filtering stage, which uses lightweight traditional methods to identify a subset of candidate documents, followed by a re-ranking stage where semantic embeddings are applied to refine results. This two-stage approach ensures that hybrid retrieval is both efficient and accurate, making it particularly suitable for environments where computational resources are limited but semantic depth is required. In networking applications, hybrid retrieval can be particularly useful for AI-driven network monitoring and anomaly detection, where efficient pre-filtering combined with deep learning enables fast yet context-aware decision-making. For example, Zeng et al. [8] proposed a federated hybrid retrieval framework designed to integrate traditional TF-IDF filtering with semantic re-ranking using BERT embeddings. Their system processed candidate documents in two stages: first, TF-IDF was used to rapidly filter out irrelevant data at mobile edge nodes, significantly reducing the search space; second, the filtered candidates were semantically ranked using embeddings. Experimental results showed that this approach improved retrieval precision by $25\\%$ and reduced computational latency by $20\\%$ compared to other classical retrieval systems.",
306
+ "bbox": [
307
+ 73,
308
+ 357,
309
+ 491,
310
+ 750
311
+ ],
312
+ "page_idx": 2
313
+ },
314
+ {
315
+ "type": "text",
316
+ "text": "C. Semantic Retrieval",
317
+ "text_level": 1,
318
+ "bbox": [
319
+ 75,
320
+ 773,
321
+ 228,
322
+ 787
323
+ ],
324
+ "page_idx": 2
325
+ },
326
+ {
327
+ "type": "text",
328
+ "text": "Semantic retrieval uses deep neural networks, particularly transformer-based architectures such as BERT, to encode queries and documents into a shared embedding space. This embedding space captures the semantic relationships between terms, enabling the retrieval system to understand the intent behind the query rather than relying solely on exact keyword matches. Semantic retrieval excels in handling complex queries that involve ambiguous or domain-specific language, such as medical diagnostics and network troubleshooting. For example, Tang et al. [9] proposed a semantic retrieval",
329
+ "bbox": [
330
+ 73,
331
+ 792,
332
+ 490,
333
+ 946
334
+ ],
335
+ "page_idx": 2
336
+ },
337
+ {
338
+ "type": "text",
339
+ "text": "framework leveraging BERT-based embeddings to optimize resource allocation in wireless networks. By encoding queries and documents into a shared semantic space, the system retrieved contextually related documents even for complex queries such as \"dynamic spectrum sharing in 5G\". Their experiments demonstrated a $32\\%$ increase in recall compared to hybrid retrieval methods and an $18\\%$ improvement in precision.",
340
+ "bbox": [
341
+ 501,
342
+ 357,
343
+ 921,
344
+ 479
345
+ ],
346
+ "page_idx": 2
347
+ },
348
+ {
349
+ "type": "text",
350
+ "text": "D. Knowledge-Based Retrieval",
351
+ "text_level": 1,
352
+ "bbox": [
353
+ 504,
354
+ 502,
355
+ 718,
356
+ 517
357
+ ],
358
+ "page_idx": 2
359
+ },
360
+ {
361
+ "type": "text",
362
+ "text": "Knowledge-based retrieval integrates domain-specific ontologies and structured knowledge graphs to enhance retrieval performance. These systems excel in reasoning tasks by explicitly leveraging predefined relationships between entities, providing interpretable results that are often critical in regulated domains such as healthcare, finance, and telecommunications. In knowledge-based retrieval, it is performed by querying the knowledge graph to extract entities and their relationships that match the query context. This method allows for reasoning over linked data, enabling the retrieval of not just relevant documents but also actionable insights based on the relationships in the dataset. For example, Xiong et al. [10] proposed a knowledge graph-based retrieval system for wireless spectrum management. Their framework utilized a graph structure where nodes represented entities such as \"spectrum bands,\" \"user demands,\" and \"interference levels,\" while edges captured relationships such as \"interferes with\" or \"assigned to.\" The key advantage of this approach lies in its ability to provide structured, explainable decisions based on predefined rules. The system achieved a $25\\%$ improvement in spectrum allocation efficiency and a $30\\%$ reduction in interference conflicts compared to heuristic-based methods.",
363
+ "bbox": [
364
+ 501,
365
+ 522,
366
+ 921,
367
+ 854
368
+ ],
369
+ "page_idx": 2
370
+ },
371
+ {
372
+ "type": "text",
373
+ "text": "E. Agentic Contextual Retrieval",
374
+ "text_level": 1,
375
+ "bbox": [
376
+ 504,
377
+ 878,
378
+ 725,
379
+ 893
380
+ ],
381
+ "page_idx": 2
382
+ },
383
+ {
384
+ "type": "text",
385
+ "text": "Agentic contextual retrieval leverages intelligent agent-based control mechanisms to dynamically adjust retrieval strategies based on task-specific requirements, multimodal",
386
+ "bbox": [
387
+ 503,
388
+ 898,
389
+ 921,
390
+ 946
391
+ ],
392
+ "page_idx": 2
393
+ },
394
+ {
395
+ "type": "page_number",
396
+ "text": "3",
397
+ "bbox": [
398
+ 911,
399
+ 31,
400
+ 919,
401
+ 39
402
+ ],
403
+ "page_idx": 2
404
+ },
405
+ {
406
+ "type": "image",
407
+ "img_path": "images/a4c32f6074e92f6d7720240033cd20dea9f3dcb36b54fa4862d32ebae0bd4673.jpg",
408
+ "image_caption": [
409
+ "Fig. 2. A summary of recent retrieval methods in communications and networking, which provides an overview of various proposals, research scenarios, and levels of human-AI interaction."
410
+ ],
411
+ "image_footnote": [],
412
+ "bbox": [
413
+ 102,
414
+ 71,
415
+ 897,
416
+ 275
417
+ ],
418
+ "page_idx": 3
419
+ },
420
+ {
421
+ "type": "text",
422
+ "text": "data integration, and real-time environmental changes. Unlike traditional or semantic retrieval methods, which rely on static queries and predefined indexing, this approach enables adaptive, goal-driven information extraction that continuously refines itself based on evolving conditions. By incorporating real-time system states, historical patterns, and structured knowledge representations, agentic contextual retrieval ensures high adaptability and context-aware decision-making, making it particularly suited for applications in network optimization, autonomous systems, and intelligent fault diagnostics. A key advantage of agentic contextual retrieval is its ability to enable autonomous decision-making agents that actively monitor, retrieve, and reason over multiple data sources to enhance performance in complex, dynamic environments. For example, Kagaya et al. [11] proposed a retrieval framework for autonomous driving, where an agent-driven control mechanism integrated LiDAR, GPS, real-time traffic updates, and weather conditions to dynamically adjust navigation strategies. By enabling real-time, intelligent retrieval and control, their system reduced recalibration time by $40\\%$ and improved navigation accuracy by $28\\%$ .",
423
+ "bbox": [
424
+ 73,
425
+ 339,
426
+ 491,
427
+ 656
428
+ ],
429
+ "page_idx": 3
430
+ },
431
+ {
432
+ "type": "text",
433
+ "text": "F. Retrieval Comparison and Lessons Learned",
434
+ "text_level": 1,
435
+ "bbox": [
436
+ 73,
437
+ 669,
438
+ 397,
439
+ 684
440
+ ],
441
+ "page_idx": 3
442
+ },
443
+ {
444
+ "type": "text",
445
+ "text": "Retrieval methods vary significantly in their methodologies, applications, and suitability for different networking scenarios. Specifically, traditional retrieval, which relies on explicit keyword matching, is well-suited for static local network management, where queries are simple, computational resources are limited, and speed is prioritized. Hybrid retrieval combines keyword-based search with machine learning models, making it effective for dynamic network environments, such as adaptive caching or content distribution, where user preferences evolve over time. Semantic retrieval, powered by deep learning models, enhances intent-driven network diagnostics by capturing query context, making it particularly useful for automated fault detection and troubleshooting in telecom networks. Knowledge-based retrieval, leveraging structured inference models, supports rule-based network security and access control, where highly accurate, structured decision-making is critical. Finally, agentic contextual retrieval offers",
446
+ "bbox": [
447
+ 73,
448
+ 686,
449
+ 491,
450
+ 946
451
+ ],
452
+ "page_idx": 3
453
+ },
454
+ {
455
+ "type": "text",
456
+ "text": "adaptive and real-time decision support in multi-agent network control systems, where dynamic environmental factors, such as interference levels or traffic congestion, require continuous learning and adjustment [11]. Table I summarizes these strategies, highlighting their core features, training methods, and example applications.",
457
+ "bbox": [
458
+ 501,
459
+ 339,
460
+ 921,
461
+ 430
462
+ ],
463
+ "page_idx": 3
464
+ },
465
+ {
466
+ "type": "text",
467
+ "text": "Moreover, we conduct a review of recent retrieval-based approaches in communications and networking from 2023 to late 2024, as summarized in Fig. 2. Our analysis categorizes retrieval strategies into traditional, hybrid, semantic, knowledge-based, and agentic contextual retrieval, highlighting their applications across various domains, including wireless communications, network optimization, and intelligent decision-making. While retrieval-augmented methods have been increasingly integrated into AI-driven network resource management and semantic communication, we observe that agentic contextual retrieval remains largely unexplored for telecommunications-specific applications. Moreover, while [11] demonstrates agentic contextual retrieval for autonomous driving control, there is currently no direct implementation tailored for communication networks and telecom infrastructure. To fill this gap, the next section introduces our proposed framework, which leverages agentic contextual retrieval to enhance intelligent decision-making, troubleshooting, and autonomous adaptation in telecommunications and networking systems.",
468
+ "bbox": [
469
+ 501,
470
+ 430,
471
+ 921,
472
+ 718
473
+ ],
474
+ "page_idx": 3
475
+ },
476
+ {
477
+ "type": "text",
478
+ "text": "III. CASE STUDY: AGENTIC CONTEXTUAL RETRIEVAL FOR NETWORKING",
479
+ "text_level": 1,
480
+ "bbox": [
481
+ 519,
482
+ 737,
483
+ 906,
484
+ 767
485
+ ],
486
+ "page_idx": 3
487
+ },
488
+ {
489
+ "type": "text",
490
+ "text": "A. Motivation",
491
+ "text_level": 1,
492
+ "bbox": [
493
+ 504,
494
+ 773,
495
+ 604,
496
+ 787
497
+ ],
498
+ "page_idx": 3
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "In next-generation communications and networking, efficient resource allocation, adaptive service provisioning, and intelligent decision-making are crucial for optimizing user experience and network efficiency. Modern communication systems are shifting towards intent-driven networking, where mobile users express high-level requirements in natural language, and the network autonomously interprets and executes these requests. However, this paradigm introduces significant challenges in bridging the gap between user intents, structured communication standards, and real-time network",
503
+ "bbox": [
504
+ 501,
505
+ 792,
506
+ 921,
507
+ 946
508
+ ],
509
+ "page_idx": 3
510
+ },
511
+ {
512
+ "type": "page_number",
513
+ "text": "4",
514
+ "bbox": [
515
+ 911,
516
+ 31,
517
+ 919,
518
+ 39
519
+ ],
520
+ "page_idx": 3
521
+ },
522
+ {
523
+ "type": "image",
524
+ "img_path": "images/63738ac423ae7c446522285894ff5eb8bbe4643fcdee658f422399b94b8625e3.jpg",
525
+ "image_caption": [
526
+ "Fig. 3. Illustration of the agentic contextual retrieval enhanced intelligent base station for troubleshooting and decision-making. The framework follows a structured four-step workflow: (A) Query understanding and reformulation ensure alignment with 3GPP terminology using LLM-based query expansion. (B) Multi-source knowledge retrieval extracts relevant information from both structured (e.g., 3GPP standards) and unstructured (e.g., online sources) datasets. (C) Contextual evidence aggregation and reasoning synthesize retrieved knowledge into structured responses using chain-of-thought reasoning. (D) Decision-making and self-validation enhance accuracy through confidence-based verification and iterative refinement, reducing hallucinations and improving response consistency."
527
+ ],
528
+ "image_footnote": [],
529
+ "bbox": [
530
+ 101,
531
+ 69,
532
+ 901,
533
+ 270
534
+ ],
535
+ "page_idx": 4
536
+ },
537
+ {
538
+ "type": "text",
539
+ "text": "configurations. A key challenge lies in mapping natural language intent descriptions to actionable network configurations, requiring an understanding of both human semantics and telecommunications-specific knowledge. Traditional rule-based methods or static intent templates are insufficient in handling diverse user demands and evolving network conditions [4]. LLMs offer a promising solution due to their strong natural language understanding (NLU) and reasoning capabilities. However, LLMs lack domain-specific knowledge in telecommunications, such as 3GPP standards, intent translation templates, and network control logic. Consequently, their direct application to network automation remains limited by knowledge incompleteness, retrieval inefficiency, and contextual inconsistency.",
540
+ "bbox": [
541
+ 73,
542
+ 375,
543
+ 491,
544
+ 587
545
+ ],
546
+ "page_idx": 4
547
+ },
548
+ {
549
+ "type": "text",
550
+ "text": "To address these challenges, we propose a retrieval-enhanced intelligent base station architecture, where the network dynamically retrieves, synthesizes, and applies knowledge from 3GPP standards, network logs, and external telecom repositories to enhance decision-making. Specifically, the system employs a hybrid retrieval framework to convert user-generated intents into structured network actions, using a template-based approach that aligns with communication paradigms outlined in 3GPP [12]. In this framework, user requests (e.g., \"I need ultra-low latency for cloud gaming\") are processed by the network's AI module, which retrieves relevant telecom policies and configurations before generating a customized communication plan. Despite the advantages of retrieval-augmented LLMs, conventional retrieval-augmented generation (RAG) techniques face critical limitations in telecom-specific applications, including: (i) Contextual Ambiguity: Simple keyword-based retrieval struggles to retrieve relevant 3GPP policies and network parameters, as user intents often involve multiple layers of contextual interpretation. (ii) Data Sparsity: Telecommunications standards and policy documents are highly structured, yet spread across multiple releases and fragmented into different standardization documents. (iii) Retrieval Inefficiency: Traditional retrieval",
551
+ "bbox": [
552
+ 75,
553
+ 597,
554
+ 491,
555
+ 946
556
+ ],
557
+ "page_idx": 4
558
+ },
559
+ {
560
+ "type": "text",
561
+ "text": "approaches lack multi-hop reasoning, failing to link user intents with both historical network behavior and real-time conditions.",
562
+ "bbox": [
563
+ 503,
564
+ 375,
565
+ 921,
566
+ 419
567
+ ],
568
+ "page_idx": 4
569
+ },
570
+ {
571
+ "type": "text",
572
+ "text": "To overcome these limitations, we introduce an agentic contextual retrieval framework, which integrates multi-source knowledge retrieval, structured reasoning, and self-reflective validation to enhance intent-driven networking. Our framework enables intelligent base stations to map user intents to network configurations in real-time, leveraging LLM-powered decision-making while ensuring alignment with 3GPP compliance, traffic optimization strategies, and real-world deployment policies.",
573
+ "bbox": [
574
+ 503,
575
+ 421,
576
+ 921,
577
+ 556
578
+ ],
579
+ "page_idx": 4
580
+ },
581
+ {
582
+ "type": "text",
583
+ "text": "B. Agentic Contextual Retrieval Framework",
584
+ "text_level": 1,
585
+ "bbox": [
586
+ 504,
587
+ 577,
588
+ 805,
589
+ 592
590
+ ],
591
+ "page_idx": 4
592
+ },
593
+ {
594
+ "type": "text",
595
+ "text": "As shown in Fig. 3, the deployment of the agentic contextual retrieval framework follows a structured four-step workflow, designed to enhance the retrieval, reasoning, and validation of knowledge specific to 3GPP standards and telecommunications networks.",
596
+ "bbox": [
597
+ 503,
598
+ 597,
599
+ 921,
600
+ 670
601
+ ],
602
+ "page_idx": 4
603
+ },
604
+ {
605
+ "type": "text",
606
+ "text": "1) Knowledge Preparation and Query Understanding: The system first loads 3GPP standards and network documentation from a database, segments them into context-aware knowledge chunks, and vectorizes them using sentence-transformer embeddings. To enable efficient semantic retrieval, the vectorized knowledge chunks are indexed using a vector database, allowing for fast similarity searches. Once a query is received, the system analyzes user intent and performs query reformulation, ensuring that the query aligns with 3GPP-defined communication paradigms and technical configurations. In practice, telecommunications queries often contain ambiguous terms, incomplete phrasing, or require historical cross-referencing across multiple 3GPP releases. Therefore, it is necessary to fully understand the user intent and the key concepts in this context to improve retrieval accuracy. Specifically, LLMs can be used for this purpose, ensuring longitudinal consistency when retrieving regulatory and technical specifications [13]. In our experimental setup, user",
607
+ "bbox": [
608
+ 503,
609
+ 672,
610
+ 921,
611
+ 946
612
+ ],
613
+ "page_idx": 4
614
+ },
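
As a concrete illustration of this knowledge-preparation step, the sketch below chunks documents, embeds the chunks with a sentence-transformer, indexes them in FAISS, and runs a similarity search for the user query. It is a minimal sketch rather than the authors' code: the chunk size, overlap, embedding model, and FAISS index follow the simulation settings reported in Section III-C, while the sample documents and query are invented.

```python
# Minimal sketch of Step 1 (knowledge preparation): chunk documents, embed
# them with a sentence-transformer, and index them in FAISS for similarity
# search. Requires `pip install sentence-transformers faiss-cpu`; the sample
# documents below are placeholders, not real 3GPP text.
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer


def chunk_text(text: str, size: int = 1000, overlap: int = 100) -> list[str]:
    """Split a document into overlapping fixed-size character chunks."""
    step = size - overlap
    return [text[i:i + size] for i in range(0, max(len(text) - overlap, 1), step)]


documents = [
    "URLLC targets 1 ms user-plane latency and 99.999% reliability ...",
    "Network slicing SLA assurance is specified in TS 28.531 and TS 28.554 ...",
]
chunks = [c for doc in documents for c in chunk_text(doc)]

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
embeddings = model.encode(chunks, normalize_embeddings=True)

# Inner product over L2-normalized vectors equals cosine similarity.
index = faiss.IndexFlatIP(embeddings.shape[1])
index.add(np.asarray(embeddings, dtype="float32"))

# Query reformulation would normally be done by an LLM; here we search
# directly with the user query.
query = "ultra-reliable low-latency communication for industrial automation"
q_emb = model.encode([query], normalize_embeddings=True).astype("float32")
scores, ids = index.search(q_emb, k=2)
for rank, (i, s) in enumerate(zip(ids[0], scores[0]), start=1):
    print(f"{rank}. score={s:.3f} chunk={chunks[i][:60]}...")
```
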
615
+ {
616
+ "type": "page_number",
617
+ "text": "5",
618
+ "bbox": [
619
+ 911,
620
+ 30,
621
+ 919,
622
+ 40
623
+ ],
624
+ "page_idx": 4
625
+ },
626
+ {
627
+ "type": "image",
628
+ "img_path": "images/aa7c8b674ef129d6247e35c9bddd339de200c51297552f4109ce9c561348d55d.jpg",
629
+ "image_caption": [
630
+ "Fig. 4. Performance comparison of Agentic Contextual Retrieval against baseline methods, including Qwen-Max without retriever, traditional retrieval, and semantic retrieval."
631
+ ],
632
+ "image_footnote": [],
633
+ "bbox": [
634
+ 143,
635
+ 70,
636
+ 861,
637
+ 231
638
+ ],
639
+ "page_idx": 5
640
+ },
641
+ {
642
+ "type": "text",
643
+ "text": "intent queries, such as customized communication service requests (e.g., \"I need ultra-reliable low-latency communication for industrial automation\"), are first parsed, and key concepts such as \"ultra-reliable low-latency\" and \"role of URLLC in industrial automation\" are extracted.",
644
+ "bbox": [
645
+ 73,
646
+ 291,
647
+ 491,
648
+ 366
649
+ ],
650
+ "page_idx": 5
651
+ },
652
+ {
653
+ "type": "text",
654
+ "text": "2) Multi-Source Knowledge Retrieval: Following query optimization, the second step involves multi-source retrieval to ensure both completeness and relevance in decision-making for network configuration and policy enforcement. Next, we integrate semantic vector-based retrieval with embedding models to extract key information from 3GPP specifications, network operation policies, and real-time telecom deployment scenarios. Embedding models generate dense vector representations of text, enabling context-aware similarity search rather than relying on exact keyword matches [14]. To further improve accuracy, structured knowledge representations establish relationships between frequency bands, protocol parameters, and QoS metrics, refining query precision. Additionally, real-time retrieval from online repositories ensures access to the latest standardization updates. For instance, when retrieving information on \"5G network slicing SLA guarantees,\" the system uses an embedding model to identify semantically relevant sections from TS 28.531 (Performance Assurance) and TS 28.554 (KPI Definitions) while incorporating recent case studies from network operators.",
655
+ "bbox": [
656
+ 73,
657
+ 368,
658
+ 491,
659
+ 671
660
+ ],
661
+ "page_idx": 5
662
+ },
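
The multi-source retrieval described here can be pictured as running a similarity search against several indexes (e.g., 3GPP specifications, operator policies, recent online updates) and merging the ranked hits. The sketch below is an illustrative assumption about such a merge step; the source names and `Hit` structure are invented, not taken from the paper.

```python
# Illustrative merge of ranked hits coming from several knowledge sources.
# Each source would normally be its own embedding index; here the hit lists
# are hard-coded stand-ins for real index lookups.
from dataclasses import dataclass


@dataclass
class Hit:
    source: str   # e.g. "3gpp_specs", "operator_policies", "web_updates"
    text: str
    score: float  # cosine similarity in [0, 1]


def merge_hits(per_source_hits: list[list[Hit]], top_k: int = 5) -> list[Hit]:
    """Merge ranked lists from different sources by score, de-duplicating text."""
    seen, merged = set(), []
    for hit in sorted((h for hits in per_source_hits for h in hits),
                      key=lambda h: h.score, reverse=True):
        if hit.text not in seen:
            seen.add(hit.text)
            merged.append(hit)
    return merged[:top_k]


specs = [Hit("3gpp_specs", "TS 28.531: slice SLA assurance procedures", 0.82)]
web = [Hit("web_updates", "Operator case study on slicing SLA guarantees", 0.74),
       Hit("web_updates", "TS 28.531: slice SLA assurance procedures", 0.69)]

for h in merge_hits([specs, web], top_k=3):
    print(f"[{h.source}] {h.score:.2f} {h.text}")
```
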
663
+ {
664
+ "type": "text",
665
+ "text": "3) Contextual Evidence Aggregation and Reasoning: Once relevant information is retrieved, the third step focuses on contextual evidence aggregation and reasoning, where multi-source knowledge is condensed into a structured and interpretable response. Given the vast amount of information available in telecom standardization, it is crucial to eliminate redundancy, enhance clarity, and ensure that the extracted content directly addresses the query [13]. Specifically, we use an LLM-powered reasoning agent, which autonomously identifies the most relevant text segments in the retrieved content based on the reformulated query. The agent then synthesizes these segments into a concise, context-aware summary, ensuring that only the most important evidence is retained, and irrelevant or redundant information is discarded. For example, in response to a question like \"What is the role of the serving network in fraud control?\", the retrieved information may contain detailed descriptions of charging functions, fraud detection, and policy enforcement. Instead of presenting all these details, the agent",
666
+ "bbox": [
667
+ 73,
668
+ 672,
669
+ 491,
670
+ 946
671
+ ],
672
+ "page_idx": 5
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "analyzes the content, extracts the core function of the serving network in fraud prevention, and generates a concise summary, emphasizing its role in real-time data collection and cost control.",
677
+ "bbox": [
678
+ 501,
679
+ 291,
680
+ 921,
681
+ 349
682
+ ],
683
+ "page_idx": 5
684
+ },
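
One way to picture the evidence-aggregation step is a prompt that asks an LLM to keep only the query-relevant passages and summarize them with citations to the evidence numbers. The prompt wording below is our own illustration and `call_llm` is a placeholder for whatever chat-completion client is actually used; neither is the paper's exact implementation.

```python
# Sketch of contextual evidence aggregation: build a summarization prompt over
# the retrieved chunks and delegate the condensation to an LLM. `call_llm` is
# a hypothetical wrapper around an LLM client.
def build_aggregation_prompt(reformulated_query: str, chunks: list[str]) -> str:
    evidence = "\n\n".join(f"[{i + 1}] {c}" for i, c in enumerate(chunks))
    return (
        "You are a telecom standards assistant.\n"
        f"Question: {reformulated_query}\n\n"
        f"Retrieved evidence:\n{evidence}\n\n"
        "Keep only the passages that answer the question, discard redundant or "
        "irrelevant material, and write a concise summary citing the evidence "
        "numbers it relies on."
    )


def call_llm(prompt: str) -> str:  # placeholder for a real LLM call
    return "Summary: the serving network supplies real-time usage data for fraud control ... [1][3]"


chunks = [
    "The serving network reports charging events to the home network ...",
    "Policy enforcement details unrelated to fraud ...",
    "Fraud control relies on real-time usage data collected by the serving network ...",
]
print(call_llm(build_aggregation_prompt(
    "What is the role of the serving network in fraud control?", chunks)))
```
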
685
+ {
686
+ "type": "text",
687
+ "text": "4) Decision-Making and Self-Validation: The final step involves a decision-making agent that simultaneously generates both the network action recommendations and justifications based on the optimized query and refined retrieval results. This agent applies CoT reasoning to synthesize a structured response, ensuring that the explanation logically supports the answer by drawing from the retrieved evidence [15]. To enhance reliability, a self-reflection agent evaluates the generated response, critically reviewing both the answer and explanation for consistency, factual accuracy, and alignment with authoritative 3GPP standards. If inconsistencies, incomplete reasoning, or speculative conclusions are detected, the self-reflection agent challenges the response and triggers an iterative refinement loop.",
688
+ "bbox": [
689
+ 501,
690
+ 352,
691
+ 921,
692
+ 564
693
+ ],
694
+ "page_idx": 5
695
+ },
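
A minimal sketch of this generate-reflect-refine loop is given below. The two agent functions stand in for LLM calls, and the confidence threshold and retry budget are arbitrary illustrative values rather than parameters reported in the paper.

```python
# Sketch of decision-making with self-validation: an answer agent proposes a
# response with justification, a reflection agent scores it, and the loop
# refines the response until the critic is confident or the retry budget is
# spent. Both agents are trivial stand-ins for LLM calls.
def answer_agent(query: str, evidence: str, feedback: str = "") -> dict:
    note = f" (revised after: {feedback})" if feedback else ""
    return {"answer": "Allocate a URLLC slice with a 1 ms latency target" + note,
            "explanation": "Derived from the retrieved SLA clauses."}


def reflection_agent(response: dict, evidence: str) -> tuple[float, str]:
    """Return (confidence in [0, 1], critique); here a toy grounding check."""
    grounded = "URLLC" in response["answer"] and "retrieved" in response["explanation"]
    return (0.9, "consistent with evidence") if grounded else (0.3, "answer not grounded")


def decide(query: str, evidence: str, threshold: float = 0.8, max_rounds: int = 3) -> dict:
    feedback = ""
    for _ in range(max_rounds):
        response = answer_agent(query, evidence, feedback)
        confidence, feedback = reflection_agent(response, evidence)
        if confidence >= threshold:
            break
    return response  # best effort after the retry budget


print(decide("I need ultra-low latency for cloud gaming", "slice SLA evidence ..."))
```
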
696
+ {
697
+ "type": "text",
698
+ "text": "C. Simulation",
699
+ "text_level": 1,
700
+ "bbox": [
701
+ 504,
702
+ 587,
703
+ 604,
704
+ 601
705
+ ],
706
+ "page_idx": 5
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "Simulation Settings: Our simulation is conducted using a structured retrieval and reasoning pipeline, integrating multiple knowledge sources and agent-driven query optimization. We employ Qwen2.5-Max as the base LLM, leveraging its advanced reasoning capabilities for telecom-related question-answering tasks. To evaluate retrieval performance, we selected 50 structured QA pairs related to 3GPP R18 from the TeleQnA dataset, which serves as the primary benchmark. For additional technical context, we use the 3GPP R18 dataset. To ensure retrieval efficiency, we utilize FAISS, an indexing tool optimized for high-speed vector similarity search. The document processing workflow involves segmenting 3GPP standard documents into 1000-character chunks with a 100-character overlap, followed by embedding generation using Mpnet-base-V2 $^{8}$ , a transformer-based model trained for dense vector representations. To evaluate the effectiveness of the proposed Agentic contextual retrieval framework, we compare its performance against three baselines: (i) Qwen-Max",
711
+ "bbox": [
712
+ 501,
713
+ 607,
714
+ 921,
715
+ 880
716
+ ],
717
+ "page_idx": 5
718
+ },
719
+ {
720
+ "type": "list",
721
+ "sub_type": "ref_text",
722
+ "list_items": [
723
+ "<sup>5</sup>https://huggingface.co/spaces/Qwen/Qwen2.5-Max-Demo",
724
+ "<sup>6</sup>https://huggingface.co/datasets/netop/3GPP-R18",
725
+ "<sup>7</sup>https://github.com/facebookresearch/faiss",
726
+ "<sup>8</sup>https://huggingface.co/sentence-transformers/all-mpnet-base-v2"
727
+ ],
728
+ "bbox": [
729
+ 517,
730
+ 893,
731
+ 867,
732
+ 944
733
+ ],
734
+ "page_idx": 5
735
+ },
736
+ {
737
+ "type": "page_number",
738
+ "text": "6",
739
+ "bbox": [
740
+ 911,
741
+ 31,
742
+ 919,
743
+ 40
744
+ ],
745
+ "page_idx": 5
746
+ },
747
+ {
748
+ "type": "text",
749
+ "text": "without Retriever, representing a pure LLM-based approach, (ii) Qwen-Max with Traditional Retriever, utilizing standard retrieval-based augmentation, and (iii) Qwen-Max with Semantic Retriever, incorporating semantic embedding-based retrieval. The comparison is conducted across four key evaluation metrics, i.e., Answer Matching Accuracy, Answer Text F1 Score, Explanation BERT Score, and Explanation Cosine Similarity, as shown in Fig. 4.",
750
+ "bbox": [
751
+ 73,
752
+ 69,
753
+ 491,
754
+ 189
755
+ ],
756
+ "page_idx": 6
757
+ },
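
Two of these metrics can be approximated with standard definitions: token-level F1 between the predicted and reference answer texts, and cosine similarity between embeddings of the explanations. The snippet below uses those standard definitions with the same embedding model as the retrieval pipeline; it is an approximation, not the authors' exact evaluation script.

```python
# Approximate versions of two reported metrics: answer token-level F1 and
# explanation cosine similarity. The toy strings are placeholders.
from collections import Counter
from sentence_transformers import SentenceTransformer, util


def token_f1(prediction: str, reference: str) -> float:
    pred, ref = prediction.lower().split(), reference.lower().split()
    overlap = sum((Counter(pred) & Counter(ref)).values())
    if overlap == 0:
        return 0.0
    precision, recall = overlap / len(pred), overlap / len(ref)
    return 2 * precision * recall / (precision + recall)


model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")


def explanation_cosine(pred_expl: str, ref_expl: str) -> float:
    emb = model.encode([pred_expl, ref_expl], normalize_embeddings=True)
    return float(util.cos_sim(emb[0], emb[1]))


print(token_f1("URLLC targets one millisecond latency", "URLLC targets 1 ms latency"))
print(explanation_cosine("The slice meets the SLA because of its latency budget.",
                         "SLA compliance follows from the configured latency budget."))
```
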
758
+ {
759
+ "type": "text",
760
+ "text": "Fig. 4 demonstrates that Agentic contextual retrieval consistently outperforms all baseline methods across all evaluation metrics. In particular, the proposed framework achieves an answer matching accuracy of $84\\%$ and an answer text F1 score of $90.37\\%$, surpassing the performance of semantic retrieval (i.e., $80\\%$) and traditional retrieval (i.e., $74\\%$), underscoring its effectiveness in generating precise and contextually relevant responses. This improvement is attributed to its dynamic multi-source retrieval, which integrates structured 3GPP standards with external knowledge repositories; its query reformulation mechanisms, which ensure alignment with telecom-specific terminology; and its structured reasoning pipeline, which employs CoT decision-making and self-validation loops to enhance logical consistency and factual accuracy. Moreover, unlike conventional retrieval methods that rely on static document matching, Agentic contextual retrieval dynamically extracts, synthesizes, and validates multi-hop contextual information, significantly enhancing retrieval precision and response coherence. Furthermore, the explanation quality also benefits significantly from our approach, as evidenced by the Explanation BERT Score (i.e., $90.95\\%$) and Cosine Similarity (i.e., $80.83\\%$), both of which outperform alternative retrieval methods. These improvements stem from the framework's ability to synthesize multi-source knowledge, apply structured reasoning, and iteratively refine responses through self-reflection mechanisms. In contrast, the semantic retrieval baseline, while effective at contextual retrieval, lacks robust reasoning capabilities and multi-turn validation, limiting its ability to handle complex telecom-specific queries.",
761
+ "bbox": [
762
+ 73,
763
+ 189,
764
+ 493,
765
+ 628
766
+ ],
767
+ "page_idx": 6
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "IV. FUTURE DIRECTIONS",
772
+ "text_level": 1,
773
+ "bbox": [
774
+ 189,
775
+ 638,
776
+ 375,
777
+ 652
778
+ ],
779
+ "page_idx": 6
780
+ },
781
+ {
782
+ "type": "text",
783
+ "text": "Security and Privacy in Retrieval-Augmented Networks: As agentic contextual retrieval frameworks increasingly rely on multi-source knowledge retrieval, ensuring data integrity, confidentiality, and adversarial robustness is critical. Future research should explore privacy-preserving retrieval techniques, such as federated retrieval, secure multi-party computation, and differential privacy-enhanced retrieval models, to mitigate risks associated with unauthorized data access and adversarial attacks in wireless and networking applications.",
784
+ "bbox": [
785
+ 73,
786
+ 657,
787
+ 490,
788
+ 792
789
+ ],
790
+ "page_idx": 6
791
+ },
792
+ {
793
+ "type": "text",
794
+ "text": "Energy-Efficient and Low-Latency Retrieval Networking Architectures: Deploying LLM-driven agentic contextual retrieval frameworks in real-world wireless and networking environments requires optimized inference efficiency and low-latency retrieval mechanisms. Future studies could investigate mobile device-aware retrieval strategies, knowledge distillation for lightweight retrieval models, and edge-based retrieval deployment to minimize computational overhead while maintaining retrieval accuracy in resource-constrained environments, such as 5G edge nodes and IoT devices.",
795
+ "bbox": [
796
+ 73,
797
+ 794,
798
+ 491,
799
+ 944
800
+ ],
801
+ "page_idx": 6
802
+ },
803
+ {
804
+ "type": "text",
805
+ "text": "Network-Aware Adaptive Retrieval for Real-Time Optimization: As telecom networks become increasingly complex and dynamic, retrieval systems must not only process knowledge efficiently but also adapt to real-time network conditions, congestion levels, and QoS constraints. Future research should explore network-aware retrieval architectures that dynamically adjust retrieval latency, query granularity, and resource allocation based on real-time network traffic and topology changes. Techniques such as reinforcement learning-based retrieval scheduling, adaptive caching, and traffic-aware retrieval pipelines could significantly enhance the responsiveness and efficiency in networking environments.",
806
+ "bbox": [
807
+ 501,
808
+ 69,
809
+ 921,
810
+ 250
811
+ ],
812
+ "page_idx": 6
813
+ },
814
+ {
815
+ "type": "text",
816
+ "text": "V. CONCLUSION",
817
+ "text_level": 1,
818
+ "bbox": [
819
+ 651,
820
+ 267,
821
+ 774,
822
+ 281
823
+ ],
824
+ "page_idx": 6
825
+ },
826
+ {
827
+ "type": "text",
828
+ "text": "We have presented a forward-looking perspective on generative information retrieval-inspired intelligent communications and networking, emphasizing the role of retrieval in enhancing agentic AI for telecom systems. We have provided a comprehensive review of retrieval strategies. Additionally, we have reviewed recent retrieval-based studies in communications and networking. Then, we have introduced an LLM-based agentic contextual retrieval framework, which integrates multi-source knowledge retrieval, structured reasoning, and self-validation.",
829
+ "bbox": [
830
+ 501,
831
+ 286,
832
+ 921,
833
+ 421
834
+ ],
835
+ "page_idx": 6
836
+ },
837
+ {
838
+ "type": "text",
839
+ "text": "REFERENCES",
840
+ "text_level": 1,
841
+ "bbox": [
842
+ 663,
843
+ 439,
844
+ 761,
845
+ 452
846
+ ],
847
+ "page_idx": 6
848
+ },
849
+ {
850
+ "type": "list",
851
+ "sub_type": "ref_text",
852
+ "list_items": [
853
+ "[1] A. Maatouk, N. Piovesan et al., \"Large language models for telecom: Forthcoming impact on the industry,\" IEEE Commun. Mag., vol. 63, no. 1, pp. 62-68, 2025.",
854
+ "[2] S. Sivakumar, \"Agentic AI in predictive AIOps: Enhancing IT autonomy and performance,\" IJSRM, vol. 12, no. 11, pp. 1631-1638, 2024.",
855
+ "[3] R. Zhang et al., \"Generative AI agents with large language model for satellite networks via a mixture of experts transmission,\" IEEE J. Sel. Area. Comm., vol. 42, no. 12, pp. 3581-3596, 2024.",
856
+ "[4] K. Dev, S. A. Khowaja, E. Zeydan, and M. Debbah, “Advanced architectures integrated with agentic AI for next-generation wireless networks,” arXiv preprint arXiv:2502.01089, 2025.",
857
+ "[5] A. Singh et al., \"Agentic retrieval-augmented generation: A survey on agentic RAG,\" arXiv preprint arXiv:2501.09136, 2025.",
858
+ "[6] S. Anupam, A. Shypula, and O. Bastani, \"LLM program optimization via retrieval augmented search,\" arXiv preprint arXiv:2501.18916, 2025.",
859
+ "[7] R. Zhang, H. Du, Y. Liu et al., \"Interactive AI with retrieval-augmented generation for next generation networking,\" IEEE Network, vol. 38, no. 6, pp. 414-424, 2024.",
860
+ "[8] H. Zeng et al., \"Federated recommendation via hybrid retrieval augmented generation,\" arXiv preprint arXiv:2403.04256, 2024.",
861
+ "[9] S. Tang et al., “Retrieval-augmented generation for GenAI-enabled semantic communications,” arXiv preprint arXiv:2412.19494, 2024.",
862
+ "[10] Y. Xiong et al., \"When graph meets retrieval augmented generation for wireless networks: A tutorial and case study,\" arXiv preprint arXiv:2412.07189, 2024.",
863
+ "[11] T. Kagaya et al., \"RAP: Retrieval-augmented planning with contextual memory for multimodal LLM agents,\" arXiv preprint arXiv:2402.03610, 2024.",
864
+ "[12] A. Maatouk et al., \"TeleQnA: A benchmark dataset to assess large language models telecommunications knowledge,\" arXiv preprint arXiv:2310.15051, 2023.",
865
+ "[13] X. Li, G. Dong, J. Jin, Y. Zhang, Y. Zhou, Y. Zhu, P. Zhang, and Z. Dou, \"Search-o1: Agentic search-enhanced large reasoning models,\" arXiv preprint arXiv:2501.05366, 2025.",
866
+ "[14] S. Shankar, T. Chambers, T. Shah, A. G. Parameswaran, and E. Wu, \"Docetl: Agentic query rewriting and evaluation for complex document processing,\" arXiv preprint arXiv:2410.12189, 2024.",
867
+ "[15] F. Ayed, A. Maatouk, N. Piovesan, A. De Domenico, M. Debbah, and Z.-Q. Luo, “Hermes: A large language model framework on the journey to autonomous networks,” arXiv preprint arXiv:2411.06490, 2024."
868
+ ],
869
+ "bbox": [
870
+ 506,
871
+ 458,
872
+ 921,
873
+ 912
874
+ ],
875
+ "page_idx": 6
876
+ },
877
+ {
878
+ "type": "page_number",
879
+ "text": "7",
880
+ "bbox": [
881
+ 911,
882
+ 30,
883
+ 919,
884
+ 39
885
+ ],
886
+ "page_idx": 6
887
+ }
888
+ ]
2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_model.json ADDED
@@ -0,0 +1,1182 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "page_number",
5
+ "bbox": [
6
+ 0.912,
7
+ 0.031,
8
+ 0.921,
9
+ 0.041
10
+ ],
11
+ "angle": 0,
12
+ "content": "1"
13
+ },
14
+ {
15
+ "type": "aside_text",
16
+ "bbox": [
17
+ 0.023,
18
+ 0.273,
19
+ 0.058,
20
+ 0.705
21
+ ],
22
+ "angle": 270,
23
+ "content": "arXiv:2502.16866v1 [cs.NI] 24 Feb 2025"
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.093,
29
+ 0.071,
30
+ 0.904,
31
+ 0.176
32
+ ],
33
+ "angle": 0,
34
+ "content": "Toward Agentic AI: Generative Information Retrieval Inspired Intelligent Communications and Networking"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.18,
40
+ 0.183,
41
+ 0.816,
42
+ 0.217
43
+ ],
44
+ "angle": 0,
45
+ "content": "Ruichen Zhang, Shunpu Tang, Yinqiu Liu, Dusit Niyato, Fellow, IEEE, Zehui Xiong, Sumei Sun, Fellow, IEEE, Shiwen Mao, Fellow, IEEE, and Zhu Han, Fellow, IEEE"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.074,
51
+ 0.267,
52
+ 0.493,
53
+ 0.621
54
+ ],
55
+ "angle": 0,
56
+ "content": "Abstract—The increasing complexity and scale of modern telecommunications networks demand intelligent automation to enhance efficiency, adaptability, and resilience. Agentic AI has emerged as a key paradigm for intelligent communications and networking, enabling AI-driven agents to perceive, reason, decide, and act within dynamic networking environments. However, effective decision-making in telecom applications, such as network planning, management, and resource allocation, requires integrating retrieval mechanisms that support multi-hop reasoning, historical cross-referencing, and compliance with evolving 3GPP standards. This article presents a forward-looking perspective on generative information retrieval-inspired intelligent communications and networking, emphasizing the role of knowledge acquisition, processing, and retrieval in agentic AI for telecom systems. We first provide a comprehensive review of generative information retrieval strategies, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and agentic contextual retrieval. We then analyze their advantages, limitations, and suitability for various networking scenarios. Next, we present a survey about their applications in communications and networking. Additionally, we introduce an agentic contextual retrieval framework to enhance telecom-specific planning by integrating multi-source retrieval, structured reasoning, and self-reflective validation. Experimental results demonstrate that our framework significantly improves answer accuracy, explanation consistency, and retrieval efficiency compared to traditional and semantic retrieval methods. Finally, we outline future research directions."
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.217,
62
+ 0.648,
63
+ 0.35,
64
+ 0.661
65
+ ],
66
+ "angle": 0,
67
+ "content": "I. INTRODUCTION"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.074,
73
+ 0.669,
74
+ 0.491,
75
+ 0.76
76
+ ],
77
+ "angle": 0,
78
+ "content": "According to a Cisco report, the number of connected devices is expected to surpass 125 billion by 2030<sup>1</sup>, requiring networking systems to process massive amounts of data while maintaining seamless interactions across diverse, heterogeneous infrastructures. To support this evolution, modern networks must incorporate intelligent decision-making"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.074,
84
+ 0.772,
85
+ 0.493,
86
+ 0.817
87
+ ],
88
+ "angle": 0,
89
+ "content": "R. Zhang, S. Tang, Y. Liu, and D. Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: ruichen.zhang@ntu.edu.sg, n2409411h@e.ntu.edu.sg, yinqiu001@e.ntu.edu.sg, dniyato@ntu.edu.sg)."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.075,
95
+ 0.817,
96
+ 0.493,
97
+ 0.841
98
+ ],
99
+ "angle": 0,
100
+ "content": "Z. Xiong is with the Computer Science and Design Pillar, Singapore University of Technology and Design, Singapore (e-mail: zehui_xiong@sutd.edu.sg)."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.075,
106
+ 0.841,
107
+ 0.493,
108
+ 0.864
109
+ ],
110
+ "angle": 0,
111
+ "content": "S. Sun is with the Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore (e-mail: sunsm@i2r.a-star.edu.sg)."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.075,
117
+ 0.864,
118
+ 0.493,
119
+ 0.887
120
+ ],
121
+ "angle": 0,
122
+ "content": "S. Mao is with the Department of Electrical and Computer Engineering, Auburn University, Auburn, AL 36849, USA (e-mail: smao@ieee.org)."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.075,
128
+ 0.886,
129
+ 0.493,
130
+ 0.921
131
+ ],
132
+ "angle": 0,
133
+ "content": "Z. Han is with the University of Houston, Houston TX 77004, USA, and also with the Department of Computer Science and Engineering, Kyung Hee University, Seoul 446701, South Korea (e-mail: hanzhu22@gmail.com)."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.075,
139
+ 0.921,
140
+ 0.493,
141
+ 0.944
142
+ ],
143
+ "angle": 0,
144
+ "content": "<sup>1</sup>https://blogs.cisco.com/industrial-iot/iot-is-creating-massive-growth-opportunities"
145
+ },
146
+ {
147
+ "type": "list",
148
+ "bbox": [
149
+ 0.074,
150
+ 0.772,
151
+ 0.493,
152
+ 0.944
153
+ ],
154
+ "angle": 0,
155
+ "content": null
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.503,
161
+ 0.266,
162
+ 0.923,
163
+ 0.688
164
+ ],
165
+ "angle": 0,
166
+ "content": "mechanisms that enable autonomous control, adaptive resource management, and real-time optimization [1]. Agentic AI has emerged as a promising paradigm for autonomous network intelligence, addressing the limitations of traditional rule-based and static AI architectures. Introduced by OpenAI\\(^2\\), DeepSeek\\(^3\\), and other research institutions, agentic AI refers to autonomous agents that can perceive, reason, act, and continuously learn from their environments, allowing them to dynamically optimize network configurations, manage resources, and mitigate failures in large-scale systems [2]. Unlike conventional AI, which operates on fixed rules or pretrained models, agentic AI leverages large language models (LLMs), generative AI-based decision-making, and multi-embodied AI agent collaboration to facilitate self-organizing, highly adaptive network architectures [3]. For example, in [4], the authors explored intent-based networking with agentic AI, where autonomous agents dynamically updated network management policies based on user-defined intents, achieving a \\(32\\%\\) improvement in QoS requirements and a \\(40\\%\\) reduction in manual intervention for network reconfiguration. Despite its potential, agentic AI faces critical limitations, particularly in handling large-scale network data, maintaining long-term memory, and retrieving historical insights for enhanced decision-making. Specifically, LLM-based agents often lack efficient information retrieval methods, resulting in hallucinations, context drift, and response inconsistency, which undermine their reliability in real-world networking applications."
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.504,
172
+ 0.689,
173
+ 0.923,
174
+ 0.902
175
+ ],
176
+ "angle": 0,
177
+ "content": "To mitigate these limitations, generative information retrieval has been proposed as a fundamental enhancement for agentic AI-driven network intelligence [5]. Unlike traditional retrieval techniques, which rely on static keyword searches and limited contextual matching, generative information retrieval dynamically retrieves, synthesizes, and integrates multi-source knowledge, enabling memory-augmented, context-aware reasoning. For instance, in real-world networking applications, retrieval-augmented AI systems can access historical network logs, regulatory standards, and prior optimization strategies, allowing them to infer multi-hop dependencies across diverse network data sources [6]. This approach significantly enhances decision accuracy, adaptability, and long-term contextual understanding. An example of generative information retrieval in"
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.518,
183
+ 0.919,
184
+ 0.631,
185
+ 0.931
186
+ ],
187
+ "angle": 0,
188
+ "content": "\\(^{2}\\)https://openai.com/"
189
+ },
190
+ {
191
+ "type": "text",
192
+ "bbox": [
193
+ 0.518,
194
+ 0.931,
195
+ 0.674,
196
+ 0.945
197
+ ],
198
+ "angle": 0,
199
+ "content": "<sup>3</sup>https://www.deepseek.com/"
200
+ },
201
+ {
202
+ "type": "list",
203
+ "bbox": [
204
+ 0.518,
205
+ 0.919,
206
+ 0.674,
207
+ 0.945
208
+ ],
209
+ "angle": 0,
210
+ "content": null
211
+ }
212
+ ],
213
+ [
214
+ {
215
+ "type": "page_number",
216
+ "bbox": [
217
+ 0.912,
218
+ 0.032,
219
+ 0.921,
220
+ 0.041
221
+ ],
222
+ "angle": 0,
223
+ "content": "2"
224
+ },
225
+ {
226
+ "type": "image",
227
+ "bbox": [
228
+ 0.088,
229
+ 0.076,
230
+ 0.916,
231
+ 0.273
232
+ ],
233
+ "angle": 0,
234
+ "content": null
235
+ },
236
+ {
237
+ "type": "image_caption",
238
+ "bbox": [
239
+ 0.075,
240
+ 0.287,
241
+ 0.921,
242
+ 0.312
243
+ ],
244
+ "angle": 0,
245
+ "content": "Fig. 1. Overview of key retrieval strategies in networking. The figure highlights the methodologies, key components, and applications of different approaches, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and agentic contextual retrieval."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.074,
251
+ 0.337,
252
+ 0.492,
253
+ 0.396
254
+ ],
255
+ "angle": 0,
256
+ "content": "practice is LlamaIndex<sup>4</sup>, which enables structured document retrieval for LLM-based applications. It allows AI agents to process and integrate domain-specific knowledge in real-time."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.074,
262
+ 0.397,
263
+ 0.493,
264
+ 0.592
265
+ ],
266
+ "angle": 0,
267
+ "content": "Building on these foundations, this article provides a forward-looking perspective on agentic contextual retrieval and its role in enhancing information retrieval and decision-making within 3GPP-driven autonomous networking environments. Unlike conventional retrieval-augmented AI frameworks, the proposed approach integrates multi-source retrieval, structured reasoning, and self-reflective validation, thereby ensuring improved retrieval accuracy, contextual coherence, and decision consistency. To the best of our knowledge, this is the first work to explore the potential of agentic contextual retrieval for 3GPP-based telecommunications troubleshooting and real-time standard-compliant decision-making. The key contributions of this work are summarized as follows."
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.074,
273
+ 0.592,
274
+ 0.493,
275
+ 0.865
276
+ ],
277
+ "angle": 0,
278
+ "content": "Firstly, we summarize different retrieval strategies, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and demonstrate the most advanced agentic contextual retrieval. We analyze their applications in networking environments, identifying key challenges and the role of retrieval in enhancing network intelligence. Secondly, we provide a comprehensive review of retrieval-based methodologies in networking and communications, categorizing existing works based on their scenarios, proposed techniques, and publication timelines. This analysis highlights research trends and the evolving role of retrieval in intelligent communications and networking. Finally, we introduce an LLM-based framework that integrates agentic contextual retrieval to improve telecom-specific planning and decision-making. This framework incorporates multi-source knowledge retrieval, reasoning-based decision augmentation, and contextual adaptation, leading to substantial improvements in network optimization, fault diagnosis, and adaptive policy enforcement."
279
+ },
280
+ {
281
+ "type": "title",
282
+ "bbox": [
283
+ 0.081,
284
+ 0.874,
285
+ 0.486,
286
+ 0.888
287
+ ],
288
+ "angle": 0,
289
+ "content": "II. DIFFERENT RETRIEVAL METHODS FOR NETWORKING"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.076,
295
+ 0.892,
296
+ 0.492,
297
+ 0.923
298
+ ],
299
+ "angle": 0,
300
+ "content": "In intelligent networking, retrieval systems help process vast amounts of unstructured data, optimize spectrum usage, and"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.503,
306
+ 0.337,
307
+ 0.923,
308
+ 0.442
309
+ ],
310
+ "angle": 0,
311
+ "content": "support AI-based network controllers [5]. In edge intelligence, retrieval techniques facilitate distributed learning, enhance federated AI models, and provide real-time recommendations with minimal latency. As shown in Fig. 1, retrieval methods have evolved from traditional keyword-based approaches to hybrid and context-aware techniques, each addressing specific challenges in networking environments."
312
+ },
313
+ {
314
+ "type": "title",
315
+ "bbox": [
316
+ 0.504,
317
+ 0.464,
318
+ 0.757,
319
+ 0.478
320
+ ],
321
+ "angle": 0,
322
+ "content": "A. Traditional Information Retrieval"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.503,
328
+ 0.482,
329
+ 0.923,
330
+ 0.875
331
+ ],
332
+ "angle": 0,
333
+ "content": "Traditional information retrieval is based on matching query terms with exact keywords in the dataset, often using simple yet effective algorithms such as Boolean matching or vector space models. These methods calculate document relevance by scoring terms according to their frequency within a document (i.e., term frequency, TF) and across the entire dataset (i.e., inverse document frequency, IDF). The resulting relevance scores rank documents based on their alignment with the query. This approach works well in structured datasets with clear and consistent keyword distributions, such as early library catalog systems or archival searches. However, it does not account for the semantic meaning of terms or the broader context in which the query occurs. To address such issues, for example, Salton et al. [7] proposed a foundational vector space model where documents and queries are represented as vectors in a multi-dimensional space. The similarity between these vectors is computed using cosine similarity, allowing for efficient ranking of documents based on query relevance. Experimental results demonstrated that the vector space model improved retrieval precision by \\(15\\%\\) compared to basic Boolean retrieval methods. However, when applied to dynamic datasets such as network resource management logs, its reliance on exact matches caused about a \\(20\\%\\) drop in recall for queries involving synonyms or contextually related terms. These limitations highlight the need for more adaptive retrieval methods in real-time scenarios."
334
+ },
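
The TF-IDF vector-space ranking summarized above can be reproduced in a few lines of scikit-learn; the toy documents and query below are invented for illustration and are not from the cited work.

```python
# Minimal TF-IDF vector-space ranking: documents and the query are embedded as
# sparse term vectors and ranked by cosine similarity.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

documents = [
    "spectrum allocation log for cell 12, interference reported on band n78",
    "handover failure between gNB 7 and gNB 9 due to timing misconfiguration",
    "routine firmware upgrade completed on the core router",
]
query = "interference on band n78"

vectorizer = TfidfVectorizer()
doc_vectors = vectorizer.fit_transform(documents)
query_vector = vectorizer.transform([query])

scores = cosine_similarity(query_vector, doc_vectors).ravel()
for idx in scores.argsort()[::-1]:
    print(f"{scores[idx]:.3f}  {documents[idx]}")
```
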
335
+ {
336
+ "type": "title",
337
+ "bbox": [
338
+ 0.505,
339
+ 0.896,
340
+ 0.645,
341
+ 0.91
342
+ ],
343
+ "angle": 0,
344
+ "content": "B. Hybrid Retrieval"
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.504,
350
+ 0.915,
351
+ 0.922,
352
+ 0.946
353
+ ],
354
+ "angle": 0,
355
+ "content": "Hybrid retrieval combines traditional retrieval methods, such as TF-IDF scoring, with semantic embeddings generated by"
356
+ },
357
+ {
358
+ "type": "footer",
359
+ "bbox": [
360
+ 0.088,
361
+ 0.931,
362
+ 0.318,
363
+ 0.945
364
+ ],
365
+ "angle": 0,
366
+ "content": "<sup>4</sup>https://gpt-index.readthedocs.io/en/latest/"
367
+ }
368
+ ],
369
+ [
370
+ {
371
+ "type": "page_number",
372
+ "bbox": [
373
+ 0.912,
374
+ 0.032,
375
+ 0.92,
376
+ 0.04
377
+ ],
378
+ "angle": 0,
379
+ "content": "3"
380
+ },
381
+ {
382
+ "type": "table_caption",
383
+ "bbox": [
384
+ 0.364,
385
+ 0.072,
386
+ 0.632,
387
+ 0.094
388
+ ],
389
+ "angle": 0,
390
+ "content": "TABLE I COMPARISON OF KEY RETRIEVAL STRATEGIES."
391
+ },
392
+ {
393
+ "type": "table",
394
+ "bbox": [
395
+ 0.101,
396
+ 0.109,
397
+ 0.895,
398
+ 0.328
399
+ ],
400
+ "angle": 0,
401
+ "content": "<table><tr><td>Retrieval methods</td><td>Training strategies</td><td>Applicable network types</td><td>User demands</td><td>Agentic AI applications</td><td>Application examples</td></tr><tr><td>Traditional information retrieval</td><td>●Based on explicit keyword matching or Boolean logic.</td><td>●Works well in relatively static networks or environments [6].</td><td>●Focused on delivering relevant results based on exact keyword matches.</td><td>●Limited agent-based applications but can be used in simple chatbot systems.</td><td>●Elasticsearch (https://github.com/elastic/elasticsearch)●Apache Lucene (https://github.com/apache/lucene)</td></tr><tr><td>Hybrid retrieval</td><td>●Combines traditional keyword-based retrieval and machine learning models (e.g., TF-IDF and BERT).</td><td>●Works well in dynamic environments where content is constantly changing, and user preferences need to be understood [7].</td><td>●Users demand a more refined search experience where results are also tailored to personal preferences.</td><td>●Chatbots and recommendation systems use hybrid retrieval to suggest products, content, or responses.</td><td>●Recommendation System (https://github.com/lyst/lightfm)●Nixiesearch (https://github.com/nixiesearch/nixiesearch)</td></tr><tr><td>Semantic retrieval</td><td>●Uses deep learning (e.g., word embeddings like Word2Vec and BERT, etc.) to understand the meaning behind the query and the documents.</td><td>●Works well in environments where understanding context is important [8].</td><td>●Users demand results that understand the intent behind their queries rather than just keyword matches.</td><td>●Widely used in AI agents like virtual assistants (Google Assistant, Siri), or knowledge-based agents.</td><td>●Semantic Search Engine (https://github.com/deepset-ai/haystack)●Txtai (https://github.com/neuml/txtai)</td></tr><tr><td>Knowledge-based retrieval</td><td>●Uses rule-based approaches and inference engines to retrieve relevant information based on predefined knowledge structures.</td><td>●Primarily used in static or semi-static networks, where domain knowledge remains relatively constant but is highly structured [9].</td><td>●Users expect highly accurate, factual, and structured information based on established knowledge.</td><td>●AI agents can act as expert consultants in areas like healthcare (e.g., IBM Watson) or legal systems.</td><td>●Knowledge Graph Search (https://github.com/neo4j/neo4j)●SciTLDR (https://github.com/allenai/scitldr)</td></tr><tr><td>Agentic contextual retrieval</td><td>●Methods like reinforcement learning from human feedback (RLHF) for adaptive retrieval or meta-learning for fast adaptation.</td><td>●Works well in multi-agent and dynamic environments where context is constantly evolving [10].</td><td>●Users expect adaptive and personalized retrieval based on evolving queries.</td><td>●Used in autonomous AI assistants (ChatGPT Agents, Claude, Google Gemini).</td><td>●AI-Powered Coding Assistants (https://github.com/features/copilot)●ModelScope-Agent (https://github.com/modelscope/modelscope-agent)</td></tr></table>"
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.075,
407
+ 0.358,
408
+ 0.493,
409
+ 0.751
410
+ ],
411
+ "angle": 0,
412
+ "content": "pre-trained deep learning models such as BERT or GPT. This hybrid approach addresses the limitations of traditional methods by incorporating contextual understanding while maintaining computational efficiency. In hybrid retrieval, the process typically contains two stages: a coarse filtering stage, which uses lightweight traditional methods to identify a subset of candidate documents, followed by a re-ranking stage where semantic embeddings are applied to refine results. This two-stage approach ensures that hybrid retrieval is both efficient and accurate, making it particularly suitable for environments where computational resources are limited but semantic depth is required. In networking applications, hybrid retrieval can be particularly useful for AI-driven network monitoring and anomaly detection, where efficient pre-filtering combined with deep learning enables fast yet context-aware decision-making. For example, Zeng et al. [8] proposed a federated hybrid retrieval framework designed to integrate traditional TF-IDF filtering with semantic re-ranking using BERT embeddings. Their system processed candidate documents in two stages: first, TF-IDF was used to rapidly filter out irrelevant data at mobile edge nodes, significantly reducing the search space; second, the filtered candidates were semantically ranked using embeddings. Experimental results showed that this approach improved retrieval precision by \\(25\\%\\) and reduced computational latency by \\(20\\%\\) compared to other classical retrieval systems."
413
+ },
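
A minimal sketch of the two-stage hybrid pipeline (cheap keyword filtering followed by semantic re-ranking) is shown below. The corpus, query, candidate count, and embedding model are illustrative assumptions, not the configuration used in the cited federated system.

```python
# Two-stage hybrid retrieval: TF-IDF selects coarse candidates, then a
# sentence-transformer re-ranks them by semantic similarity.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer, util

corpus = [
    "dynamic spectrum sharing policies between LTE and NR carriers",
    "edge caching strategy for video-on-demand traffic",
    "NR carrier aggregation configuration for mid-band spectrum",
    "data-center cooling maintenance schedule",
]
query = "how to share spectrum dynamically in 5G"

# Stage 1: coarse keyword filtering with TF-IDF keeps the top candidates.
tfidf = TfidfVectorizer().fit(corpus)
coarse = cosine_similarity(tfidf.transform([query]), tfidf.transform(corpus)).ravel()
candidates = [corpus[i] for i in coarse.argsort()[::-1][:2]]

# Stage 2: semantic re-ranking of the surviving candidates.
model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
q_emb = model.encode(query, normalize_embeddings=True)
c_emb = model.encode(candidates, normalize_embeddings=True)
reranked = sorted(zip(candidates, util.cos_sim(q_emb, c_emb)[0].tolist()),
                  key=lambda pair: pair[1], reverse=True)
for text, score in reranked:
    print(f"{score:.3f}  {text}")
```
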
414
+ {
415
+ "type": "title",
416
+ "bbox": [
417
+ 0.076,
418
+ 0.774,
419
+ 0.23,
420
+ 0.788
421
+ ],
422
+ "angle": 0,
423
+ "content": "C. Semantic Retrieval"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.074,
429
+ 0.794,
430
+ 0.491,
431
+ 0.947
432
+ ],
433
+ "angle": 0,
434
+ "content": "Semantic retrieval uses deep neural networks, particularly transformer-based architectures such as BERT, to encode queries and documents into a shared embedding space. This embedding space captures the semantic relationships between terms, enabling the retrieval system to understand the intent behind the query rather than relying solely on exact keyword matches. Semantic retrieval excels in handling complex queries that involve ambiguous or domain-specific language, such as medical diagnostics and network troubleshooting. For example, Tang et al. [9] proposed a semantic retrieval"
435
+ },
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.503,
440
+ 0.358,
441
+ 0.923,
442
+ 0.48
443
+ ],
444
+ "angle": 0,
445
+ "content": "framework leveraging BERT-based embeddings to optimize resource allocation in wireless networks. By encoding queries and documents into a shared semantic space, the system retrieved contextually related documents even for complex queries such as \"dynamic spectrum sharing in 5G\". Their experiments demonstrated a \\(32\\%\\) increase in recall compared to hybrid retrieval methods and an \\(18\\%\\) improvement in precision."
446
+ },
447
+ {
448
+ "type": "title",
449
+ "bbox": [
450
+ 0.505,
451
+ 0.503,
452
+ 0.72,
453
+ 0.518
454
+ ],
455
+ "angle": 0,
456
+ "content": "D. Knowledge-Based Retrieval"
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.503,
462
+ 0.523,
463
+ 0.923,
464
+ 0.856
465
+ ],
466
+ "angle": 0,
467
+ "content": "Knowledge-based retrieval integrates domain-specific ontologies and structured knowledge graphs to enhance retrieval performance. These systems excel in reasoning tasks by explicitly leveraging predefined relationships between entities, providing interpretable results that are often critical in regulated domains such as healthcare, finance, and telecommunications. In knowledge-based retrieval, it is performed by querying the knowledge graph to extract entities and their relationships that match the query context. This method allows for reasoning over linked data, enabling the retrieval of not just relevant documents but also actionable insights based on the relationships in the dataset. For example, Xiong et al. [10] proposed a knowledge graph-based retrieval system for wireless spectrum management. Their framework utilized a graph structure where nodes represented entities such as \"spectrum bands,\" \"user demands,\" and \"interference levels,\" while edges captured relationships such as \"interferes with\" or \"assigned to.\" The key advantage of this approach lies in its ability to provide structured, explainable decisions based on predefined rules. The system achieved a \\(25\\%\\) improvement in spectrum allocation efficiency and a \\(30\\%\\) reduction in interference conflicts compared to heuristic-based methods."
468
+ },
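
The graph-based lookup sketched in this subsection can be illustrated with a tiny in-memory triple store: instead of matching keywords, a query walks typed relationships such as "interferes_with" and "assigned_to". The triples below are invented for the example.

```python
# Toy knowledge-based retrieval over spectrum-management triples: answering a
# query by traversing typed relationships rather than matching keywords.
triples = [
    ("band_n78", "assigned_to", "operator_A"),
    ("band_n78", "interferes_with", "band_n77"),
    ("band_n77", "assigned_to", "operator_B"),
    ("operator_B", "demands", "eMBB_capacity"),
]


def neighbors(entity: str, relation: str) -> list[str]:
    """Return all objects linked to `entity` by `relation`."""
    return [obj for subj, rel, obj in triples if subj == entity and rel == relation]


# "Which operators are affected if band n78 causes interference?"
affected = [operator
            for band in neighbors("band_n78", "interferes_with")
            for operator in neighbors(band, "assigned_to")]
print(affected)  # ['operator_B']
```
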
469
+ {
470
+ "type": "title",
471
+ "bbox": [
472
+ 0.505,
473
+ 0.879,
474
+ 0.727,
475
+ 0.894
476
+ ],
477
+ "angle": 0,
478
+ "content": "E. Agentic Contextual Retrieval"
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.504,
484
+ 0.899,
485
+ 0.922,
486
+ 0.947
487
+ ],
488
+ "angle": 0,
489
+ "content": "Agentic contextual retrieval leverages intelligent agent-based control mechanisms to dynamically adjust retrieval strategies based on task-specific requirements, multimodal"
490
+ }
491
+ ],
492
+ [
493
+ {
494
+ "type": "page_number",
495
+ "bbox": [
496
+ 0.912,
497
+ 0.032,
498
+ 0.921,
499
+ 0.04
500
+ ],
501
+ "angle": 0,
502
+ "content": "4"
503
+ },
504
+ {
505
+ "type": "image",
506
+ "bbox": [
507
+ 0.103,
508
+ 0.072,
509
+ 0.898,
510
+ 0.276
511
+ ],
512
+ "angle": 0,
513
+ "content": null
514
+ },
515
+ {
516
+ "type": "image_caption",
517
+ "bbox": [
518
+ 0.074,
519
+ 0.292,
520
+ 0.924,
521
+ 0.318
522
+ ],
523
+ "angle": 0,
524
+ "content": "Fig. 2. A summary of recent retrieval methods in communications and networking, which provides an overview of various proposals, research scenarios, and levels of human-AI interaction."
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.074,
530
+ 0.34,
531
+ 0.493,
532
+ 0.657
533
+ ],
534
+ "angle": 0,
535
+ "content": "data integration, and real-time environmental changes. Unlike traditional or semantic retrieval methods, which rely on static queries and predefined indexing, this approach enables adaptive, goal-driven information extraction that continuously refines itself based on evolving conditions. By incorporating real-time system states, historical patterns, and structured knowledge representations, agentic contextual retrieval ensures high adaptability and context-aware decision-making, making it particularly suited for applications in network optimization, autonomous systems, and intelligent fault diagnostics. A key advantage of agentic contextual retrieval is its ability to enable autonomous decision-making agents that actively monitor, retrieve, and reason over multiple data sources to enhance performance in complex, dynamic environments. For example, Kagaya et al. [11] proposed a retrieval framework for autonomous driving, where an agent-driven control mechanism integrated LiDAR, GPS, real-time traffic updates, and weather conditions to dynamically adjust navigation strategies. By enabling real-time, intelligent retrieval and control, their system reduced recalibration time by \\(40\\%\\) and improved navigation accuracy by \\(28\\%\\)."
536
+ },
537
+ {
538
+ "type": "title",
539
+ "bbox": [
540
+ 0.075,
541
+ 0.67,
542
+ 0.398,
543
+ 0.685
544
+ ],
545
+ "angle": 0,
546
+ "content": "F. Retrieval Comparison and Lessons Learned"
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.074,
552
+ 0.688,
553
+ 0.493,
554
+ 0.947
555
+ ],
556
+ "angle": 0,
557
+ "content": "Retrieval methods vary significantly in their methodologies, applications, and suitability for different networking scenarios. Specifically, traditional retrieval, which relies on explicit keyword matching, is well-suited for static local network management, where queries are simple, computational resources are limited, and speed is prioritized. Hybrid retrieval combines keyword-based search with machine learning models, making it effective for dynamic network environments, such as adaptive caching or content distribution, where user preferences evolve over time. Semantic retrieval, powered by deep learning models, enhances intent-driven network diagnostics by capturing query context, making it particularly useful for automated fault detection and troubleshooting in telecom networks. Knowledge-based retrieval, leveraging structured inference models, supports rule-based network security and access control, where highly accurate, structured decision-making is critical. Finally, agentic contextual retrieval offers"
558
+ },
559
+ {
560
+ "type": "text",
561
+ "bbox": [
562
+ 0.503,
563
+ 0.34,
564
+ 0.923,
565
+ 0.431
566
+ ],
567
+ "angle": 0,
568
+ "content": "adaptive and real-time decision support in multi-agent network control systems, where dynamic environmental factors, such as interference levels or traffic congestion, require continuous learning and adjustment [11]. Table I summarizes these strategies, highlighting their core features, training methods, and example applications."
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.503,
574
+ 0.431,
575
+ 0.923,
576
+ 0.719
577
+ ],
578
+ "angle": 0,
579
+ "content": "Moreover, we conduct a review of recent retrieval-based approaches in communications and networking from 2023 to late 2024, as summarized in Fig. 2. Our analysis categorizes retrieval strategies into traditional, hybrid, semantic, knowledge-based, and agentic contextual retrieval, highlighting their applications across various domains, including wireless communications, network optimization, and intelligent decision-making. While retrieval-augmented methods have been increasingly integrated into AI-driven network resource management and semantic communication, we observe that agentic contextual retrieval remains largely unexplored for telecommunications-specific applications. Moreover, while [11] demonstrates agentic contextual retrieval for autonomous driving control, there is currently no direct implementation tailored for communication networks and telecom infrastructure. To fill this gap, the next section introduces our proposed framework, which leverages agentic contextual retrieval to enhance intelligent decision-making, troubleshooting, and autonomous adaptation in telecommunications and networking systems."
580
+ },
581
+ {
582
+ "type": "title",
583
+ "bbox": [
584
+ 0.52,
585
+ 0.738,
586
+ 0.907,
587
+ 0.768
588
+ ],
589
+ "angle": 0,
590
+ "content": "III. CASE STUDY: AGENTIC CONTEXTUAL RETRIEVAL FOR NETWORKING"
591
+ },
592
+ {
593
+ "type": "title",
594
+ "bbox": [
595
+ 0.505,
596
+ 0.774,
597
+ 0.606,
598
+ 0.788
599
+ ],
600
+ "angle": 0,
601
+ "content": "A. Motivation"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.503,
607
+ 0.794,
608
+ 0.923,
609
+ 0.947
610
+ ],
611
+ "angle": 0,
612
+ "content": "In next-generation communications and networking, efficient resource allocation, adaptive service provisioning, and intelligent decision-making are crucial for optimizing user experience and network efficiency. Modern communication systems are shifting towards intent-driven networking, where mobile users express high-level requirements in natural language, and the network autonomously interprets and executes these requests. However, this paradigm introduces significant challenges in bridging the gap between user intents, structured communication standards, and real-time network"
613
+ }
614
+ ],
615
+ [
616
+ {
617
+ "type": "page_number",
618
+ "bbox": [
619
+ 0.912,
620
+ 0.031,
621
+ 0.921,
622
+ 0.041
623
+ ],
624
+ "angle": 0,
625
+ "content": "5"
626
+ },
627
+ {
628
+ "type": "image",
629
+ "bbox": [
630
+ 0.102,
631
+ 0.07,
632
+ 0.903,
633
+ 0.271
634
+ ],
635
+ "angle": 0,
636
+ "content": null
637
+ },
638
+ {
639
+ "type": "image_caption",
640
+ "bbox": [
641
+ 0.074,
642
+ 0.28,
643
+ 0.925,
644
+ 0.352
645
+ ],
646
+ "angle": 0,
647
+ "content": "Fig. 3. Illustration of the agentic contextual retrieval enhanced intelligent base station for troubleshooting and decision-making. The framework follows a structured four-step workflow: (A) Query understanding and reformulation ensure alignment with 3GPP terminology using LLM-based query expansion. (B) Multi-source knowledge retrieval extracts relevant information from both structured (e.g., 3GPP standards) and unstructured (e.g., online sources) datasets. (C) Contextual evidence aggregation and reasoning synthesize retrieved knowledge into structured responses using chain-of-thought reasoning. (D) Decision-making and self-validation enhance accuracy through confidence-based verification and iterative refinement, reducing hallucinations and improving response consistency."
648
+ },
649
+ {
650
+ "type": "text",
651
+ "bbox": [
652
+ 0.074,
653
+ 0.376,
654
+ 0.492,
655
+ 0.588
656
+ ],
657
+ "angle": 0,
658
+ "content": "configurations. A key challenge lies in mapping natural language intent descriptions to actionable network configurations, requiring an understanding of both human semantics and telecommunications-specific knowledge. Traditional rule-based methods or static intent templates are insufficient in handling diverse user demands and evolving network conditions [4]. LLMs offer a promising solution due to their strong natural language understanding (NLU) and reasoning capabilities. However, LLMs lack domain-specific knowledge in telecommunications, such as 3GPP standards, intent translation templates, and network control logic. Consequently, their direct application to network automation remains limited by knowledge incompleteness, retrieval inefficiency, and contextual inconsistency."
659
+ },
660
+ {
661
+ "type": "text",
662
+ "bbox": [
663
+ 0.076,
664
+ 0.598,
665
+ 0.493,
666
+ 0.947
667
+ ],
668
+ "angle": 0,
669
+ "content": "To address these challenges, we propose a retrieval-enhanced intelligent base station architecture, where the network dynamically retrieves, synthesizes, and applies knowledge from 3GPP standards, network logs, and external telecom repositories to enhance decision-making. Specifically, the system employs a hybrid retrieval framework to convert user-generated intents into structured network actions, using a template-based approach that aligns with communication paradigms outlined in 3GPP [12]. In this framework, user requests (e.g., \"I need ultra-low latency for cloud gaming\") are processed by the network's AI module, which retrieves relevant telecom policies and configurations before generating a customized communication plan. Despite the advantages of retrieval-augmented LLMs, conventional retrieval-augmented generation (RAG) techniques face critical limitations in telecom-specific applications, including: (i) Contextual Ambiguity: Simple keyword-based retrieval struggles to retrieve relevant 3GPP policies and network parameters, as user intents often involve multiple layers of contextual interpretation. (ii) Data Sparsity: Telecommunications standards and policy documents are highly structured, yet spread across multiple releases and fragmented into different standardization documents. (iii) Retrieval Inefficiency: Traditional retrieval"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.504,
675
+ 0.376,
676
+ 0.922,
677
+ 0.42
678
+ ],
679
+ "angle": 0,
680
+ "content": "approaches lack multi-hop reasoning, failing to link user intents with both historical network behavior and real-time conditions."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.504,
686
+ 0.422,
687
+ 0.923,
688
+ 0.557
689
+ ],
690
+ "angle": 0,
691
+ "content": "To overcome these limitations, we introduce an agentic contextual retrieval framework, which integrates multi-source knowledge retrieval, structured reasoning, and self-reflective validation to enhance intent-driven networking. Our framework enables intelligent base stations to map user intents to network configurations in real-time, leveraging LLM-powered decision-making while ensuring alignment with 3GPP compliance, traffic optimization strategies, and real-world deployment policies."
692
+ },
693
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.505,
697
+ 0.578,
698
+ 0.807,
699
+ 0.593
700
+ ],
701
+ "angle": 0,
702
+ "content": "B. Agentic Contextual Retrieval Framework"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.504,
708
+ 0.598,
709
+ 0.922,
710
+ 0.671
711
+ ],
712
+ "angle": 0,
713
+ "content": "As shown in Fig. 3, the deployment of the agentic contextual retrieval framework follows a structured four-step workflow, designed to enhance the retrieval, reasoning, and validation of knowledge specific to 3GPP standards and telecommunications networks."
714
+ },
715
+ {
716
+ "type": "text",
717
+ "bbox": [
718
+ 0.504,
719
+ 0.673,
720
+ 0.922,
721
+ 0.947
722
+ ],
723
+ "angle": 0,
724
+ "content": "1) Knowledge Preparation and Query Understanding: The system first loads 3GPP standards and network documentation from a database, segments them into context-aware knowledge chunks, and vectorizes them using sentence-transformer embeddings. To enable efficient semantic retrieval, the vectorized knowledge chunks are indexed using a vector database, allowing for efficient similarity searches. After that, once a query is received, the system analyzes user intent and performs query reformulation, ensuring that the query aligns with 3GPP-defined communication paradigms and technical configurations. In practice, telecommunications queries often contain ambiguous terms, incomplete phrasing, or require historical cross-referencing across multiple 3GPP releases. Therefore, it is necessary to fully understand the user intent and the key concepts in this context to improve retrieval accuracy. Specifically, we can use LLMs to realize that and ensure longitudinal consistency when retrieving regulatory and technical specifications [13]. In our experimental setup, user"
725
+ }
726
+ ],
727
+ [
728
+ {
729
+ "type": "page_number",
730
+ "bbox": [
731
+ 0.912,
732
+ 0.032,
733
+ 0.921,
734
+ 0.041
735
+ ],
736
+ "angle": 0,
737
+ "content": "6"
738
+ },
739
+ {
740
+ "type": "image",
741
+ "bbox": [
742
+ 0.144,
743
+ 0.071,
744
+ 0.862,
745
+ 0.232
746
+ ],
747
+ "angle": 0,
748
+ "content": null
749
+ },
750
+ {
751
+ "type": "image_caption",
752
+ "bbox": [
753
+ 0.074,
754
+ 0.243,
755
+ 0.924,
756
+ 0.269
757
+ ],
758
+ "angle": 0,
759
+ "content": "Fig. 4. Performance comparison of Agentic Contextual Retrieval against baseline methods, including QWen-Max without retriever, traditional retrieval, and semantic retrieval."
760
+ },
761
+ {
762
+ "type": "text",
763
+ "bbox": [
764
+ 0.074,
765
+ 0.292,
766
+ 0.492,
767
+ 0.367
768
+ ],
769
+ "angle": 0,
770
+ "content": "intent queries, such as customized communication service requests (e.g., \"I need ultra-reliable low-latency communication for industrial automation\"), are first parsed and the key concepts such as \"ultra-reliable low-latency\", \"role of URLLC in industrial automation\" are extracted."
771
+ },
772
+ {
773
+ "type": "text",
774
+ "bbox": [
775
+ 0.074,
776
+ 0.369,
777
+ 0.493,
778
+ 0.672
779
+ ],
780
+ "angle": 0,
781
+ "content": "2) Multi-Source Knowledge Retrieval: Following query optimization, the second step involves multi-source retrieval to ensure both completeness and relevance in decision-making for network configuration and policy enforcement. Next, we integrate semantic vector-based retrieval with embedding models to extract key information from 3GPP specifications, network operation policies, and real-time telecom deployment scenarios. Embedding models generate dense vector representations of text, enabling context-aware similarity search rather than relying on exact keyword matches [14]. To further improve accuracy, structured knowledge representations establish relationships between frequency bands, protocol parameters, and QoS metrics, refining query precision. Additionally, real-time retrieval from online repositories ensures access to the latest standardization updates. For instance, when retrieving information on \"5G network slicing SLA guarantees,\" the system uses an embedding model to identify semantically relevant sections from TS 28.531 (Performance Assurance) and TS 28.554 (KPI Definitions) while incorporating recent case studies from network operators."
782
+ },
783
+ {
784
+ "type": "text",
785
+ "bbox": [
786
+ 0.074,
787
+ 0.673,
788
+ 0.493,
789
+ 0.947
790
+ ],
791
+ "angle": 0,
792
+ "content": "3) Contextual Evidence Aggregation and Reasoning: Once relevant information is retrieved, the third step focuses on contextual evidence aggregation and reasoning, where multi-source knowledge is condensed into a structured and interpretable response. Given the vast amount of information available in telecom standardization, it is crucial to eliminate redundancy, enhance clarity, and ensure that the extracted content directly addresses the query [13]. Specifically, we use an LLM-powered reasoning agent, which autonomously identifies the most relevant text segments in the retrieved content based on the reformulated query. The agent then synthesizes these segments into a concise, context-aware summary, ensuring that only the most important evidence is retained, and irrelevant or redundant information is discarded. For example, in response to a question like \"What is the role of the serving network in fraud control?\", the retrieved information may contain detailed descriptions of charging functions, fraud detection, and policy enforcement. Instead of presenting all these details, the agent"
793
+ },
794
+ {
795
+ "type": "text",
796
+ "bbox": [
797
+ 0.503,
798
+ 0.292,
799
+ 0.922,
800
+ 0.351
801
+ ],
802
+ "angle": 0,
803
+ "content": "analyzes the content, extracts the core function of the serving network in fraud prevention, and generates a concise summary, emphasizing its role in real-time data collection and cost control."
804
+ },
805
+ {
806
+ "type": "text",
807
+ "bbox": [
808
+ 0.503,
809
+ 0.353,
810
+ 0.923,
811
+ 0.565
812
+ ],
813
+ "angle": 0,
814
+ "content": "4) Decision-Making and Self-Validation: The final step involves a decision-making agent that simultaneously generates both the network action recommendations and justifications based on the optimized query and refined retrieval results. This agent applies CoT reasoning to synthesize a structured response, ensuring that the explanation logically supports the answer by drawing from the retrieved evidence [15]. To enhance reliability, a self-reflection agent evaluates the generated response, critically reviewing both the answer and explanation for consistency, factual accuracy, and alignment with authoritative 3GPP standards. If inconsistencies, incomplete reasoning, or speculative conclusions are detected, the self-reflection agent challenges the response and triggers an iterative refinement loop."
815
+ },
816
+ {
817
+ "type": "title",
818
+ "bbox": [
819
+ 0.505,
820
+ 0.588,
821
+ 0.606,
822
+ 0.602
823
+ ],
824
+ "angle": 0,
825
+ "content": "C. Simulation"
826
+ },
827
+ {
828
+ "type": "text",
829
+ "bbox": [
830
+ 0.503,
831
+ 0.608,
832
+ 0.923,
833
+ 0.881
834
+ ],
835
+ "angle": 0,
836
+ "content": "Simulation Settings: Our simulation is conducted using a structured retrieval and reasoning pipeline, integrating multiple knowledge sources and agent-driven query optimization. We employ Qwen2.5-Max as the base LLM, leveraging its advanced reasoning capabilities for telecom-related question-answering tasks. To evaluate retrieval performance, we selected 50 structured QA pairs related to 3GPP R18 from the TeleQnA dataset, which serves as the primary benchmark. For additional technical context, we use the 3GPP R18 dataset. To ensure retrieval efficiency, we utilize FAISS, an indexing tool optimized for high-speed vector similarity search. The document processing workflow involves segmenting 3GPP standard documents into 1000-character chunks with a 100-character overlap, followed by embedding generation using Mpnet-base-V2\\(^{8}\\), a transformer-based model trained for dense vector representations. To evaluate the effectiveness of the proposed Agentic contextual retrieval framework, we compare its performance against three baselines: (i) Qwen-Max"
837
+ },
838
+ {
839
+ "type": "ref_text",
840
+ "bbox": [
841
+ 0.518,
842
+ 0.894,
843
+ 0.836,
844
+ 0.907
845
+ ],
846
+ "angle": 0,
847
+ "content": "5https://huggingface.co/spaces/Qwen/Qwen2.5-Max-Demo"
848
+ },
849
+ {
850
+ "type": "ref_text",
851
+ "bbox": [
852
+ 0.518,
853
+ 0.907,
854
+ 0.786,
855
+ 0.919
856
+ ],
857
+ "angle": 0,
858
+ "content": "\\(^{6}\\)https://huggingface.co/datasets/netop/3GPP-R18"
859
+ },
860
+ {
861
+ "type": "ref_text",
862
+ "bbox": [
863
+ 0.518,
864
+ 0.919,
865
+ 0.749,
866
+ 0.932
867
+ ],
868
+ "angle": 0,
869
+ "content": "<sup>7</sup>https://github.com/facebookresearch/faiss"
870
+ },
871
+ {
872
+ "type": "ref_text",
873
+ "bbox": [
874
+ 0.518,
875
+ 0.932,
876
+ 0.868,
877
+ 0.945
878
+ ],
879
+ "angle": 0,
880
+ "content": "<sup>8</sup>https://huggingface.co/sentence-transformers/all-mpnet-base-v2"
881
+ },
882
+ {
883
+ "type": "list",
884
+ "bbox": [
885
+ 0.518,
886
+ 0.894,
887
+ 0.868,
888
+ 0.945
889
+ ],
890
+ "angle": 0,
891
+ "content": null
892
+ }
893
+ ],
894
+ [
895
+ {
896
+ "type": "page_number",
897
+ "bbox": [
898
+ 0.912,
899
+ 0.031,
900
+ 0.921,
901
+ 0.04
902
+ ],
903
+ "angle": 0,
904
+ "content": "7"
905
+ },
906
+ {
907
+ "type": "text",
908
+ "bbox": [
909
+ 0.074,
910
+ 0.07,
911
+ 0.492,
912
+ 0.19
913
+ ],
914
+ "angle": 0,
915
+ "content": "without Retriever, representing a pure LLM-based approach, (ii) Qwen-Max with Traditional Retriever, utilizing standard retrieval-based augmentation, and (iii) Qwen-Max with Semantic Retriever, incorporating semantic embedding-based retrieval. The comparison is conducted across four key evaluation metrics, i.e., Answer Matching Accuracy, Answer Text F1 Score, Explanation BERT Score, and Explanation Cosine Similarity, as shown in Fig. 4."
916
+ },
917
+ {
918
+ "type": "text",
919
+ "bbox": [
920
+ 0.074,
921
+ 0.19,
922
+ 0.495,
923
+ 0.629
924
+ ],
925
+ "angle": 0,
926
+ "content": "Fig. 4 demonstrates that Agentic contextual retrieval consistently outperforms all baseline methods across all evaluation metrics. In particular, the proposed framework achieves an answer matching accuracy of \\(84\\%\\) and an answer text F1 score of \\(90.37\\%\\), surpassing the performance of semantic retrieval (i.e., \\(80\\%\\)) and traditional retrieval (i.e., \\(74\\%\\)), underscoring its effectiveness in generating precise and contextually relevant responses. This improvement is attributed to its dynamic multisource retrieval, which integrates structured 3GPP standards with external knowledge repositories, query reformulation mechanisms, ensuring alignment with telecom-specific terminology, and a structured reasoning pipeline, which employs CoT decision-making and self-validation loops to enhance logical consistency and factual accuracy. Moreover, unlike conventional retrieval methods that rely on static document matching, Agentic contextual retrieval dynamically extracts, synthesizes, and validates multi-hop contextual information, significantly enhancing retrieval precision and response coherence. Furthermore, the explanation quality also benefits significantly from our approach, as evidenced by the Explanation BERT Score (i.e., \\(90.95\\%\\)) and Cosine Similarity (i.e., \\(80.83\\%\\)), both of which outperform alternative retrieval methods. These improvements stem from the framework's ability to synthesize multi-source knowledge, apply structured reasoning, and iteratively refine responses through self-reflection mechanisms. In contrast, the semantic retrieval baseline, while effective at contextual retrieval, lacks robust reasoning capabilities and multi-turn validation, limiting its ability to handle complex telecom-specific queries."
927
+ },
928
+ {
929
+ "type": "title",
930
+ "bbox": [
931
+ 0.191,
932
+ 0.64,
933
+ 0.376,
934
+ 0.654
935
+ ],
936
+ "angle": 0,
937
+ "content": "IV. FUTURE DIRECTIONS"
938
+ },
939
+ {
940
+ "type": "text",
941
+ "bbox": [
942
+ 0.074,
943
+ 0.659,
944
+ 0.491,
945
+ 0.794
946
+ ],
947
+ "angle": 0,
948
+ "content": "Security and Privacy in Retrieval-Augmented Networks: As agentic contextual retrieval frameworks increasingly rely on multi-source knowledge retrieval, ensuring data integrity, confidentiality, and adversarial robustness is critical. Future research should explore privacy-preserving retrieval techniques, such as federated retrieval, secure multi-party computation, and differential privacy-enhanced retrieval models, to mitigate risks associated with unauthorized data access and adversarial attacks in wireless and networking applications."
949
+ },
950
+ {
951
+ "type": "text",
952
+ "bbox": [
953
+ 0.074,
954
+ 0.795,
955
+ 0.492,
956
+ 0.945
957
+ ],
958
+ "angle": 0,
959
+ "content": "Energy-Efficient and Low-Latency Retrieval Networking Architectures: Deploying LLM-driven agentic contextual retrieval frameworks in real-world wireless and networking environments requires optimized inference efficiency and low-latency retrieval mechanisms. Future studies could investigate mobile device-aware retrieval strategies, knowledge distillation for lightweight retrieval models, and edge-based retrieval deployment to minimize computational overhead while maintaining retrieval accuracy in resource-constrained environments, such as 5G edge nodes and IoT devices."
960
+ },
961
+ {
962
+ "type": "text",
963
+ "bbox": [
964
+ 0.503,
965
+ 0.07,
966
+ 0.923,
967
+ 0.251
968
+ ],
969
+ "angle": 0,
970
+ "content": "Network-Aware Adaptive Retrieval for Real-Time Optimization: As telecom networks become increasingly complex and dynamic, retrieval systems must not only process knowledge efficiently but also adapt to real-time network conditions, congestion levels, and QoS constraints. Future research should explore network-aware retrieval architectures that dynamically adjust retrieval latency, query granularity, and resource allocation based on real-time network traffic and topology changes. Techniques such as reinforcement learning-based retrieval scheduling, adaptive caching, and traffic-aware retrieval pipelines could significantly enhance the responsiveness and efficiency in networking environments."
971
+ },
972
+ {
973
+ "type": "title",
974
+ "bbox": [
975
+ 0.652,
976
+ 0.268,
977
+ 0.776,
978
+ 0.282
979
+ ],
980
+ "angle": 0,
981
+ "content": "V. CONCLUSION"
982
+ },
983
+ {
984
+ "type": "text",
985
+ "bbox": [
986
+ 0.503,
987
+ 0.287,
988
+ 0.923,
989
+ 0.422
990
+ ],
991
+ "angle": 0,
992
+ "content": "We have presented a forward-looking perspective on generative information retrieval-inspired intelligent communications and networking, emphasizing the role of retrieval in enhancing agentic AI for telecom systems. We have provided a comprehensive review of retrieval strategies. Additionally, we have reviewed recent retrieval-based studies in communications and networking. Then, we have introduced an LLM-based agentic contextual retrieval framework, which integrates multi-source knowledge retrieval, structured reasoning, and self-validation."
993
+ },
994
+ {
995
+ "type": "title",
996
+ "bbox": [
997
+ 0.665,
998
+ 0.44,
999
+ 0.762,
1000
+ 0.453
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "REFERENCES"
1004
+ },
1005
+ {
1006
+ "type": "ref_text",
1007
+ "bbox": [
1008
+ 0.515,
1009
+ 0.459,
1010
+ 0.922,
1011
+ 0.496
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "[1] A. Maatouk, N. Piovesan et al., \"Large language models for telecom: Forthcoming impact on the industry,\" IEEE Commun. Mag., vol. 63, no. 1, pp. 62-68, 2025."
1015
+ },
1016
+ {
1017
+ "type": "ref_text",
1018
+ "bbox": [
1019
+ 0.514,
1020
+ 0.496,
1021
+ 0.921,
1022
+ 0.517
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "[2] S. Sivakumar, \"Agentic AI in predictive AIOps: Enhancing IT autonomy and performance,\" IJSRM, vol. 12, no. 11, pp. 1631-1638, 2024."
1026
+ },
1027
+ {
1028
+ "type": "ref_text",
1029
+ "bbox": [
1030
+ 0.514,
1031
+ 0.518,
1032
+ 0.921,
1033
+ 0.551
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "[3] R. Zhang et al., \"Generative AI agents with large language model for satellite networks via a mixture of experts transmission,\" IEEE J. Sel. Area. Comm., vol. 42, no. 12, pp. 3581-3596, 2024."
1037
+ },
1038
+ {
1039
+ "type": "ref_text",
1040
+ "bbox": [
1041
+ 0.514,
1042
+ 0.552,
1043
+ 0.921,
1044
+ 0.585
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "[4] K. Dev, S. A. Khowaja, E. Zeydan, and M. Debbah, “Advanced architectures integrated with agentic AI for next-generation wireless networks,” arXiv preprint arXiv:2502.01089, 2025."
1048
+ },
1049
+ {
1050
+ "type": "ref_text",
1051
+ "bbox": [
1052
+ 0.514,
1053
+ 0.586,
1054
+ 0.921,
1055
+ 0.608
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "[5] A. Singh et al., \"Agentic retrieval-augmented generation: A survey on agentic RAG,\" arXiv preprint arXiv:2501.09136, 2025."
1059
+ },
1060
+ {
1061
+ "type": "ref_text",
1062
+ "bbox": [
1063
+ 0.514,
1064
+ 0.609,
1065
+ 0.921,
1066
+ 0.631
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "[6] S. Anupam, A. Shypula, and O. Bastani, \"LLM program optimization via retrieval augmented search,\" arXiv preprint arXiv:2501.18916, 2025."
1070
+ },
1071
+ {
1072
+ "type": "ref_text",
1073
+ "bbox": [
1074
+ 0.514,
1075
+ 0.632,
1076
+ 0.921,
1077
+ 0.664
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "[7] R. Zhang, H. Du, Y. Liu et al., \"Interactive AI with retrieval-augmented generation for next generation networking,\" IEEE Network, vol. 38, no. 6, pp. 414-424, 2024."
1081
+ },
1082
+ {
1083
+ "type": "ref_text",
1084
+ "bbox": [
1085
+ 0.514,
1086
+ 0.665,
1087
+ 0.921,
1088
+ 0.687
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "[8] H. Zeng et al., \"Federated recommendation via hybrid retrieval augmented generation,\" arXiv preprint arXiv:2403.04256, 2024."
1092
+ },
1093
+ {
1094
+ "type": "ref_text",
1095
+ "bbox": [
1096
+ 0.514,
1097
+ 0.688,
1098
+ 0.921,
1099
+ 0.71
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "[9] S. Tang et al., “Retrieval-augmented generation for GenAI-enabled semantic communications,” arXiv preprint arXiv:2412.19494, 2024."
1103
+ },
1104
+ {
1105
+ "type": "ref_text",
1106
+ "bbox": [
1107
+ 0.508,
1108
+ 0.711,
1109
+ 0.921,
1110
+ 0.744
1111
+ ],
1112
+ "angle": 0,
1113
+ "content": "[10] Y. Xiong et al., \"When graph meets retrieval augmented generation for wireless networks: A tutorial and case study,\" arXiv preprint arXiv:2412.07189, 2024."
1114
+ },
1115
+ {
1116
+ "type": "ref_text",
1117
+ "bbox": [
1118
+ 0.508,
1119
+ 0.745,
1120
+ 0.921,
1121
+ 0.777
1122
+ ],
1123
+ "angle": 0,
1124
+ "content": "[11] T. Kagaya et al., \"RAP: Retrieval-augmented planning with contextual memory for multimodal LLM agents,\" arXiv preprint arXiv:2402.03610, 2024."
1125
+ },
1126
+ {
1127
+ "type": "ref_text",
1128
+ "bbox": [
1129
+ 0.508,
1130
+ 0.779,
1131
+ 0.921,
1132
+ 0.811
1133
+ ],
1134
+ "angle": 0,
1135
+ "content": "[12] A. Maatouk et al., \"Teleqna: A benchmark dataset to assess large language models telecommunications knowledge,\" arXiv preprint arXiv:2310.15051, 2023."
1136
+ },
1137
+ {
1138
+ "type": "ref_text",
1139
+ "bbox": [
1140
+ 0.508,
1141
+ 0.812,
1142
+ 0.921,
1143
+ 0.845
1144
+ ],
1145
+ "angle": 0,
1146
+ "content": "[13] X. Li, G. Dong, J. Jin, Y. Zhang, Y. Zhou, Y. Zhu, P. Zhang, and Z. Dou, \"Search-o1: Agentic search-enhanced large reasoning models,\" arXiv preprint arXiv:2501.05366, 2025."
1147
+ },
1148
+ {
1149
+ "type": "ref_text",
1150
+ "bbox": [
1151
+ 0.508,
1152
+ 0.846,
1153
+ 0.921,
1154
+ 0.88
1155
+ ],
1156
+ "angle": 0,
1157
+ "content": "[14] S. Shankar, T. Chambers, T. Shah, A. G. Parameswaran, and E. Wu, \"Docetl: Agentic query rewriting and evaluation for complex document processing,\" arXiv preprint arXiv:2410.12189, 2024."
1158
+ },
1159
+ {
1160
+ "type": "ref_text",
1161
+ "bbox": [
1162
+ 0.508,
1163
+ 0.88,
1164
+ 0.921,
1165
+ 0.914
1166
+ ],
1167
+ "angle": 0,
1168
+ "content": "[15] F. Ayed, A. Maatouk, N. Piovesan, A. De Domenico, M. Debbah, and Z.-Q. Luo, “Hermes: A large language model framework on the journey to autonomous networks,” arXiv preprint arXiv:2411.06490, 2024."
1169
+ },
1170
+ {
1171
+ "type": "list",
1172
+ "bbox": [
1173
+ 0.508,
1174
+ 0.459,
1175
+ 0.922,
1176
+ 0.914
1177
+ ],
1178
+ "angle": 0,
1179
+ "content": null
1180
+ }
1181
+ ]
1182
+ ]
2502.16xxx/2502.16866/f7b20614-d83e-47e9-9174-14dc0a1174b1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2675f8f59c462343088b7b68614c2cb246ea13e8055d348e7a00e845c344350b
3
+ size 1075423
2502.16xxx/2502.16866/full.md ADDED
@@ -0,0 +1,158 @@
1
+ # Toward Agentic AI: Generative Information Retrieval Inspired Intelligent Communications and Networking
2
+
3
+ Ruichen Zhang, Shunpu Tang, Yinqiu Liu, Dusit Niyato, Fellow, IEEE, Zehui Xiong, Sumei Sun, Fellow, IEEE, Shiwen Mao, Fellow, IEEE, and Zhu Han, Fellow, IEEE
4
+
5
+ Abstract—The increasing complexity and scale of modern telecommunications networks demand intelligent automation to enhance efficiency, adaptability, and resilience. Agentic AI has emerged as a key paradigm for intelligent communications and networking, enabling AI-driven agents to perceive, reason, decide, and act within dynamic networking environments. However, effective decision-making in telecom applications, such as network planning, management, and resource allocation, requires integrating retrieval mechanisms that support multi-hop reasoning, historical cross-referencing, and compliance with evolving 3GPP standards. This article presents a forward-looking perspective on generative information retrieval-inspired intelligent communications and networking, emphasizing the role of knowledge acquisition, processing, and retrieval in agentic AI for telecom systems. We first provide a comprehensive review of generative information retrieval strategies, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and agentic contextual retrieval. We then analyze their advantages, limitations, and suitability for various networking scenarios. Next, we present a survey about their applications in communications and networking. Additionally, we introduce an agentic contextual retrieval framework to enhance telecom-specific planning by integrating multi-source retrieval, structured reasoning, and self-reflective validation. Experimental results demonstrate that our framework significantly improves answer accuracy, explanation consistency, and retrieval efficiency compared to traditional and semantic retrieval methods. Finally, we outline future research directions.
6
+
7
+ # I. INTRODUCTION
8
+
9
+ According to a Cisco report, the number of connected devices is expected to surpass 125 billion by $2030^{1}$ , requiring networking systems to process massive amounts of data while maintaining seamless interactions across diverse, heterogeneous infrastructures. To support this evolution, modern networks must incorporate intelligent decision-making
10
+
11
+ R. Zhang, S. Tang, Y. Liu, and D. Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: ruichen.zhang@ntu.edu.sg, n2409411h@e.ntu.edu.sg, yinqiu001@e.ntu.edu.sg, dniyato@ntu.edu.sg).
12
+ Z. Xiong is with the Computer Science and Design Pillar, Singapore University of Technology and Design, Singapore (e-mail: zehui_xiong@sutd.edu.sg).
13
+ S. Sun is with the Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore (e-mail: sunsm@i2r.a-star.edu.sg).
14
+ S. Mao is with the Department of Electrical and Computer Engineering, Auburn University, Auburn, AL 36849, USA (e-mail: smao@ieee.org).
15
+ Z. Han is with the University of Houston, Houston TX 77004, USA, and also with the Department of Computer Science and Engineering, Kyung Hee University, Seoul 446701, South Korea (e-mail: hanzhu22@gmail.com).
16
+ <sup>1</sup>https://blogs.cisco.com/industrial-iot/iot-is-creating-massive-growth-opportunities
17
+
18
+ mechanisms that enable autonomous control, adaptive resource management, and real-time optimization [1]. Agentic AI has emerged as a promising paradigm for autonomous network intelligence, addressing the limitations of traditional rule-based and static AI architectures. Introduced by OpenAI $^2$ , DeepSeek $^3$ , and other research institutions, agentic AI refers to autonomous agents that can perceive, reason, act, and continuously learn from their environments, allowing them to dynamically optimize network configurations, manage resources, and mitigate failures in large-scale systems [2]. Unlike conventional AI, which operates on fixed rules or pretrained models, agentic AI leverages large language models (LLMs), generative AI-based decision-making, and multi-embodied AI agent collaboration to facilitate self-organizing, highly adaptive network architectures [3]. For example, in [4], the authors explored intent-based networking with agentic AI, where autonomous agents dynamically updated network management policies based on user-defined intents, achieving a $32\%$ improvement in QoS requirements and a $40\%$ reduction in manual intervention for network reconfiguration. Despite its potential, agentic AI faces critical limitations, particularly in handling large-scale network data, maintaining long-term memory, and retrieving historical insights for enhanced decision-making. Specifically, LLM-based agents often lack efficient information retrieval methods, resulting in hallucinations, context drift, and response inconsistency, which undermine their reliability in real-world networking applications.
19
+
20
+ To mitigate these limitations, generative information retrieval has been proposed as a fundamental enhancement for agentic AI-driven network intelligence [5]. Unlike traditional retrieval techniques, which rely on static keyword searches and limited contextual matching, generative information retrieval dynamically retrieves, synthesizes, and integrates multi-source knowledge, enabling memory-augmented, context-aware reasoning. For instance, in real-world networking applications, retrieval-augmented AI systems can access historical network logs, regulatory standards, and prior optimization strategies, allowing them to infer multi-hop dependencies across diverse network data sources [6]. This approach significantly enhances decision accuracy, adaptability, and long-term contextual understanding. An example of generative information retrieval in
21
+
22
+ $^{2}$ https://openai.com/
23
+ <sup>3</sup>https://www.deepseek.com/
24
+
25
+ ![](images/fab36b19c36a4adc0f425bf9783ceb86800cc59ad2b4b095bbda03c986ecc738.jpg)
26
+ Fig. 1. Overview of key retrieval strategies in networking. The figure highlights the methodologies, key components, and applications of different approaches, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and agentic contextual retrieval.
27
+
28
+ practice is Meta AI's LlamaIndex<sup>4</sup>, which enables structured document retrieval for LLM-based applications. It allows AI agents to process and integrate domain-specific knowledge in real-time.
29
+
30
+ Building on these foundations, this article provides a forward-looking perspective on agentic contextual retrieval and its role in enhancing information retrieval and decision-making within 3GPP-driven autonomous networking environments. Unlike conventional retrieval-augmented AI frameworks, the proposed approach integrates multi-source retrieval, structured reasoning, and self-reflective validation, thereby ensuring improved retrieval accuracy, contextual coherence, and decision consistency. To the best of our knowledge, this is the first work to explore the potential of agentic contextual retrieval for 3GPP-based telecommunications troubleshooting and real-time standard-compliant decision-making. The key contributions of this work are summarized as follows.
31
+
32
+ Firstly, we summarize different retrieval strategies, including traditional retrieval, hybrid retrieval, semantic retrieval, knowledge-based retrieval, and demonstrate the most advanced agentic contextual retrieval. We analyze their applications in networking environments, identifying key challenges and the role of retrieval in enhancing network intelligence. Secondly, we provide a comprehensive review of retrieval-based methodologies in networking and communications, categorizing existing works based on their scenarios, proposed techniques, and publication timelines. This analysis highlights research trends and the evolving role of retrieval in intelligent communications and networking. Finally, we introduce an LLM-based framework that integrates agentic contextual retrieval to improve telecom-specific planning and decision-making. This framework incorporates multi-source knowledge retrieval, reasoning-based decision augmentation, and contextual adaptation, leading to substantial improvements in network optimization, fault diagnosis, and adaptive policy enforcement.
33
+
34
+ # II. DIFFERENT RETRIEVAL METHODS FOR NETWORKING
35
+
36
+ In intelligent networking, retrieval systems help process vast amounts of unstructured data, optimize spectrum usage, and
37
+
38
+ support AI-based network controllers [5]. In edge intelligence, retrieval techniques facilitate distributed learning, enhance federated AI models, and provide real-time recommendations with minimal latency. As shown in Fig. 1, retrieval methods have evolved from traditional keyword-based approaches to hybrid and context-aware techniques, each addressing specific challenges in networking environments.
39
+
40
+ # A. Traditional Information Retrieval
41
+
42
+ Traditional information retrieval is based on matching query terms with exact keywords in the dataset, often using simple yet effective algorithms such as Boolean matching or vector space models. These methods calculate document relevance by scoring terms according to their frequency within a document (i.e., term frequency, TF) and across the entire dataset (i.e., inverse document frequency, IDF). The resulting relevance scores rank documents based on their alignment with the query. This approach works well in structured datasets with clear and consistent keyword distributions, such as early library catalog systems or archival searches. However, it does not account for the semantic meaning of terms or the broader context in which the query occurs. To address such issues, for example, Salton et al. [7] proposed a foundational vector space model where documents and queries are represented as vectors in a multi-dimensional space. The similarity between these vectors is computed using cosine similarity, allowing for efficient ranking of documents based on query relevance. Experimental results demonstrated that the vector space model improved retrieval precision by $15\%$ compared to basic Boolean retrieval methods. However, when applied to dynamic datasets such as network resource management logs, its reliance on exact matches caused about a $20\%$ drop in recall for queries involving synonyms or contextually related terms. These limitations highlight the need for more adaptive retrieval methods in real-time scenarios.
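+ 
+ As a concrete illustration of the vector space model described above, the following minimal Python sketch ranks a few invented documents against a query using TF-IDF weights and cosine similarity; it assumes scikit-learn is installed, and the texts are placeholders rather than data from [7].
+ 
+ ```python
+ # Vector-space-model sketch: TF-IDF weighting + cosine-similarity ranking.
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.metrics.pairwise import cosine_similarity
+ 
+ docs = [
+     "spectrum allocation policy for 5G base stations",
+     "library catalog search over archival records",
+     "network resource management logs and alarms",
+ ]
+ query = ["spectrum policy for base stations"]
+ 
+ vectorizer = TfidfVectorizer()
+ doc_vectors = vectorizer.fit_transform(docs)        # TF * IDF weights per document
+ query_vector = vectorizer.transform(query)
+ 
+ scores = cosine_similarity(query_vector, doc_vectors)[0]
+ for score, doc in sorted(zip(scores, docs), reverse=True):
+     print(f"{score:.3f}  {doc}")
+ ```
+ 
+ A query phrased with synonyms (e.g., "frequency band assignment") would receive a near-zero score here, which is exactly the recall limitation on synonym-heavy queries noted above.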
43
+
44
+ # B. Hybrid Retrieval
45
+
46
+ Hybrid retrieval combines traditional retrieval methods, such as TF-IDF scoring, with semantic embeddings generated by
47
+
48
+ TABLE I: COMPARISON OF KEY RETRIEVAL STRATEGIES.
49
+
50
+ <table><tr><td>Retrieval methods</td><td>Training strategies</td><td>Applicable network types</td><td>User demands</td><td>Agentic AI applications</td><td>Application examples</td></tr><tr><td>Traditional Information retrieval</td><td>●Based on explicit keyword matching or Boolean logic.</td><td>●Works well in relatively static networks or environments [6].</td><td>●Focused on delivering relevant results based on exact keyword matches.</td><td>●Limited agent-based applications but can be used in simple chatbot systems.</td><td>●Elasticsearch (https://github.com/elastic/elasticsearch)●Apache Lucene (https://github.com/apache/lucene)</td></tr><tr><td>Hybrid retrieval</td><td>●Combines traditional keyword-based retrieval and machine learning models (e.g., TF-IDF and BERT).</td><td>●Works well in dynamic environments where content is constantly changing, and user preferences need to be understood [7].</td><td>●Users demand a more refined search experience where results are also tailored to personal preferences.</td><td>●Chatbots and recommendation systems use hybrid retrieval to suggest products, content, or responses.</td><td>●Recommendation System (https://github.com/lyst/lightfm)●Nixiesearch (https://github.com/nixiesearch/nixiesearch)</td></tr><tr><td>Semantic retrieval</td><td>●Uses deep learning (e.g., word embeddings like Word2Vec and BERT, etc.) to understand the meaning behind the query and the documents.</td><td>●Works well in environments where understanding context is important [8].</td><td>●Users demand results that understand the intent behind their queries rather than just keyword matches.</td><td>●Widely used in AI agents like virtual assistants (Google Assistant, Siri), or knowledge-based agents.</td><td>●Semantic Search Engine (https://github.com/deepset-ai/haystack)●Txtai (https://github.com/neuml/txtai)</td></tr><tr><td>Knowledge-based retrieval</td><td>●Uses rule-based approaches and inference engines to retrieve relevant information based on predefined knowledge structures.</td><td>●Primarily used in static or semi-static networks, where domain knowledge remains relatively constant but is highly structured [9].</td><td>●Users expect highly accurate, factual, and structured information based on established knowledge.</td><td>●AI agents can act as expert consultants in areas like healthcare (e.g., IBM Watson) or legal systems.</td><td>●Knowledge Graph Search (https://github.com/neo4j/neo4j)●SciTLDR (https://github.com/allenai/scitldr)</td></tr><tr><td>Agentic contextual retrieval</td><td>●Methods like reinforcement learning from human feedback (RLHF) for adaptive retrieval or meta-learning for fast adaptation.</td><td>●Works well in multi-agent and dynamic environments where context is constantly evolving [10].</td><td>●Users expect adaptive and personalized retrieval based on evolving queries.</td><td>●Used in autonomous AI assistants (ChatGPT Agents, Claude, Google Gemini).</td><td>●AI-Powered Coding Assistants (https://github.com/features/copilot)●ModelScope-Agent (https://github.com/modelscope/modelscope-agent)</td></tr></table>
51
+
52
+ pre-trained deep learning models such as BERT or GPT. This hybrid approach addresses the limitations of traditional methods by incorporating contextual understanding while maintaining computational efficiency. In hybrid retrieval, the process typically contains two stages: a coarse filtering stage, which uses lightweight traditional methods to identify a subset of candidate documents, followed by a re-ranking stage where semantic embeddings are applied to refine results. This two-stage approach ensures that hybrid retrieval is both efficient and accurate, making it particularly suitable for environments where computational resources are limited but semantic depth is required. In networking applications, hybrid retrieval can be particularly useful for AI-driven network monitoring and anomaly detection, where efficient pre-filtering combined with deep learning enables fast yet context-aware decision-making. For example, Zeng et al. [8] proposed a federated hybrid retrieval framework designed to integrate traditional TF-IDF filtering with semantic re-ranking using BERT embeddings. Their system processed candidate documents in two stages: first, TF-IDF was used to rapidly filter out irrelevant data at mobile edge nodes, significantly reducing the search space; second, the filtered candidates were semantically ranked using embeddings. Experimental results showed that this approach improved retrieval precision by $25\%$ and reduced computational latency by $20\%$ compared to other classical retrieval systems.
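+ 
+ The two-stage coarse-filter/re-rank pattern can be outlined as follows; this is an illustrative single-machine sketch (not the federated system of [8]), assuming scikit-learn and sentence-transformers are available and using an invented corpus.
+ 
+ ```python
+ # Hybrid retrieval sketch: cheap TF-IDF pre-filter, then dense re-ranking.
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.metrics.pairwise import cosine_similarity
+ from sentence_transformers import SentenceTransformer, util
+ 
+ docs = [
+     "handover failure counters for macro cell 12",
+     "alarm log: abnormal latency during cell handover",
+     "weekly marketing report for enterprise customers",
+     "baseline latency statistics for the core network",
+ ]
+ query = "anomaly in cell handover latency"
+ 
+ # Stage 1: coarse keyword filtering keeps only the top-2 candidates.
+ tfidf = TfidfVectorizer()
+ doc_matrix = tfidf.fit_transform(docs)
+ coarse_scores = cosine_similarity(tfidf.transform([query]), doc_matrix)[0]
+ candidates = [docs[i] for i in coarse_scores.argsort()[::-1][:2]]
+ 
+ # Stage 2: semantic re-ranking of the surviving candidates only.
+ encoder = SentenceTransformer("all-MiniLM-L6-v2")
+ rerank = util.cos_sim(encoder.encode(query, convert_to_tensor=True),
+                       encoder.encode(candidates, convert_to_tensor=True))[0]
+ print(candidates[int(rerank.argmax())])
+ ```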
53
+
54
+ # C. Semantic Retrieval
55
+
56
+ Semantic retrieval uses deep neural networks, particularly transformer-based architectures such as BERT, to encode queries and documents into a shared embedding space. This embedding space captures the semantic relationships between terms, enabling the retrieval system to understand the intent behind the query rather than relying solely on exact keyword matches. Semantic retrieval excels in handling complex queries that involve ambiguous or domain-specific language, such as medical diagnostics and network troubleshooting. For example, Tang et al. [9] proposed a semantic retrieval
57
+
58
+ framework leveraging BERT-based embeddings to optimize resource allocation in wireless networks. By encoding queries and documents into a shared semantic space, the system retrieved contextually related documents even for complex queries such as "dynamic spectrum sharing in 5G". Their experiments demonstrated a $32\%$ increase in recall compared to hybrid retrieval methods and an $18\%$ improvement in precision.
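+ 
+ A minimal sketch of such embedding-based matching is shown below; it assumes the sentence-transformers package, and the candidate sentences are invented. Note how the relevant passage shares almost no keywords with the query.
+ 
+ ```python
+ # Semantic retrieval sketch: score candidates by meaning, not keywords.
+ from sentence_transformers import SentenceTransformer, util
+ 
+ encoder = SentenceTransformer("all-mpnet-base-v2")
+ query = "dynamic spectrum sharing in 5G"
+ candidates = [
+     "Coordinated reuse of licensed frequency bands between LTE and NR cells",
+     "Scheduling container workloads on an edge Kubernetes cluster",
+ ]
+ scores = util.cos_sim(encoder.encode(query, convert_to_tensor=True),
+                       encoder.encode(candidates, convert_to_tensor=True))[0]
+ for score, text in zip(scores, candidates):
+     print(f"{float(score):.3f}  {text}")
+ ```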
59
+
60
+ # D. Knowledge-Based Retrieval
61
+
62
+ Knowledge-based retrieval integrates domain-specific ontologies and structured knowledge graphs to enhance retrieval performance. These systems excel in reasoning tasks by explicitly leveraging predefined relationships between entities, providing interpretable results that are often critical in regulated domains such as healthcare, finance, and telecommunications. In knowledge-based retrieval, it is performed by querying the knowledge graph to extract entities and their relationships that match the query context. This method allows for reasoning over linked data, enabling the retrieval of not just relevant documents but also actionable insights based on the relationships in the dataset. For example, Xiong et al. [10] proposed a knowledge graph-based retrieval system for wireless spectrum management. Their framework utilized a graph structure where nodes represented entities such as "spectrum bands," "user demands," and "interference levels," while edges captured relationships such as "interferes with" or "assigned to." The key advantage of this approach lies in its ability to provide structured, explainable decisions based on predefined rules. The system achieved a $25\%$ improvement in spectrum allocation efficiency and a $30\%$ reduction in interference conflicts compared to heuristic-based methods.
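+ 
+ The style of graph reasoning described in [10] can be imitated in a few lines; the sketch below uses networkx with invented nodes and relations and is not the authors' implementation.
+ 
+ ```python
+ # Knowledge-graph retrieval sketch: traverse typed relations between
+ # spectrum bands, user demands, and interference sources.
+ import networkx as nx
+ 
+ kg = nx.DiGraph()
+ kg.add_edge("3.5 GHz band", "eMBB slice demand", relation="assigned_to")
+ kg.add_edge("3.5 GHz band", "fixed satellite service", relation="interferes_with")
+ kg.add_edge("28 GHz band", "URLLC slice demand", relation="assigned_to")
+ 
+ def bands_serving(demand):
+     """Bands assigned to a demand, together with their interference conflicts."""
+     answer = []
+     for band, target, attrs in kg.edges(data=True):
+         if attrs["relation"] == "assigned_to" and target == demand:
+             conflicts = [v for _, v, a in kg.edges(band, data=True)
+                          if a["relation"] == "interferes_with"]
+             answer.append((band, conflicts))
+     return answer
+ 
+ print(bands_serving("eMBB slice demand"))
+ # -> [('3.5 GHz band', ['fixed satellite service'])]
+ ```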
63
+
64
+ # E. Agentic Contextual Retrieval
65
+
66
+ Agentic contextual retrieval leverages intelligent agent-based control mechanisms to dynamically adjust retrieval strategies based on task-specific requirements, multimodal
67
+
68
+ ![](images/a4c32f6074e92f6d7720240033cd20dea9f3dcb36b54fa4862d32ebae0bd4673.jpg)
69
+ Fig. 2. A summary of recent retrieval methods in communications and networking, which provides an overview of various proposals, research scenarios, and levels of human-AI interaction.
70
+
71
+ data integration, and real-time environmental changes. Unlike traditional or semantic retrieval methods, which rely on static queries and predefined indexing, this approach enables adaptive, goal-driven information extraction that continuously refines itself based on evolving conditions. By incorporating real-time system states, historical patterns, and structured knowledge representations, agentic contextual retrieval ensures high adaptability and context-aware decision-making, making it particularly suited for applications in network optimization, autonomous systems, and intelligent fault diagnostics. A key advantage of agentic contextual retrieval is its ability to enable autonomous decision-making agents that actively monitor, retrieve, and reason over multiple data sources to enhance performance in complex, dynamic environments. For example, Kagaya et al. [11] proposed a retrieval framework for autonomous driving, where an agent-driven control mechanism integrated LiDAR, GPS, real-time traffic updates, and weather conditions to dynamically adjust navigation strategies. By enabling real-time, intelligent retrieval and control, their system reduced recalibration time by $40\%$ and improved navigation accuracy by $28\%$ .
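+ 
+ The control loop behind agentic contextual retrieval can be summarized by the following toy sketch; every function is a stub introduced for illustration (a real system would call actual retrievers and an LLM), so the code only demonstrates the monitor-retrieve-refine cycle.
+ 
+ ```python
+ # Agentic contextual retrieval sketch: monitor context, pick a strategy,
+ # score the result, and refine the query until confident. All functions
+ # below are illustrative stubs, not a production retriever or LLM.
+ 
+ def retrieve_keyword(query, corpus):
+     return [d for d in corpus if any(w in d.lower() for w in query.lower().split())]
+ 
+ def retrieve_semantic(query, corpus):
+     overlap = lambda d: len(set(query.lower().split()) & set(d.lower().split()))
+     return sorted(corpus, key=overlap, reverse=True)[:2]   # stand-in for embeddings
+ 
+ def confidence(hits):
+     return 0.9 if hits else 0.0
+ 
+ def reformulate(query):
+     return query + " interference mitigation"              # stand-in for an LLM rewrite
+ 
+ def agentic_retrieve(query, corpus, context, max_rounds=3):
+     hits = []
+     for _ in range(max_rounds):
+         retriever = retrieve_keyword if context.get("congested") else retrieve_semantic
+         hits = retriever(query, corpus)
+         if confidence(hits) >= 0.8:      # stop once the agent is confident
+             return hits
+         query = reformulate(query)       # otherwise adapt the query and retry
+     return hits
+ 
+ corpus = ["co-channel interference report for cell 12", "daily traffic summary"]
+ print(agentic_retrieve("interference", corpus, {"congested": True}))
+ ```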
72
+
73
+ # F. Retrieval Comparison and Lessons Learned
74
+
75
+ Retrieval methods vary significantly in their methodologies, applications, and suitability for different networking scenarios. Specifically, traditional retrieval, which relies on explicit keyword matching, is well-suited for static local network management, where queries are simple, computational resources are limited, and speed is prioritized. Hybrid retrieval combines keyword-based search with machine learning models, making it effective for dynamic network environments, such as adaptive caching or content distribution, where user preferences evolve over time. Semantic retrieval, powered by deep learning models, enhances intent-driven network diagnostics by capturing query context, making it particularly useful for automated fault detection and troubleshooting in telecom networks. Knowledge-based retrieval, leveraging structured inference models, supports rule-based network security and access control, where highly accurate, structured decision-making is critical. Finally, agentic contextual retrieval offers
76
+
77
+ adaptive and real-time decision support in multi-agent network control systems, where dynamic environmental factors, such as interference levels or traffic congestion, require continuous learning and adjustment [11]. Table I summarizes these strategies, highlighting their core features, training methods, and example applications.
78
+
79
+ Moreover, we conduct a review of recent retrieval-based approaches in communications and networking from 2023 to late 2024, as summarized in Fig. 2. Our analysis categorizes retrieval strategies into traditional, hybrid, semantic, knowledge-based, and agentic contextual retrieval, highlighting their applications across various domains, including wireless communications, network optimization, and intelligent decision-making. While retrieval-augmented methods have been increasingly integrated into AI-driven network resource management and semantic communication, we observe that agentic contextual retrieval remains largely unexplored for telecommunications-specific applications. Moreover, while [11] demonstrates agentic contextual retrieval for autonomous driving control, there is currently no direct implementation tailored for communication networks and telecom infrastructure. To fill this gap, the next section introduces our proposed framework, which leverages agentic contextual retrieval to enhance intelligent decision-making, troubleshooting, and autonomous adaptation in telecommunications and networking systems.
80
+
81
+ # III. CASE STUDY: AGENTIC CONTEXTUAL RETRIEVAL FOR NETWORKING
82
+
83
+ # A. Motivation
84
+
85
+ In next-generation communications and networking, efficient resource allocation, adaptive service provisioning, and intelligent decision-making are crucial for optimizing user experience and network efficiency. Modern communication systems are shifting towards intent-driven networking, where mobile users express high-level requirements in natural language, and the network autonomously interprets and executes these requests. However, this paradigm introduces significant challenges in bridging the gap between user intents, structured communication standards, and real-time network
86
+
87
+ ![](images/63738ac423ae7c446522285894ff5eb8bbe4643fcdee658f422399b94b8625e3.jpg)
88
+ Fig. 3. Illustration of the agentic contextual retrieval enhanced intelligent base station for troubleshooting and decision-making. The framework follows a structured four-step workflow: (A) Query understanding and reformulation ensure alignment with 3GPP terminology using LLM-based query expansion. (B) Multi-source knowledge retrieval extracts relevant information from both structured (e.g., 3GPP standards) and unstructured (e.g., online sources) datasets. (C) Contextual evidence aggregation and reasoning synthesize retrieved knowledge into structured responses using chain-of-thought reasoning. (D) Decision-making and self-validation enhance accuracy through confidence-based verification and iterative refinement, reducing hallucinations and improving response consistency.
89
+
90
+ configurations. A key challenge lies in mapping natural language intent descriptions to actionable network configurations, requiring an understanding of both human semantics and telecommunications-specific knowledge. Traditional rule-based methods or static intent templates are insufficient in handling diverse user demands and evolving network conditions [4]. LLMs offer a promising solution due to their strong natural language understanding (NLU) and reasoning capabilities. However, LLMs lack domain-specific knowledge in telecommunications, such as 3GPP standards, intent translation templates, and network control logic. Consequently, their direct application to network automation remains limited by knowledge incompleteness, retrieval inefficiency, and contextual inconsistency.
91
+
92
+ To address these challenges, we propose a retrieval-enhanced intelligent base station architecture, where the network dynamically retrieves, synthesizes, and applies knowledge from 3GPP standards, network logs, and external telecom repositories to enhance decision-making. Specifically, the system employs a hybrid retrieval framework to convert user-generated intents into structured network actions, using a template-based approach that aligns with communication paradigms outlined in 3GPP [12]. In this framework, user requests (e.g., "I need ultra-low latency for cloud gaming") are processed by the network's AI module, which retrieves relevant telecom policies and configurations before generating a customized communication plan. Despite the advantages of retrieval-augmented LLMs, conventional retrieval-augmented generation (RAG) techniques face critical limitations in telecom-specific applications, including: (i) Contextual Ambiguity: Simple keyword-based retrieval struggles to retrieve relevant 3GPP policies and network parameters, as user intents often involve multiple layers of contextual interpretation. (ii) Data Sparsity: Telecommunications standards and policy documents are highly structured, yet spread across multiple releases and fragmented into different standardization documents. (iii) Retrieval Inefficiency: Traditional retrieval
93
+
94
+ approaches lack multi-hop reasoning, failing to link user intents with both historical network behavior and real-time conditions.
95
+
96
+ To overcome these limitations, we introduce an agentic contextual retrieval framework, which integrates multi-source knowledge retrieval, structured reasoning, and self-reflective validation to enhance intent-driven networking. Our framework enables intelligent base stations to map user intents to network configurations in real-time, leveraging LLM-powered decision-making while ensuring alignment with 3GPP compliance, traffic optimization strategies, and real-world deployment policies.
97
+
98
+ # B. Agentic Contextual Retrieval Framework
99
+
100
+ As shown in Fig. 3, the deployment of the agentic contextual retrieval framework follows a structured four-step workflow, designed to enhance the retrieval, reasoning, and validation of knowledge specific to 3GPP standards and telecommunications networks.
101
+
102
+ 1) Knowledge Preparation and Query Understanding: The system first loads 3GPP standards and network documentation from a database, segments them into context-aware knowledge chunks, and vectorizes them using sentence-transformer embeddings. To enable efficient semantic retrieval, the vectorized knowledge chunks are indexed using a vector database, allowing for efficient similarity searches. After that, once a query is received, the system analyzes user intent and performs query reformulation, ensuring that the query aligns with 3GPP-defined communication paradigms and technical configurations. In practice, telecommunications queries often contain ambiguous terms, incomplete phrasing, or require historical cross-referencing across multiple 3GPP releases. Therefore, it is necessary to fully understand the user intent and the key concepts in this context to improve retrieval accuracy. Specifically, we can use LLMs to realize that and ensure longitudinal consistency when retrieving regulatory and technical specifications [13]. In our experimental setup, user
103
+
104
+ ![](images/aa7c8b674ef129d6247e35c9bddd339de200c51297552f4109ce9c561348d55d.jpg)
105
+ Fig. 4. Performance comparison of Agentic Contextual Retrieval against baseline methods, including Qwen-Max without retriever, traditional retrieval, and semantic retrieval.
106
+
107
+ intent queries, such as customized communication service requests (e.g., "I need ultra-reliable low-latency communication for industrial automation"), are first parsed and the key concepts such as "ultra-reliable low-latency", "role of URLLC in industrial automation" are extracted.
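+ 
+ A minimal sketch of this preparation stage is given below, reusing the chunking parameters and models named in Section III-C (1000-character chunks with a 100-character overlap, all-mpnet-base-v2 embeddings, and a FAISS index); the input file name is a placeholder, and the query reformulation is hard-coded rather than produced by an LLM.
+ 
+ ```python
+ # Step-1 sketch: chunk a 3GPP document, embed the chunks, index them with
+ # FAISS, and answer a (manually reformulated) query by similarity search.
+ import faiss
+ import numpy as np
+ from sentence_transformers import SentenceTransformer
+ 
+ def chunk(text, size=1000, overlap=100):
+     step = size - overlap
+     return [text[i:i + size] for i in range(0, max(len(text) - overlap, 1), step)]
+ 
+ raw_text = open("ts_23_501.txt").read()            # placeholder document path
+ chunks = chunk(raw_text)
+ 
+ encoder = SentenceTransformer("all-mpnet-base-v2")
+ vectors = encoder.encode(chunks, normalize_embeddings=True)
+ 
+ index = faiss.IndexFlatIP(vectors.shape[1])        # inner product = cosine on normalized vectors
+ index.add(np.asarray(vectors, dtype="float32"))
+ 
+ # Query understanding: an LLM would rewrite the raw intent into 3GPP wording;
+ # here the reformulated query is written by hand for illustration.
+ reformulated = "URLLC QoS requirements for industrial automation"
+ q = encoder.encode([reformulated], normalize_embeddings=True)
+ scores, ids = index.search(np.asarray(q, dtype="float32"), k=3)
+ print([chunks[i][:80] for i in ids[0]])
+ ```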
108
+
109
+ 2) Multi-Source Knowledge Retrieval: Following query optimization, the second step involves multi-source retrieval to ensure both completeness and relevance in decision-making for network configuration and policy enforcement. Next, we integrate semantic vector-based retrieval with embedding models to extract key information from 3GPP specifications, network operation policies, and real-time telecom deployment scenarios. Embedding models generate dense vector representations of text, enabling context-aware similarity search rather than relying on exact keyword matches [14]. To further improve accuracy, structured knowledge representations establish relationships between frequency bands, protocol parameters, and QoS metrics, refining query precision. Additionally, real-time retrieval from online repositories ensures access to the latest standardization updates. For instance, when retrieving information on "5G network slicing SLA guarantees," the system uses an embedding model to identify semantically relevant sections from TS 28.531 (Performance Assurance) and TS 28.554 (KPI Definitions) while incorporating recent case studies from network operators.
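+ 
+ The multi-source step can be outlined as follows; the three source functions are stubs standing in for the FAISS index, an operator policy table, and a live web search, and the returned snippets are invented for illustration.
+ 
+ ```python
+ # Step-2 sketch: merge evidence from several sources before reasoning.
+ # The three source functions are illustrative stubs, not real connectors.
+ 
+ def search_specs(query, k=3):
+     # e.g., FAISS search over 3GPP chunks, as in the Step-1 sketch
+     return [("TS 28.531 §6.2", 0.81), ("TS 28.554 §7.1", 0.78)]
+ 
+ def search_policies(query):
+     # structured lookup, e.g., an operator policy table keyed by slice type
+     return [("operator_policy: URLLC latency budget = 1 ms", 1.0)]
+ 
+ def search_web(query):
+     # placeholder for a live search over standardization updates
+     return [("blog: Rel-18 network-slicing SLA case study", 0.55)]
+ 
+ def multi_source_retrieve(query):
+     hits = search_specs(query) + search_policies(query) + search_web(query)
+     # keep the highest-scoring evidence items, regardless of source
+     return sorted(hits, key=lambda h: h[1], reverse=True)[:4]
+ 
+ print(multi_source_retrieve("5G network slicing SLA guarantees"))
+ ```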
110
+
111
+ 3) Contextual Evidence Aggregation and Reasoning: Once relevant information is retrieved, the third step focuses on contextual evidence aggregation and reasoning, where multi-source knowledge is condensed into a structured and interpretable response. Given the vast amount of information available in telecom standardization, it is crucial to eliminate redundancy, enhance clarity, and ensure that the extracted content directly addresses the query [13]. Specifically, we use an LLM-powered reasoning agent, which autonomously identifies the most relevant text segments in the retrieved content based on the reformulated query. The agent then synthesizes these segments into a concise, context-aware summary, ensuring that only the most important evidence is retained, and irrelevant or redundant information is discarded. For example, in response to a question like "What is the role of the serving network in fraud control?", the retrieved information may contain detailed descriptions of charging functions, fraud detection, and policy enforcement. Instead of presenting all these details, the agent
112
+
113
+ analyzes the content, extracts the core function of the serving network in fraud prevention, and generates a concise summary, emphasizing its role in real-time data collection and cost control.
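+ 
+ A simplified version of this aggregation step is sketched below: retrieved passages are packed into a query-focused prompt and handed to an LLM. The call_llm function is a placeholder for whichever chat-completion client is used, and its canned reply is illustrative only.
+ 
+ ```python
+ # Step-3 sketch: condense retrieved passages into a query-focused summary.
+ def call_llm(prompt: str) -> str:
+     # Placeholder: a real deployment would call an LLM API here.
+     return ("The serving network collects charging data in real time "
+             "and enforces cost limits to support fraud control.")
+ 
+ def aggregate(query: str, passages: list) -> str:
+     evidence = "\n".join(f"[{i + 1}] {p}" for i, p in enumerate(passages))
+     prompt = (
+         "You are assisting with 3GPP troubleshooting.\n"
+         f"Question: {query}\n"
+         f"Retrieved evidence:\n{evidence}\n"
+         "Summarize only the evidence relevant to the question, citing [i]."
+     )
+     return call_llm(prompt)
+ 
+ passages = [
+     "Charging functions report usage records to the billing domain.",
+     "The serving network monitors usage in real time and can suspend service when fraud is suspected.",
+ ]
+ print(aggregate("What is the role of the serving network in fraud control?", passages))
+ ```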
114
+
115
+ 4) Decision-Making and Self-Validation: The final step involves a decision-making agent that simultaneously generates both the network action recommendations and justifications based on the optimized query and refined retrieval results. This agent applies CoT reasoning to synthesize a structured response, ensuring that the explanation logically supports the answer by drawing from the retrieved evidence [15]. To enhance reliability, a self-reflection agent evaluates the generated response, critically reviewing both the answer and explanation for consistency, factual accuracy, and alignment with authoritative 3GPP standards. If inconsistencies, incomplete reasoning, or speculative conclusions are detected, the self-reflection agent challenges the response and triggers an iterative refinement loop.
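+ 
+ The generate-then-criticize loop can be expressed compactly as below; both the answer generator and the reflection check are stubs introduced here, and a real critic would test factual accuracy against the retrieved 3GPP text rather than the simple citation check shown.
+ 
+ ```python
+ # Step-4 sketch: produce an answer plus justification, then let a
+ # self-reflection pass accept it or trigger another refinement round.
+ def generate(query, evidence):
+     # Stub for a CoT-prompted LLM call.
+     return {"answer": "Activate a URLLC slice with a 1 ms latency target",
+             "reasoning": "Evidence [1] maps industrial automation to URLLC; [2] fixes the latency budget."}
+ 
+ def reflect(response, evidence):
+     # Toy critic: accept only if the reasoning cites retrieved evidence.
+     cited = any(f"[{i + 1}]" in response["reasoning"] for i in range(len(evidence)))
+     return cited, None if cited else "Reasoning does not reference the evidence."
+ 
+ def decide(query, evidence, max_rounds=2):
+     response = {}
+     for _ in range(max_rounds):
+         response = generate(query, evidence)
+         ok, critique = reflect(response, evidence)
+         if ok:
+             return response
+         query = f"{query}\nReviewer feedback: {critique}"   # refine and retry
+     return response
+ 
+ evidence = ["TS 22.104: industrial automation relies on URLLC", "Operator SLA: 1 ms end-to-end"]
+ print(decide("I need ultra-reliable low-latency communication for industrial automation", evidence))
+ ```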
116
+
117
+ # C. Simulation
118
+
119
+ Simulation Settings: Our simulation is conducted using a structured retrieval and reasoning pipeline, integrating multiple knowledge sources and agent-driven query optimization. We employ Qwen2.5-Max as the base LLM, leveraging its advanced reasoning capabilities for telecom-related question-answering tasks. To evaluate retrieval performance, we selected 50 structured QA pairs related to 3GPP R18 from the TeleQnA dataset, which serves as the primary benchmark. For additional technical context, we use the 3GPP R18 dataset. To ensure retrieval efficiency, we utilize FAISS, an indexing tool optimized for high-speed vector similarity search. The document processing workflow involves segmenting 3GPP standard documents into 1000-character chunks with a 100-character overlap, followed by embedding generation using Mpnet-base-V2 $^{8}$ , a transformer-based model trained for dense vector representations. To evaluate the effectiveness of the proposed Agentic contextual retrieval framework, we compare its performance against three baselines: (i) Qwen-Max
120
+
121
+ $^{5}$ https://huggingface.co/spaces/Qwen/Qwen2.5-Max-Demo
122
+ $^{6}$ https://huggingface.co/datasets/netop/3GPP-R18
123
+ <sup>7</sup>https://github.com/facebookresearch/faiss
124
+ <sup>8</sup>https://huggingface.co/sentence-transformers/all-mpnet-base-v2
125
+
126
+ without Retriever, representing a pure LLM-based approach, (ii) Qwen-Max with Traditional Retriever, utilizing standard retrieval-based augmentation, and (iii) Qwen-Max with Semantic Retriever, incorporating semantic embedding-based retrieval. The comparison is conducted across four key evaluation metrics, i.e., Answer Matching Accuracy, Answer Text F1 Score, Explanation BERT Score, and Explanation Cosine Similarity, as shown in Fig. 4.
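+ 
+ For reference, the sketch below shows one plausible way to compute three of these metrics for a single QA pair (answer matching, token-level answer F1, and explanation cosine similarity over sentence embeddings); the Explanation BERT Score would typically come from the bert-score package, and the prediction/gold texts here are invented.
+ 
+ ```python
+ # Evaluation sketch: answer matching, answer text F1, explanation cosine.
+ from collections import Counter
+ from sentence_transformers import SentenceTransformer, util
+ 
+ def token_f1(pred: str, gold: str) -> float:
+     p, g = pred.lower().split(), gold.lower().split()
+     overlap = sum((Counter(p) & Counter(g)).values())
+     if overlap == 0:
+         return 0.0
+     precision, recall = overlap / len(p), overlap / len(g)
+     return 2 * precision * recall / (precision + recall)
+ 
+ encoder = SentenceTransformer("all-mpnet-base-v2")
+ 
+ def explanation_cosine(pred: str, gold: str) -> float:
+     a, b = encoder.encode([pred, gold], convert_to_tensor=True)
+     return float(util.cos_sim(a, b))
+ 
+ pred = {"answer": "The SMF selects the UPF",
+         "explanation": "TS 23.501 assigns UPF selection to the SMF."}
+ gold = {"answer": "The SMF selects the UPF",
+         "explanation": "UPF selection is an SMF responsibility in TS 23.501."}
+ 
+ print("accuracy:", float(pred["answer"] == gold["answer"]))
+ print("answer F1:", round(token_f1(pred["answer"], gold["answer"]), 3))
+ print("explanation cosine:", round(explanation_cosine(pred["explanation"], gold["explanation"]), 3))
+ ```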
127
+
128
+ Fig. 4 demonstrates that Agentic contextual retrieval consistently outperforms all baseline methods across all evaluation metrics. In particular, the proposed framework achieves an answer matching accuracy of $84\%$ and an answer text F1 score of $90.37\%$ , surpassing the performance of semantic retrieval (i.e., $80\%$ ) and traditional retrieval (i.e., $74\%$ ), underscoring its effectiveness in generating precise and contextually relevant responses. This improvement is attributed to its dynamic multisource retrieval, which integrates structured 3GPP standards with external knowledge repositories, query reformulation mechanisms, ensuring alignment with telecom-specific terminology, and a structured reasoning pipeline, which employs CoT decision-making and self-validation loops to enhance logical consistency and factual accuracy. Moreover, unlike conventional retrieval methods that rely on static document matching, Agentic contextual retrieval dynamically extracts, synthesizes, and validates multi-hop contextual information, significantly enhancing retrieval precision and response coherence. Furthermore, the explanation quality also benefits significantly from our approach, as evidenced by the Explanation BERT Score (i.e., $90.95\%$ ) and Cosine Similarity (i.e., $80.83\%$ ), both of which outperform alternative retrieval methods. These improvements stem from the framework's ability to synthesize multi-source knowledge, apply structured reasoning, and iteratively refine responses through self-reflection mechanisms. In contrast, the semantic retrieval baseline, while effective at contextual retrieval, lacks robust reasoning capabilities and multi-turn validation, limiting its ability to handle complex telecom-specific queries.
129
+
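Before turning to future directions, the mechanisms credited above (query reformulation, multi-source retrieval, CoT reasoning, and self-validation) can be summarized in a schematic loop. This is an illustrative sketch rather than the authors' implementation: `llm` stands for any chat-completion callable (e.g., Qwen2.5-Max behind an API), `retrieve` is the FAISS retriever sketched earlier, and all prompts are hypothetical.

```python
# Schematic agentic retrieval loop (illustrative only, not the authors' code).
from typing import Callable

def agentic_answer(question: str,
                   llm: Callable[[str], str],
                   retrieve: Callable[[str, int], list[str]],
                   max_rounds: int = 2) -> str:
    # 1) Reformulate the query with telecom-specific terminology.
    query = llm(f"Rewrite this telecom question as a retrieval query:\n{question}")
    answer = ""
    for _ in range(max_rounds):
        # 2) Retrieval (here: the 3GPP R18 index; other knowledge sources could be added).
        context = "\n\n".join(retrieve(query, 5))
        # 3) Chain-of-thought answer generation grounded in the retrieved context.
        answer = llm(f"Context:\n{context}\n\nQuestion: {question}\n"
                     "Reason step by step, then give the final answer and a short explanation.")
        # 4) Self-validation: an unsupported draft triggers query refinement and another round.
        verdict = llm(f"Context:\n{context}\nAnswer:\n{answer}\n"
                      "Is the answer fully supported by the context? Reply SUPPORTED or UNSUPPORTED.")
        if "UNSUPPORTED" not in verdict.upper():
            break
        query = llm(f"The previous retrieval missed key details for: {question}\n"
                    "Propose a refined retrieval query.")
    return answer
```

The validation step is what distinguishes this loop from one-shot retrieval-augmented generation: a draft judged unsupported is not returned directly but instead triggers query refinement and a further retrieval round.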
130
+ # IV. FUTURE DIRECTIONS
131
+
132
+ Security and Privacy in Retrieval-Augmented Networks: As agentic contextual retrieval frameworks increasingly rely on multi-source knowledge retrieval, ensuring data integrity, confidentiality, and adversarial robustness is critical. Future research should explore privacy-preserving retrieval techniques, such as federated retrieval, secure multi-party computation, and differential privacy-enhanced retrieval models, to mitigate risks associated with unauthorized data access and adversarial attacks in wireless and networking applications.
133
+
134
+ Energy-Efficient and Low-Latency Retrieval Networking Architectures: Deploying LLM-driven agentic contextual retrieval frameworks in real-world wireless and networking environments requires optimized inference efficiency and low-latency retrieval mechanisms. Future studies could investigate mobile device-aware retrieval strategies, knowledge distillation for lightweight retrieval models, and edge-based retrieval deployment to minimize computational overhead while maintaining retrieval accuracy in resource-constrained environments, such as 5G edge nodes and IoT devices.
135
+
136
+ Network-Aware Adaptive Retrieval for Real-Time Optimization: As telecom networks become increasingly complex and dynamic, retrieval systems must not only process knowledge efficiently but also adapt to real-time network conditions, congestion levels, and QoS constraints. Future research should explore network-aware retrieval architectures that dynamically adjust retrieval latency, query granularity, and resource allocation based on real-time network traffic and topology changes. Techniques such as reinforcement learning-based retrieval scheduling, adaptive caching, and traffic-aware retrieval pipelines could significantly enhance responsiveness and efficiency in networking environments.
137
+
138
+ # V. CONCLUSION
139
+
140
+ We have presented a forward-looking perspective on generative information retrieval-inspired intelligent communications and networking, emphasizing the role of retrieval in enhancing agentic AI for telecom systems. We have provided a comprehensive review of retrieval strategies, together with recent retrieval-based studies in communications and networking. Finally, we have introduced an LLM-based agentic contextual retrieval framework, which integrates multi-source knowledge retrieval, structured reasoning, and self-validation.
141
+
142
+ # REFERENCES
143
+
144
+ [1] A. Maatouk, N. Piovesan et al., "Large language models for telecom: Forthcoming impact on the industry," IEEE Commun. Mag., vol. 63, no. 1, pp. 62-68, 2025.
145
+ [2] S. Sivakumar, "Agentic AI in predictive AIOps: Enhancing IT autonomy and performance," IJSRM, vol. 12, no. 11, pp. 1631-1638, 2024.
146
+ [3] R. Zhang et al., "Generative AI agents with large language model for satellite networks via a mixture of experts transmission," IEEE J. Sel. Areas Commun., vol. 42, no. 12, pp. 3581-3596, 2024.
147
+ [4] K. Dev, S. A. Khowaja, E. Zeydan, and M. Debbah, "Advanced architectures integrated with agentic AI for next-generation wireless networks," arXiv preprint arXiv:2502.01089, 2025.
148
+ [5] A. Singh et al., "Agentic retrieval-augmented generation: A survey on agentic RAG," arXiv preprint arXiv:2501.09136, 2025.
149
+ [6] S. Anupam, A. Shypula, and O. Bastani, "LLM program optimization via retrieval augmented search," arXiv preprint arXiv:2501.18916, 2025.
150
+ [7] R. Zhang, H. Du, Y. Liu et al., "Interactive AI with retrieval-augmented generation for next generation networking," IEEE Network, vol. 38, no. 6, pp. 414-424, 2024.
151
+ [8] H. Zeng et al., "Federated recommendation via hybrid retrieval augmented generation," arXiv preprint arXiv:2403.04256, 2024.
152
+ [9] S. Tang et al., "Retrieval-augmented generation for GenAI-enabled semantic communications," arXiv preprint arXiv:2412.19494, 2024.
153
+ [10] Y. Xiong et al., "When graph meets retrieval augmented generation for wireless networks: A tutorial and case study," arXiv preprint arXiv:2412.07189, 2024.
154
+ [11] T. Kagaya et al., "RAP: Retrieval-augmented planning with contextual memory for multimodal LLM agents," arXiv preprint arXiv:2402.03610, 2024.
155
+ [12] A. Maatouk et al., "TeleQnA: A benchmark dataset to assess large language models telecommunications knowledge," arXiv preprint arXiv:2310.15051, 2023.
156
+ [13] X. Li, G. Dong, J. Jin, Y. Zhang, Y. Zhou, Y. Zhu, P. Zhang, and Z. Dou, "Search-o1: Agentic search-enhanced large reasoning models," arXiv preprint arXiv:2501.05366, 2025.
157
+ [14] S. Shankar, T. Chambers, T. Shah, A. G. Parameswaran, and E. Wu, "Docetl: Agentic query rewriting and evaluation for complex document processing," arXiv preprint arXiv:2410.12189, 2024.
158
+ [15] F. Ayed, A. Maatouk, N. Piovesan, A. De Domenico, M. Debbah, and Z.-Q. Luo, "Hermes: A large language model framework on the journey to autonomous networks," arXiv preprint arXiv:2411.06490, 2024.
2502.16xxx/2502.16866/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:920e7c996067eee0eae215a915d22080522f13fee1285f7291218256760f778e
3
+ size 630141
2502.16xxx/2502.16866/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16923/7f32aa61-7357-4e3f-9d73-7092bde8c54f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d74ab43d8b3c15a54915ff4c03df4362b403f2afc37436ad932d18c9906f5a0
3
+ size 661580
2502.16xxx/2502.16923/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16923/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fa8508ac1b6cd5b727a6f0fa90e026b7ce230c6a6e5e774366b15d06da38ce5
3
+ size 2982929
2502.16xxx/2502.16923/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.16xxx/2502.16932/d7ef2337-f096-458d-8785-a2073415facb_content_list.json ADDED
The diff for this file is too large to render. See raw diff