SlowGuess committed
Commit 4d31c8f · verified · 1 parent: 34b86d5

Add Batch 9469f9ed-907c-412c-8ffb-20a12e048e3c

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_content_list.json +1836 -0
  3. 2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_model.json +0 -0
  4. 2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_origin.pdf +3 -0
  5. 2402.18xxx/2402.18150/full.md +371 -0
  6. 2402.18xxx/2402.18150/images.zip +3 -0
  7. 2402.18xxx/2402.18150/layout.json +0 -0
  8. 2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_content_list.json +0 -0
  9. 2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_model.json +0 -0
  10. 2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_origin.pdf +3 -0
  11. 2402.18xxx/2402.18153/full.md +0 -0
  12. 2402.18xxx/2402.18153/images.zip +3 -0
  13. 2402.18xxx/2402.18153/layout.json +0 -0
  14. 2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_content_list.json +0 -0
  15. 2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_model.json +0 -0
  16. 2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_origin.pdf +3 -0
  17. 2402.18xxx/2402.18154/full.md +571 -0
  18. 2402.18xxx/2402.18154/images.zip +3 -0
  19. 2402.18xxx/2402.18154/layout.json +0 -0
  20. 2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_content_list.json +0 -0
  21. 2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_model.json +0 -0
  22. 2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_origin.pdf +3 -0
  23. 2402.18xxx/2402.18158/full.md +0 -0
  24. 2402.18xxx/2402.18158/images.zip +3 -0
  25. 2402.18xxx/2402.18158/layout.json +0 -0
  26. 2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_content_list.json +0 -0
  27. 2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_model.json +0 -0
  28. 2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_origin.pdf +3 -0
  29. 2402.18xxx/2402.18191/full.md +453 -0
  30. 2402.18xxx/2402.18191/images.zip +3 -0
  31. 2402.18xxx/2402.18191/layout.json +0 -0
  32. 2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_content_list.json +1490 -0
  33. 2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_model.json +2173 -0
  34. 2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_origin.pdf +3 -0
  35. 2402.18xxx/2402.18205/full.md +261 -0
  36. 2402.18xxx/2402.18205/images.zip +3 -0
  37. 2402.18xxx/2402.18205/layout.json +0 -0
  38. 2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_content_list.json +0 -0
  39. 2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_model.json +0 -0
  40. 2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_origin.pdf +3 -0
  41. 2402.18xxx/2402.18206/full.md +462 -0
  42. 2402.18xxx/2402.18206/images.zip +3 -0
  43. 2402.18xxx/2402.18206/layout.json +0 -0
  44. 2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_content_list.json +0 -0
  45. 2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_model.json +0 -0
  46. 2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_origin.pdf +3 -0
  47. 2402.18xxx/2402.18225/full.md +724 -0
  48. 2402.18xxx/2402.18225/images.zip +3 -0
  49. 2402.18xxx/2402.18225/layout.json +0 -0
  50. 2402.18xxx/2402.18240/c7ca7793-42ba-4590-a6fe-4ef50e44f9dc_content_list.json +2146 -0
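Each paper directory in this batch pairs the original PDF with extracted layout data; the `*_content_list.json` files are JSON arrays of layout blocks whose schema (`type`, `text`, `text_level`, `bbox`, `page_idx`) is visible in the diff further down. A minimal Python sketch for pulling the plain text back out, assuming only that schema; the loop and heading marker are illustrative, not part of the dataset's tooling:

```python
import json

# Load one content_list.json from this batch and print its plain-text blocks.
# Field names ("type", "text", "text_level", "page_idx") match the JSON shown
# in the diff below; the "# " heading prefix here is an illustrative choice.
path = "2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_content_list.json"

with open(path, encoding="utf-8") as f:
    blocks = json.load(f)  # the file is a single JSON array of layout blocks

for block in blocks:
    if block.get("type") == "text":
        # "text_level": 1 marks headings in the extracted layout
        prefix = "# " if block.get("text_level") == 1 else ""
        print(f"[page {block['page_idx']}] {prefix}{block['text']}")
```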
.gitattributes CHANGED
@@ -8599,3 +8599,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  2403.00xxx/2403.00867/cc97b085-0e47-43a6-a582-1fb629c796d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
  2403.00xxx/2403.00871/16db5b6f-858d-49d0-bfa2-02180ae0dc47_origin.pdf filter=lfs diff=lfs merge=lfs -text
  2405.00xxx/2405.00031/c388039e-5087-4763-a0b5-7738dd7c3acf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18240/c7ca7793-42ba-4590-a6fe-4ef50e44f9dc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18243/c99a0bfa-6dcf-415b-a80b-9c1d8f93809c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18272/d33083a8-499c-4a07-9a27-0e64c222d246_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18294/5cfe7c85-6d74-4b1b-907b-98cff1b81cec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18312/169effb0-135f-464f-95aa-c638a22bd86d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18334/7bf7e9dc-7f6d-4f9c-ba80-70f19812d352_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18376/1edf6043-623d-4023-9438-a769fde357f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18377/72ef9219-7390-4030-ae2d-044e78575609_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18381/c045380c-d60a-4fbe-86d1-bf14d4b18b46_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18387/e09294b9-0017-4324-bf0b-20fcf283d8b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18396/50c9dbd9-4c44-47c6-a330-2b5252f312c6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18447/37f38d8f-30c4-4155-b560-6fb241217783_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18467/bb70aba3-4b43-455a-8939-86e2c10e93a9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18476/f7fc8cd7-a848-4b52-98a7-c021bf1ec20b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18485/7855ecca-a9f4-44f6-b70c-a879b89552d2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18491/7b3f97eb-a67f-4899-914c-ddc0e9763dfd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18510/55283981-5ba1-412c-97ea-951cfa2acb15_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18528/beb85b90-c0e5-4ca6-9753-f376743ea3f2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18540/c072ea7d-0faf-4c9b-9d79-a35b17dcb8cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18563/ad5af5ea-3d58-4cdb-b093-70c817ec0ac6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18567/ae131d91-350f-4efa-a1ab-212fd8b34906_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18571/e1dd3051-d5cc-4559-9006-9227a1d838f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18612/7af6ff00-6cd0-4661-9ac3-bd3ebb34ea2f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18649/5e9ec3d5-a257-4bdf-9f5f-c1e9029b0f49_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18659/671d14b9-0400-4d1c-a10c-9dc56922e9de_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18667/f0c257da-36cf-4421-a984-0f51d9d516e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18668/d64a0715-bf5f-4997-abfd-3697986ec9fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18679/c037255b-8c87-43dc-9e37-440eb87e4238_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18700/6a69addd-e35e-450f-b897-de523f3d7d77_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18743/7bf8ed93-2df8-42d8-9cc2-6383aa72bf8a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18747/ee69dae5-9409-40cd-b112-a5dd90262f85_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18749/849e24c4-2046-4924-ac31-649cfe32f04c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18754/8ba2d724-a036-4035-94ab-b54f42d97b2f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18762/7773b332-0e2f-4a00-a916-12b1e45d08a3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18774/5c48a132-8071-410a-8edd-a91b42f9f230_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18815/cab09b25-3bf9-42bb-bfc4-3530d59f18fb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18817/6b2ae680-69ac-41ba-883c-4ce48d455624_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18819/e831574e-aee6-4ceb-b6d4-2351883e2cff_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18835/ebd22bb3-aaec-42e8-8687-2e391d489328_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18847/69e64064-6fb5-444d-8f49-8f52d00f225a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18848/2f941d90-ee08-4ca0-b6ff-9f5188802c13_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18853/4e816515-123f-4752-854b-5098fc7b7d45_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18865/775bcce8-8ef6-42ba-8208-78df829c4044_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18909/e7fd424f-8e99-4f9c-9122-55d04ca199ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18913/b636e29f-0907-4052-a691-f0dc36c09a2e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.18xxx/2402.18944/3f26cb9b-3cac-4f33-a9a6-90113ea2986d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.19xxx/2402.19004/f8c1bfd2-ca55-4d4c-bf08-715321aab46e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.19xxx/2402.19014/f51e74c5-0d58-4514-8dee-4dc75794a791_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.19xxx/2402.19043/22e26a90-6569-43d5-a84c-685ea909e9ae_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.19xxx/2402.19047/1092b505-c0be-41b8-a75d-885e8b69479b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.19xxx/2402.19072/6b69d9cd-846b-402d-a7ab-467c307c5596_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2403.00xxx/2403.00025/01b28bb0-ad7e-4e08-8dba-a42f0d91305b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2403.00xxx/2403.00041/32c69fdd-f240-4313-b403-4a78a7d3d9c6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2403.00xxx/2403.00835/c46f94c7-bb12-4e2e-a20d-7d6ecccbf201_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2403.00xxx/2403.00839/0bf6479c-94be-4ca1-aff8-6ce84d6daa62_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2403.05xxx/2403.05579/07bc0629-2b05-432d-9412-cd97cf1c3c14_origin.pdf filter=lfs diff=lfs merge=lfs -text
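A note on the `filter=lfs` rules added above: each line tells Git LFS to store that `_origin.pdf` as a small text pointer plus an out-of-band blob, so a clone made without `git lfs pull` contains pointers rather than PDFs. A minimal detection sketch, assuming only the standard Git LFS pointer format (first line `version https://git-lfs.github.com/spec/v1`); the function name is ours, not part of any tooling in this repo:

```python
# Detect whether a checked-out file is a Git LFS pointer rather than real
# content (what the *_origin.pdf files above look like before `git lfs pull`).
# The signature line comes from the Git LFS pointer spec.
def is_lfs_pointer(path: str) -> bool:
    try:
        with open(path, "rb") as f:
            head = f.read(100)
    except OSError:
        return False
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

# Hypothetical usage on one file from this batch:
print(is_lfs_pointer("2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_origin.pdf"))
```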
2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_content_list.json ADDED
@@ -0,0 +1,1836 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Unsupervised Information Refinement Training of Large Language Models for Retrieval-Augmented Generation",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 115,
8
+ 79,
9
+ 880,
10
+ 118
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Shicheng Xu $^{1,2*}$ Liang Pang $^{1\\dagger}$ Mo Yu $^{3\\dagger}$ Fandong Meng $^{3}$ Huawei Shen $^{1}$ Xueqi Cheng $^{1}$ Jie Zhou $^{3}$",
17
+ "bbox": [
18
+ 166,
19
+ 124,
20
+ 828,
21
+ 158
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ CAS Key Laboratory of AI Safety, Institute of Computing Technology, CAS $^{2}$ University of Chinese Academy of Sciences $^{3}$ Pattern Recognition Center, WeChat AI",
28
+ "bbox": [
29
+ 188,
30
+ 159,
31
+ 813,
32
+ 208
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "{xushicheng21s,pangliang,shenhuawei,cxq}@ict.ac.cn",
39
+ "bbox": [
40
+ 310,
41
+ 211,
42
+ 690,
43
+ 224
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "moyumyu@global.tencent.com {fandongmeng,withtomzhou}@tencent.com",
50
+ "bbox": [
51
+ 250,
52
+ 228,
53
+ 752,
54
+ 241
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Abstract",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 260,
64
+ 260,
65
+ 339,
66
+ 275
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Retrieval-augmented generation (RAG) enhances large language models (LLMs) by incorporating additional information from retrieval. However, studies have shown that LLMs still face challenges in effectively using the retrieved information, even ignoring it or being misled by it. The key reason is that the training of LLMs does not clearly make LLMs learn how to utilize input retrieved texts with varied quality. In this paper, we propose a novel perspective that considers the role of LLMs in RAG as \"Information Refiner\", which means that regardless of correctness, completeness, or usefulness of retrieved texts, LLMs can consistently integrate knowledge within the retrieved texts and model parameters to generate the texts that are more concise, accurate, and complete than the retrieved texts. To this end, we propose an information refinement training method named INFO-RAG that optimizes LLMs for RAG in an unsupervised manner. INFO-RAG is low-cost and general across various tasks. Extensive experiments on zero-shot prediction of 11 datasets in diverse tasks including Question Answering, Slot-Filling, Language Modeling, Dialogue, and Code Generation show that INFO-RAG improves the performance of LLaMA2 by an average of $9.39\\%$ relative points. INFO-RAG also shows advantages in in-context learning and robustness of RAG.",
73
+ "bbox": [
74
+ 141,
75
+ 291,
76
+ 460,
77
+ 718
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "1 Introduction",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 114,
87
+ 732,
88
+ 258,
89
+ 747
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Retrieval-augmented generation (RAG) is a popular framework in modern NLP systems that equips neural with retrieved information for text generation like open-domain question answering, dialogue (Lewis et al., 2020; Guu et al., 2020) etc. Recently, RAG has been applied to large language models (LLMs) to provide additional knowledge",
96
+ "bbox": [
97
+ 112,
98
+ 759,
99
+ 489,
100
+ 873
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "image",
106
+ "img_path": "images/6f62f172aacff9a1c2d1f982b7395b0f00fb794514c65747538065b9076b4f48.jpg",
107
+ "image_caption": [
108
+ "nuee eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae eae",
109
+ "Figure 1: We consider the role of LLMs in RAG as \"Information Refiner\" that can generate more concise, accurate, and complete texts than the input retrieved texts. In this way, LLM can consistently make RAG system produce positive information gain."
110
+ ],
111
+ "image_footnote": [],
112
+ "bbox": [
113
+ 510,
114
+ 258,
115
+ 860,
116
+ 418
117
+ ],
118
+ "page_idx": 0
119
+ },
120
+ {
121
+ "type": "text",
122
+ "text": "and mitigate issues such as hallucination (Peng et al., 2023; Shi et al., 2023; Ren et al., 2023).",
123
+ "bbox": [
124
+ 507,
125
+ 532,
126
+ 880,
127
+ 564
128
+ ],
129
+ "page_idx": 0
130
+ },
131
+ {
132
+ "type": "text",
133
+ "text": "Despite the improved performance of retrieval models, the internet continues to be inundated with fake news, rumors, and fragmented, noisy information, posing challenges for retrieval models to reliably identify and shield against such content (Sun et al., 2022; Thakur et al., 2021). Consequently, not all retrieved texts are beneficial, necessitating that LLMs determine how to judiciously utilize them. However, pre-training tasks do not explicitly enable LLMs to learn how to utilize the retrieved texts with varied quality for generation. For a question and its retrieved texts as input sequence, RAG aims to minimize the negative log-likelihood (NLL) of sub-sequence (question and generated answer) by referring to the retrieved texts. However, mainstream pre-training for LLMs with decoder-only architecture is language modeling based on the prefix (Radford et al., 2018; Touvron et al., 2023a), the training objective aims to minimize the negative log-likelihood (NLL) of the entire input sequence (retrieved texts, question, and generated answer) (Mikolov et al., 2012). This gap causes",
134
+ "bbox": [
135
+ 507,
136
+ 568,
137
+ 884,
138
+ 921
139
+ ],
140
+ "page_idx": 0
141
+ },
142
+ {
143
+ "type": "aside_text",
144
+ "text": "arXiv:2402.18150v2 [cs.CL] 12 Jun 2024",
145
+ "bbox": [
146
+ 21,
147
+ 312,
148
+ 60,
149
+ 725
150
+ ],
151
+ "page_idx": 0
152
+ },
153
+ {
154
+ "type": "page_footnote",
155
+ "text": "*Work done during the Tencent Rhino-bird Research Elite Program at WeChat.",
156
+ "bbox": [
157
+ 112,
158
+ 881,
159
+ 487,
160
+ 906
161
+ ],
162
+ "page_idx": 0
163
+ },
164
+ {
165
+ "type": "page_footnote",
166
+ "text": "† Corresponding authors.",
167
+ "bbox": [
168
+ 142,
169
+ 906,
170
+ 294,
171
+ 920
172
+ ],
173
+ "page_idx": 0
174
+ },
175
+ {
176
+ "type": "text",
177
+ "text": "LLMs to only regard the input retrieved texts as a part of the prefix for language modeling rather than additional reference, which leads to the following problems. Firstly, for the long and complex retrieved texts, LLMs struggle to extract the correct answers (Deng et al., 2023) accurately. Secondly, in situations where the retrieved texts cannot address the task, LLMs lack the capability to integrate the knowledge within model parameters with the retrieved texts to generate improved texts. Thirdly, LLMs are susceptible to incorrect and noisy information in retrieved texts, posing a risk of being misled (Chen et al., 2023; Yoran et al., 2023).",
178
+ "bbox": [
179
+ 110,
180
+ 84,
181
+ 492,
182
+ 294
183
+ ],
184
+ "page_idx": 1
185
+ },
186
+ {
187
+ "type": "text",
188
+ "text": "To solve above problems, some previous methods explore strategies for how or when to perform retrieval for LLMs by prompt techniques (Press et al., 2023; Khattab et al., 2022; Xu et al., 2023; Asai et al., 2023). However, prompt cannot materially change the ability of LLMs to utilize retrieved texts because model parameters are not updated for this ability. Some methods fine-tune LLMs on the constructed RAG data for a specific task such as QA (Yoran et al., 2023; Yu et al., 2023). However, under the trend that LLMs are regarded as foundation models for various tasks in zero-shot setting, fine-tuning LLMs only on a few tasks make LLMs limited to the RAG of training tasks and lose their generalizability. Because catastrophic forgetting still exists in supervised fine-tuning of LLMs (Luo et al., 2023). Although constructing data for a large number of tasks can alleviate this, it is hard to design the data in various RAG tasks and requires high data annotation costs. Our paper aims to fundamentally improve the ability of LLMs to utilize retrieved texts while preserving the generalizability of LLMs for various RAG tasks in zero-shot setting, which is orthogonal to prompt techniques and can be combined with them to get better performance.",
189
+ "bbox": [
190
+ 110,
191
+ 294,
192
+ 492,
193
+ 696
194
+ ],
195
+ "page_idx": 1
196
+ },
197
+ {
198
+ "type": "text",
199
+ "text": "In this paper, considering that LLMs have a certain ability to use their own knowledge to examine information (Dhuliawala et al., 2023), we introduce a novel perspective to reassess the role of LLMs in RAG. Specifically, we propose considering LLMs as \"Information Refiner\". The key idea behind this is to continue training the pre-trained LLMs with an Information Refinement objective that regardless of the correctness, completeness, or usefulness of the input retrieved texts, LLMs can consistently integrate knowledge within the retrieved texts and model parameters to generate the texts that are more concise, accurate, and complete than the retrieved texts (Figure 1). We term this",
200
+ "bbox": [
201
+ 110,
202
+ 696,
203
+ 492,
204
+ 923
205
+ ],
206
+ "page_idx": 1
207
+ },
208
+ {
209
+ "type": "text",
210
+ "text": "process \"Positive Information Gain\". This enables LLMs to extract correct information from complex texts as well as resist and rectify retrieved erroneous information and noise, thereby improving the information bottleneck of the RAG and allowing the knowledge capacity of RAG to approximate the combined knowledge of IR and LLMs.",
211
+ "bbox": [
212
+ 507,
213
+ 84,
214
+ 885,
215
+ 197
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "We make the information refinement training work in a completely unsupervised manner, such that it is easy to obtain large-scale training data and maintain the generalizability of the trained LLMs that can be used in various RAG tasks in zero-shot setting. Specifically, we propose an unsupervised training method named INFO-RAG. INFO-RAG classifies the retrieved texts into three scenarios (shown in Figure 1) and proposes the unsupervised training task for each scenario. For the first scenario that all knowledge for the question is already in the retrieved texts, LLMs need to accurately extract relevant knowledge from complex retrieved texts and generate more concise texts. For the second scenario that retrieved texts are incomplete or incorrect for the question, LLMs need to combine the knowledge within model parameters to verify the retrieved texts, correct the wrong knowledge, and complete the missing knowledge. For the third scenario that retrieved texts are relevant but do not have any answer, LLMs need to find the knowledge within model parameters based on relevant context to generate correct answers. We mix the above three tasks to train INFO-RAG unsupervisedly.",
222
+ "bbox": [
223
+ 507,
224
+ 199,
225
+ 885,
226
+ 583
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "list",
232
+ "sub_type": "text",
233
+ "list_items": [
234
+ "Main contributions of this paper are as follows: (1) We introduce a novel perspective to reassess the role of LLMs in the RAG system that considers LLMs as \"Information Refiner\" that can produce positive information gain in RAG scenarios.",
235
+ "(2) We propose an unsupervised training method named INFO-RAG that enables LLMs to perform information refinement in RAG. INFO-RAG is low-cost and general for various RAG tasks.",
236
+ "(3) Extensive experiments show INFO-RAG enhances the zero-shot RAG of LLaMA2 across Question Answering, Slot-Filling, Language Modeling, Dialog, and Code Generation. INFO-RAG also shows advantages in in-context learning and robustness of RAG. Code is released at https://github.com/xsc1234/INFO-RAG/."
237
+ ],
238
+ "bbox": [
239
+ 507,
240
+ 586,
241
+ 885,
242
+ 845
243
+ ],
244
+ "page_idx": 1
245
+ },
246
+ {
247
+ "type": "text",
248
+ "text": "2 Related Work",
249
+ "text_level": 1,
250
+ "bbox": [
251
+ 507,
252
+ 860,
253
+ 665,
254
+ 876
255
+ ],
256
+ "page_idx": 1
257
+ },
258
+ {
259
+ "type": "text",
260
+ "text": "Retrieval Augmented Generation Retrieval augmented generation (RAG) aims to provide addi",
261
+ "bbox": [
262
+ 507,
263
+ 889,
264
+ 885,
265
+ 922
266
+ ],
267
+ "page_idx": 1
268
+ },
269
+ {
270
+ "type": "text",
271
+ "text": "tional knowledge for language models by retrieving information from external databases (Lewis et al., 2020; Guu et al., 2020; Borgeaud et al., 2022; Izacard et al., 2022). RAG makes the text generated by LLM more accurate and credible, and is widely used in Open-domain QA (Karpukhin et al., 2020; Trivedi et al., 2022a), dialogue (Cai et al., 2018, 2019) and Code Generation (Parvez et al., 2021). Recently, RAG has also been widely applied in LLMs (Peng et al., 2023; Shi et al., 2023; Ren et al., 2023). The form of RAG in LLMs is using the retrieved texts as contexts (Ram et al., 2023).",
272
+ "bbox": [
273
+ 115,
274
+ 84,
275
+ 485,
276
+ 275
277
+ ],
278
+ "page_idx": 2
279
+ },
280
+ {
281
+ "type": "text",
282
+ "text": "Some studies have noted that noise in retrieved texts will interfere with the performance of the language model or even mislead it (Xu et al., 2023; Wang et al., 2023; Chen et al., 2023; Xu et al., 2024). These works try to solve this problem from the interactive framework between IR and LM, while our work points out a more essential view. That is, previous studies on RAG do not define the role of LLMs in RAG clearly. Our paper introduces a novel perspective to reassess the role of LLMs in RAG that considers LLMs as \"Information Refiner\".",
283
+ "bbox": [
284
+ 115,
285
+ 278,
286
+ 485,
287
+ 468
288
+ ],
289
+ "page_idx": 2
290
+ },
291
+ {
292
+ "type": "text",
293
+ "text": "Unsupervised Learning of RAG Unsupervised learning of RAG can be divided into the training of retrievers and language models. As for retrievers, REALM (Guu et al., 2020) proposes using masked language modeling to pre-train a knowledge retriever. REPLUG (Shi et al., 2023) trains the retriever according to the feedback from black-box LM. As for language models, RETRO (Borgeaud et al., 2022) improves language models by retrieving tokens. Atlas proposes pretext tasks to jointly train the retriever and language model. However, these two methods focus on the model of encoder-decoder architecture, which is inconsistent with the current mainstream LLMs based on decoder-only.",
294
+ "bbox": [
295
+ 115,
296
+ 480,
297
+ 485,
298
+ 703
299
+ ],
300
+ "page_idx": 2
301
+ },
302
+ {
303
+ "type": "text",
304
+ "text": "Previous unsupervised training methods do not consider the specific role that language models should play in RAG. In this paper, we focus on training language model as an \"Information Refiner\" that can further improve the information bottleneck of RAG and be robust to retrieved texts.",
305
+ "bbox": [
306
+ 115,
307
+ 706,
308
+ 485,
309
+ 800
310
+ ],
311
+ "page_idx": 2
312
+ },
313
+ {
314
+ "type": "text",
315
+ "text": "3 Our INFO-RAG",
316
+ "text_level": 1,
317
+ "bbox": [
318
+ 115,
319
+ 814,
320
+ 292,
321
+ 829
322
+ ],
323
+ "page_idx": 2
324
+ },
325
+ {
326
+ "type": "text",
327
+ "text": "This section introduces our INFO-RAG, an unsupervised training method to enable LLMs to perform information refinement in RAG. Firstly, we summarize the retrieved texts in RAG into three scenarios and define the positive information gain",
328
+ "bbox": [
329
+ 115,
330
+ 841,
331
+ 485,
332
+ 919
333
+ ],
334
+ "page_idx": 2
335
+ },
336
+ {
337
+ "type": "text",
338
+ "text": "for each scenario. Secondly, we construct sample pairs in which the output has information gain compared to the input for these three scenarios and design three training tasks. Thirdly, we train LLMs under our designed tasks on the unsupervised samples. Unsupervised training makes INFO-RAG low-cost and general for RAG in various tasks.",
339
+ "bbox": [
340
+ 512,
341
+ 84,
342
+ 880,
343
+ 195
344
+ ],
345
+ "page_idx": 2
346
+ },
347
+ {
348
+ "type": "text",
349
+ "text": "3.1 Positive Information Gain in RAG",
350
+ "text_level": 1,
351
+ "bbox": [
352
+ 512,
353
+ 206,
354
+ 821,
355
+ 222
356
+ ],
357
+ "page_idx": 2
358
+ },
359
+ {
360
+ "type": "text",
361
+ "text": "In this paper, we introduce a novel perspective to reassess the role of LLMs in RAG that LLMs should be the \"Information Refiner\" that can produce \"Positive Information Gain\" in the information flow of RAG. This section details the scenarios of retrieved texts and defines specific information gain LLMs should produce in each scenario.",
362
+ "bbox": [
363
+ 512,
364
+ 229,
365
+ 880,
366
+ 338
367
+ ],
368
+ "page_idx": 2
369
+ },
370
+ {
371
+ "type": "text",
372
+ "text": "Scenario 1. The first scenario is that all knowledge for the question is already in the retrieved texts. Even if the correct knowledge already exists in the retrieved texts, complex and lengthy retrieved texts are not conducive for users to directly obtain the knowledge. Therefore, the positive information gain in this scenario means that LLMs extract correct knowledge as much as possible while removing irrelevant information, thereby generating more direct and concise texts for users.",
373
+ "bbox": [
374
+ 512,
375
+ 341,
376
+ 880,
377
+ 500
378
+ ],
379
+ "page_idx": 2
380
+ },
381
+ {
382
+ "type": "text",
383
+ "text": "Scenario 2. The second scenario is that although the retrieved texts contain some usable knowledge, they still contain some incomplete or incorrect knowledge. This scenario is very common, especially with the current proliferation of fake news, misinformation, and fragmented knowledge on the Internet. There has been study proving that noise and erroneous knowledge in retrieved texts greatly mislead the generation of LLMs (Xu et al., 2023). The positive information gain in this scenario is that LLMs can exploit the knowledge within their parameters to verify the knowledge in the retrieved texts. Utilize accurate knowledge, rectify incorrect knowledge, and complete missing knowledge",
384
+ "bbox": [
385
+ 512,
386
+ 502,
387
+ 880,
388
+ 727
389
+ ],
390
+ "page_idx": 2
391
+ },
392
+ {
393
+ "type": "text",
394
+ "text": "Scenario 3. The third scenario is that the retrieved texts do not have any answer that can be used to solve the question. This scenario means that the question is very difficult or the target knowledge is very long-tail for information retrieval systems. Even in this case, the retrieval model's ability to model semantics allows it to provide texts that are semantically related to the question (Karpukhin et al., 2020). Therefore, the positive information gain in this scenario is that LLMs can stimulate the knowledge within their parameters based on semantically relevant context to solve the question.",
395
+ "bbox": [
396
+ 512,
397
+ 728,
398
+ 880,
399
+ 919
400
+ ],
401
+ "page_idx": 2
402
+ },
403
+ {
404
+ "type": "image",
405
+ "img_path": "images/179a27e5d46bb133682c03cd9a5e1c035af9d24e41253d3eb5b4d7279d96937d.jpg",
406
+ "image_caption": [
407
+ "Figure 2: Overview of our INFO-RAG. Each sample is only processed for a single scenario to avoid data leakage."
408
+ ],
409
+ "image_footnote": [],
410
+ "bbox": [
411
+ 117,
412
+ 80,
413
+ 270,
414
+ 267
415
+ ],
416
+ "page_idx": 3
417
+ },
418
+ {
419
+ "type": "image",
420
+ "img_path": "images/c111599f5429af9a9f4678a128b14ce4afd9e9079f1486644b4f42367d017038.jpg",
421
+ "image_caption": [],
422
+ "image_footnote": [],
423
+ "bbox": [
424
+ 275,
425
+ 80,
426
+ 884,
427
+ 267
428
+ ],
429
+ "page_idx": 3
430
+ },
431
+ {
432
+ "type": "text",
433
+ "text": "3.2 Unsupervised Learning",
434
+ "text_level": 1,
435
+ "bbox": [
436
+ 112,
437
+ 317,
438
+ 346,
439
+ 332
440
+ ],
441
+ "page_idx": 3
442
+ },
443
+ {
444
+ "type": "text",
445
+ "text": "This section introduces unsupervised learning in INFO-RAG. We construct the input-output pairs that satisfy the information gain in the above three scenarios on Wikipedia. We continue to train pretrained LLMs on the constructed data to perform information refinement in the form of next token prediction in prefix language modeling, which is general for various tasks. Pipeline is in Figure 2.",
446
+ "bbox": [
447
+ 112,
448
+ 338,
449
+ 487,
450
+ 467
451
+ ],
452
+ "page_idx": 3
453
+ },
454
+ {
455
+ "type": "text",
456
+ "text": "3.2.1 Data Collection",
457
+ "text_level": 1,
458
+ "bbox": [
459
+ 112,
460
+ 476,
461
+ 297,
462
+ 491
463
+ ],
464
+ "page_idx": 3
465
+ },
466
+ {
467
+ "type": "text",
468
+ "text": "The data construction is performed on English Wikipedia. Specifically, for each document $d$ in Wikipedia, we intercept $k$ consecutive sentences from $d$ and get the sentence set $S = [s_1, s_2, \\dots, s_k]$ . Our method randomly selects $s_l$ from $S$ and uses it as the object for language modeling. The first $\\frac{1}{3}$ to $\\frac{2}{3}$ of the tokens of $s_l$ are randomly intercepted as the prefix $(s_l^p)$ and the other tokens of $s_l$ are used as the prediction target $(s_l^t)$ . We also perform the process (Section 3.2.2) on sentence set $S$ so that it can be used to simulate the retrieved texts $\\mathcal{R}(s_l^p)$ for prefix $s_l^p$ in three scenarios for conditioning the generation of $s_l^t$ . Then, we can get an unsupervised training sample for prefix language modeling that predicts $s_l^t$ given the prefix $s_l^p$ and the retrieved texts $\\mathcal{R}(s_l^p)$ . This can be formulated as:",
469
+ "bbox": [
470
+ 112,
471
+ 495,
472
+ 489,
473
+ 753
474
+ ],
475
+ "page_idx": 3
476
+ },
477
+ {
478
+ "type": "equation",
479
+ "text": "\n$$\np (s _ {l} ^ {t}) = p _ {\\theta} ([ \\mathcal {R} (s _ {l} ^ {p}); s _ {l} ^ {p} ]), \\qquad (1)\n$$\n",
480
+ "text_format": "latex",
481
+ "bbox": [
482
+ 206,
483
+ 765,
484
+ 487,
485
+ 785
486
+ ],
487
+ "page_idx": 3
488
+ },
489
+ {
490
+ "type": "text",
491
+ "text": "$\\theta$ are parameters of LLMs, $[\\mathcal{R}(s_l^p);s_l^p ]$ is the concatenation of $\\mathcal{R}(s_l^p)$ and $s_l^p$ by a special token.",
492
+ "bbox": [
493
+ 112,
494
+ 795,
495
+ 487,
496
+ 829
497
+ ],
498
+ "page_idx": 3
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "3.2.2 Data Construction and Training Tasks",
503
+ "text_level": 1,
504
+ "bbox": [
505
+ 112,
506
+ 837,
507
+ 477,
508
+ 853
509
+ ],
510
+ "page_idx": 3
511
+ },
512
+ {
513
+ "type": "text",
514
+ "text": "This section details our data construction and training tasks for three scenarios in Section 3.1.",
515
+ "bbox": [
516
+ 112,
517
+ 856,
518
+ 487,
519
+ 888
520
+ ],
521
+ "page_idx": 3
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "For Scenario 1 that needs LLMs to extract the correct knowledge from the complex texts, we",
526
+ "bbox": [
527
+ 112,
528
+ 889,
529
+ 487,
530
+ 921
531
+ ],
532
+ "page_idx": 3
533
+ },
534
+ {
535
+ "type": "text",
536
+ "text": "propose the training task named Select and Copy. Specifically, given the sentence set $S$ for a sample, Select and Copy directly uses all sentences in $S$ as retrieved texts for conditioning LLMs to predict $s_l^t$ for the given prefix $s_l^p$ . This can be formulated as:",
537
+ "bbox": [
538
+ 507,
539
+ 317,
540
+ 884,
541
+ 398
542
+ ],
543
+ "page_idx": 3
544
+ },
545
+ {
546
+ "type": "equation",
547
+ "text": "\n$$\np \\left(s _ {l} ^ {t}\\right) = p _ {\\theta} ([ S; s _ {l} ^ {p} ]). \\tag {2}\n$$\n",
548
+ "text_format": "latex",
549
+ "bbox": [
550
+ 618,
551
+ 411,
552
+ 882,
553
+ 430
554
+ ],
555
+ "page_idx": 3
556
+ },
557
+ {
558
+ "type": "text",
559
+ "text": "In Select and Copy, $s_l$ (both $s_l^p$ and $s_l^t$ ) has been contained in the retrieved texts $S$ , this needs LLMs to select the texts matching the prefix $s_l^p$ from the complex retrieved texts $S$ and directly copy the target $s_l^t$ for generation. The information gain between $s_l^t$ and input retrieved texts $S$ is that $s_l^t$ is more concise to be used as the postfix for $s_l^p$ .",
560
+ "bbox": [
561
+ 507,
562
+ 442,
563
+ 882,
564
+ 556
565
+ ],
566
+ "page_idx": 3
567
+ },
568
+ {
569
+ "type": "text",
570
+ "text": "For Scenario 2 that needs LLMs to verify the knowledge in the retrieved texts, utilize accurate knowledge, rectify incorrect knowledge, and complete missing knowledge. We propose the training task named Correct and Complete. Given a sentence set $S$ , firstly, this task uses the stability of word distribution between layers to get informative tokens. The intention for this is that the more unstable the word distribution of the token among the topmost layers is, the more it indicates that the token is an informative token. We follow (Chuang et al., 2023) to achieve this. Specifically, for each sentence $s_i$ in $S$ , our method obtains the next word distribution of the $a$ -th token $s_i^{[a]}$ given prefix $s_i^{<a}$ of $s_i$ in each layer of LLM as:",
571
+ "bbox": [
572
+ 507,
573
+ 557,
574
+ 882,
575
+ 797
576
+ ],
577
+ "page_idx": 3
578
+ },
579
+ {
580
+ "type": "equation",
581
+ "text": "\n$$\nd _ {j} \\left(s _ {i} ^ {[ a ]} \\mid s _ {i} ^ {< a}\\right) = \\operatorname {s o f t m a x} \\left(\\mathbf {W H} _ {j} ^ {[ a ]}\\right), \\tag {3}\n$$\n",
582
+ "text_format": "latex",
583
+ "bbox": [
584
+ 571,
585
+ 810,
586
+ 882,
587
+ 834
588
+ ],
589
+ "page_idx": 3
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "in which $j$ indicates the $j$ -th layer of LLMs, $\\mathbf{H}_j^{[a]} \\in \\mathbb{R}^h$ is the hidden states for token $s_i^{[a]}$ in the $j$ -th layer, $\\mathbf{W} \\in \\mathbb{R}^{h \\times v}$ is the vocabulary head that maps the hidden states $\\mathbf{H}_j^{[a]}$ to the word distribution with",
594
+ "bbox": [
595
+ 507,
596
+ 847,
597
+ 882,
598
+ 925
599
+ ],
600
+ "page_idx": 3
601
+ },
602
+ {
603
+ "type": "text",
604
+ "text": "vocabulary size $v$ . Then, for the LLM with $N$ layers, our method uses Jensen-Shannon Divergence (JSD) to measure the differences in word distribution between layers and gets the word distribution stability of token $s_{i}^{[a]}$ as:",
605
+ "bbox": [
606
+ 112,
607
+ 84,
608
+ 489,
609
+ 167
610
+ ],
611
+ "page_idx": 4
612
+ },
613
+ {
614
+ "type": "equation",
615
+ "text": "\n$$\nO _ {i} ^ {[ a ]} = \\max _ {j \\in J} \\mathrm {J S D} (d _ {N} (s _ {i} ^ {[ a ]} | s _ {i} ^ {< a}) | | d _ {j} (s _ {i} ^ {[ a ]} | s _ {i} ^ {< a})),\n$$\n",
616
+ "text_format": "latex",
617
+ "bbox": [
618
+ 127,
619
+ 171,
620
+ 472,
621
+ 199
622
+ ],
623
+ "page_idx": 4
624
+ },
625
+ {
626
+ "type": "text",
627
+ "text": "in which $J$ is the set of candidate layers (0-th to $\\frac{N}{2}$ -th layers), $d_{N}(s_{i}^{[a]}|s_{i}^{< a})$ is the word distribution of the last layer. This design aims to find the layer with the largest word distribution difference between the last layer and use the JSD of the two as the word distribution stability of the token $s_{i}^{[a]}$ (Chuang et al., 2023). For each token of $s_{i}$ , we obtain its word distribution stability in parallel and get the set of word distribution stability for $s_{i}$ as:",
628
+ "bbox": [
629
+ 112,
630
+ 203,
631
+ 489,
632
+ 351
633
+ ],
634
+ "page_idx": 4
635
+ },
636
+ {
637
+ "type": "equation",
638
+ "text": "\n$$\n\\mathbb {O} _ {i} = \\left\\{O _ {i} ^ {[ 0 ]}, O _ {i} ^ {[ 1 ]}, \\dots , O _ {i} ^ {[ n ]} \\right\\}. \\tag {4}\n$$\n",
639
+ "text_format": "latex",
640
+ "bbox": [
641
+ 196,
642
+ 355,
643
+ 487,
644
+ 378
645
+ ],
646
+ "page_idx": 4
647
+ },
648
+ {
649
+ "type": "text",
650
+ "text": "We choose the tokens corresponding to the top $50\\%$ of the elements in $\\mathbb{O}_i$ as informative tokens within the sentence $s_i$ . Subsequently, we apply a specific percentage $(30\\%)$ of random masking and replacement to these tokens. For the randomly selected token, we replace it with [MASK] with a $50\\%$ probability to simulate the incomplete knowledge, and randomly replace it with another token with a $40\\%$ probability to simulate the incorrect knowledge, while keeping it unchanged with a $10\\%$ probability to simulate the correct knowledge. We do the above pipeline for each sentence in the set $S$ and get the processed set $S'$ . RAG in Correct and Complete can be formulated as:",
651
+ "bbox": [
652
+ 112,
653
+ 382,
654
+ 489,
655
+ 605
656
+ ],
657
+ "page_idx": 4
658
+ },
659
+ {
660
+ "type": "equation",
661
+ "text": "\n$$\np \\left(s _ {l} ^ {t}\\right) = p _ {\\theta} \\left(\\left[ S ^ {\\prime}; s _ {l} ^ {p} \\right]\\right). \\tag {5}\n$$\n",
662
+ "text_format": "latex",
663
+ "bbox": [
664
+ 220,
665
+ 612,
666
+ 487,
667
+ 631
668
+ ],
669
+ "page_idx": 4
670
+ },
671
+ {
672
+ "type": "text",
673
+ "text": "In Correct and Complete, the broken $s_l$ with noise is already in $S'$ . The information gain in this task requires LLM to extract, correct, and complete the knowledge in $s_l$ from $S'$ to generate $s_l^t$ .",
674
+ "bbox": [
675
+ 112,
676
+ 636,
677
+ 487,
678
+ 699
679
+ ],
680
+ "page_idx": 4
681
+ },
682
+ {
683
+ "type": "text",
684
+ "text": "For Scenario 3 that needs LLMs to find answers from their knowledge based on relevant texts in context. We propose the training task named Contextual Stimulation. Contextual Stimulation eliminates $s_l$ (both $s_l^p$ and $s_l^t$ ) from the set $S$ and uses the remaining sentences as retrieved tests for generation, which can be formulated as:",
685
+ "bbox": [
686
+ 112,
687
+ 700,
688
+ 489,
689
+ 810
690
+ ],
691
+ "page_idx": 4
692
+ },
693
+ {
694
+ "type": "equation",
695
+ "text": "\n$$\np (s _ {l} ^ {t}) = p _ {\\theta} ([ S - \\{s _ {l} \\}; s _ {l} ^ {p} ]). \\qquad (6)\n$$\n",
696
+ "text_format": "latex",
697
+ "bbox": [
698
+ 194,
699
+ 816,
700
+ 487,
701
+ 837
702
+ ],
703
+ "page_idx": 4
704
+ },
705
+ {
706
+ "type": "text",
707
+ "text": "In Contextual Stimulation, each sentence in retrieved texts $S - \\{s_l\\}$ is semantically relevant to $s_l^p$ but cannot help LLMs to directly generate $s_l^t$ . LLMs need to be stimulated by relevant information to generate $s_l^t$ based on their own knowledge.",
708
+ "bbox": [
709
+ 112,
710
+ 841,
711
+ 490,
712
+ 921
713
+ ],
714
+ "page_idx": 4
715
+ },
716
+ {
717
+ "type": "text",
718
+ "text": "3.2.3 Training Strategy",
719
+ "text_level": 1,
720
+ "bbox": [
721
+ 507,
722
+ 84,
723
+ 709,
724
+ 99
725
+ ],
726
+ "page_idx": 4
727
+ },
728
+ {
729
+ "type": "text",
730
+ "text": "After the data construction for three training tasks, we mix them for multi-task training. Specifically, we use LoRA (Hu et al., 2021) to train the pretrained LLMs on the mixed dataset of three tasks. Three tasks are trained alternately in batches. Since Select and Copy is relatively simple for LLMs, it only accounts for $20\\%$ of the batches, while Correct and Complete and Contextual Stimulation each account for $40\\%$ of the batches. Using LoRA not only reduces training costs but also makes our method plug-and-play. The trained LoRA parameters are loaded when LLMs need to perform RAG and unloaded when RAG is not needed.",
731
+ "bbox": [
732
+ 507,
733
+ 105,
734
+ 884,
735
+ 315
736
+ ],
737
+ "page_idx": 4
738
+ },
739
+ {
740
+ "type": "text",
741
+ "text": "4 Experiments",
742
+ "text_level": 1,
743
+ "bbox": [
744
+ 507,
745
+ 331,
746
+ 655,
747
+ 348
748
+ ],
749
+ "page_idx": 4
750
+ },
751
+ {
752
+ "type": "text",
753
+ "text": "4.1 Datasets and Evaluation Metrics",
754
+ "text_level": 1,
755
+ "bbox": [
756
+ 507,
757
+ 360,
758
+ 811,
759
+ 374
760
+ ],
761
+ "page_idx": 4
762
+ },
763
+ {
764
+ "type": "text",
765
+ "text": "To demonstrate the generality of our unsupervised training method, we evaluate the performance of INFO-RAG on eleven datasets across seven tasks.",
766
+ "bbox": [
767
+ 507,
768
+ 382,
769
+ 882,
770
+ 430
771
+ ],
772
+ "page_idx": 4
773
+ },
774
+ {
775
+ "type": "text",
776
+ "text": "Open-domain Question Answering Open-domain QA is a typical knowledge-intensive task that can directly evaluate the knowledge of LLMs. We use Natural Questions (Kwiatkowski et al., 2019) (NQ) and WebQuestions (Berant et al., 2013) (WebQ) as the datasets. We use cover Exact Match (EM) to determine whether the ground truth exactly appears in the output and the accuracy is used as the evaluation metric, following (Schick et al., 2023)",
777
+ "bbox": [
778
+ 507,
779
+ 432,
780
+ 882,
781
+ 577
782
+ ],
783
+ "page_idx": 4
784
+ },
785
+ {
786
+ "type": "text",
787
+ "text": "Soft Filling Soft filling requires LLMs to output the object entities for the input subject entity and relation. We use two knowledge-intensive datasets including Zero Shot RE (Levy et al., 2017) (ZS) and T-REx (Elsahar et al., 2018). We use the same evaluation metric as Open-domain QA.",
788
+ "bbox": [
789
+ 507,
790
+ 579,
791
+ 882,
792
+ 675
793
+ ],
794
+ "page_idx": 4
795
+ },
796
+ {
797
+ "type": "text",
798
+ "text": "Long-Form Question Answering Compared with open-domain QA, LFQA is the QA task whose ground truth answer is a relatively long text. We use ELI5 (Fan et al., 2019), a knowledge-intensive dataset for LFQA. We use ROUGE-L as the evaluation metric (Petroni et al., 2020).",
799
+ "bbox": [
800
+ 507,
801
+ 677,
802
+ 882,
803
+ 772
804
+ ],
805
+ "page_idx": 4
806
+ },
807
+ {
808
+ "type": "text",
809
+ "text": "Dialogue Dialogue in our experiment focuses on the factual knowledge. We use Wizard of Wikipedia (Dinan et al., 2018) (WoW), a knowledge-powered dialogue dataset whose conversation is grounded with knowledge. We use F1 as the evaluation metric (Petroni et al., 2020).",
810
+ "bbox": [
811
+ 507,
812
+ 775,
813
+ 882,
814
+ 870
815
+ ],
816
+ "page_idx": 4
817
+ },
818
+ {
819
+ "type": "text",
820
+ "text": "Language Modeling We use WikiText-103 (M erity, 2016), a popular dataset for language modeling. We use ROUGE-L as the evaluation metric.",
821
+ "bbox": [
822
+ 507,
823
+ 873,
824
+ 882,
825
+ 920
826
+ ],
827
+ "page_idx": 4
828
+ },
829
+ {
830
+ "type": "table",
831
+ "img_path": "images/33686387d224dc9d4983b1364a76a0754d540d38e91530aecb2b7d32a9e36ac9.jpg",
832
+ "table_caption": [],
833
+ "table_footnote": [],
834
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td colspan=\"2\">Soft-Filling Accuracy</td><td colspan=\"2\">ODQA Accuracy</td><td colspan=\"2\">Multi-Hop QA Accuracy</td><td rowspan=\"2\">LFQA ROUGE EII5</td><td rowspan=\"2\">Dialog F1 Wow</td><td rowspan=\"2\">LM ROUGE WikiText</td><td rowspan=\"2\" colspan=\"2\">Code Gen CodeBLEU Python Java</td><td rowspan=\"2\">Overall</td></tr><tr><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td><td>Hotpot</td><td>Musique</td></tr><tr><td>LLaMA-2-7B</td><td>55.60</td><td>54.08</td><td>46.82</td><td>43.52</td><td>39.40</td><td>25.95</td><td>15.18</td><td>7.85</td><td>60.77</td><td>21.44</td><td>22.99</td><td>35.78</td></tr><tr><td>+ INFO-RAG</td><td>65.91</td><td>57.01</td><td>45.74</td><td>44.68</td><td>46.56</td><td>30.19</td><td>17.18</td><td>9.09</td><td>62.91</td><td>26.75</td><td>32.06</td><td>39.83</td></tr><tr><td>LLaMA-2-7B-chat</td><td>60.63</td><td>55.03</td><td>49.42</td><td>46.72</td><td>50.03</td><td>42.69</td><td>27.81</td><td>10.21</td><td>60.26</td><td>22.46</td><td>23.90</td><td>40.83</td></tr><tr><td>+ INFO-RAG</td><td>65.77</td><td>58.32</td><td>53.93</td><td>49.13</td><td>52.01</td><td>44.45</td><td>28.15</td><td>10.49</td><td>63.24</td><td>27.25</td><td>28.79</td><td>43.78</td></tr><tr><td>LLaMA-2-13B</td><td>60.08</td><td>50.77</td><td>47.40</td><td>44.62</td><td>42.12</td><td>25.78</td><td>14.80</td><td>7.04</td><td>62.20</td><td>21.52</td><td>29.16</td><td>36.86</td></tr><tr><td>+ INFO-RAG</td><td>62.80</td><td>55.63</td><td>47.82</td><td>45.42</td><td>51.48</td><td>35.02</td><td>17.48</td><td>7.20</td><td>64.14</td><td>29.00</td><td>35.50</td><td>41.04</td></tr><tr><td>LLaMA-2-13B-chat</td><td>62.53</td><td>56.81</td><td>50.36</td><td>45.47</td><td>61.23</td><td>47.06</td><td>27.07</td><td>11.19</td><td>60.52</td><td>22.34</td><td>30.96</td><td>43.23</td></tr><tr><td>+ INFO-RAG</td><td>65.39</td><td>59.05</td><td>54.04</td><td>51.07</td><td>61.91</td><td>47.93</td><td>27.24</td><td>11.38</td><td>63.92</td><td>31.98</td><td>38.12</td><td>46.55</td></tr></table>",
835
+ "bbox": [
836
+ 119,
837
+ 80,
838
+ 884,
839
+ 258
840
+ ],
841
+ "page_idx": 5
842
+ },
843
+ {
844
+ "type": "text",
845
+ "text": "Table 1: Overall performance on retrieval-augmented generation on 11 datasets across 7 tasks in zero-shot setting.",
846
+ "bbox": [
847
+ 112,
848
+ 269,
849
+ 880,
850
+ 285
851
+ ],
852
+ "page_idx": 5
853
+ },
854
+ {
855
+ "type": "text",
856
+ "text": "Multi-Hop Question Answering Multi-hop QA measures the ability of LLMs to perform combined reasoning on multiple knowledge. We use HotpotQA (Yang et al., 2018) and Musique (Trivedi et al., 2022b) for this task. We use the same evaluation metric as Open-domain QA.",
857
+ "bbox": [
858
+ 112,
859
+ 309,
860
+ 487,
861
+ 405
862
+ ],
863
+ "page_idx": 5
864
+ },
865
+ {
866
+ "type": "text",
867
+ "text": "Code Generation Code generation aims to generate the code for the given natural language. We use Java and Python in CodeXGLUE (Iyer et al., 2018) for this task. We use CodeBLEU (Ren et al., 2020) as the evaluation metric.",
868
+ "bbox": [
869
+ 112,
870
+ 407,
871
+ 489,
872
+ 488
873
+ ],
874
+ "page_idx": 5
875
+ },
876
+ {
877
+ "type": "text",
878
+ "text": "4.2 Experimental Settings",
879
+ "text_level": 1,
880
+ "bbox": [
881
+ 112,
882
+ 507,
883
+ 334,
884
+ 524
885
+ ],
886
+ "page_idx": 5
887
+ },
888
+ {
889
+ "type": "text",
890
+ "text": "LLMs in our paper include LLaMA-2-7B, 13B and their chat version (Touvron et al., 2023b). We use LoRA to fine-tune these pre-trained LLMs on four A100 GPUs with the learning rate of 1e-5, per-gpu batch size of 4 (for 7B) and 2 (for 13B) for 5K steps. As for the training data, we intercept 15 consecutive sentences for each example.",
891
+ "bbox": [
892
+ 112,
893
+ 532,
894
+ 487,
895
+ 645
896
+ ],
897
+ "page_idx": 5
898
+ },
899
+ {
900
+ "type": "text",
901
+ "text": "As for the retrieval model and retrieval database, for Open-domain QA, Soft Filling and Language Modeling, we use ColBERTv2 (Santhanam et al., 2022), a late-interaction model with excellent generalization ability as the retriever, and use Wikipedia consisting of 21,015,324 passages (Karpukhin et al., 2020) as retrieval database. For Code Generation, we SCODE-R (Parvez et al., 2021) as code retriever and use deduplicated source codes in CodeSearchNET (Husain et al., 2019) as retrieval database. For all the above tasks, we give Top-5 retrieved passages to each example. For LFQA, Dialog, and Multi-Hop QA, we use the list of contextual passages provided in the datasets as the retrieved list (distractor setting). In each experiment, all baselines and our method share the same retrieved documents.",
902
+ "bbox": [
903
+ 112,
904
+ 646,
905
+ 489,
906
+ 920
907
+ ],
908
+ "page_idx": 5
909
+ },
910
+ {
911
+ "type": "text",
912
+ "text": "4.3 Experimental Results",
913
+ "text_level": 1,
914
+ "bbox": [
915
+ 507,
916
+ 309,
917
+ 724,
918
+ 325
919
+ ],
920
+ "page_idx": 5
921
+ },
922
+ {
923
+ "type": "text",
924
+ "text": "Main Results (Zero-Shot Setting) Experimental results in Table 1 show the improvement (the average is $9.39\\%$ ) of our method on the utilization of retrieved knowledge from four aspects.",
925
+ "bbox": [
926
+ 507,
927
+ 338,
928
+ 882,
929
+ 404
930
+ ],
931
+ "page_idx": 5
932
+ },
933
+ {
934
+ "type": "list",
935
+ "sub_type": "text",
936
+ "list_items": [
937
+ "(1) Short and Direct Knowledge. Our method can significantly improve the RAG performance of LLaMA on ODQA and Slot-Filling tasks. The answer in ODQA and Slot-Filling is short and direct, it can directly reflect the ability of LLMs to utilize the knowledge in retrieved texts.",
938
+ "(2) Reasoning on Multiple Knowledge. Our INFO-RAG has advantages in cross-passage reasoning on multiple knowledge of retrieval lists. Questions in both HotpotQA and Musique are complex and need multiple knowledge from different passages. These questions not only require LLMs to extract correct knowledge from the retrieved passage list but also to combine the knowledge of different passages in the list for reasoning to give the final answer.",
939
+ "(3) Long and Complex Knowledge. Our INFO-RAG can improve the RAG performance of LLaMA on LFQA, Dialogue and Language Modeling. These tasks require LLaMA to output long and complex texts grounded with intensive knowledge.",
940
+ "(4) Code Knowledge. Our INFO-RAG can also improve the RAG performance of LLaMA on Code Generation. This further demonstrates the cros-task generality of INFO-RAG. Our method is only trained on natural language but can also show advantages in programming language tasks, which demonstrates that INFO-RAG successfully enables LLMs to learn how to exploit the retrieved information rather than just fitting the data. Unsupervised and prefix language modeling training paradigms make INFO-RAG general in various tasks."
941
+ ],
942
+ "bbox": [
943
+ 507,
944
+ 407,
945
+ 885,
946
+ 921
947
+ ],
948
+ "page_idx": 5
949
+ },
950
+ {
951
+ "type": "table",
952
+ "img_path": "images/e94864c634e982334315d165bbb2ac4ab51750fc4213ad0fc26df5df236c9fb8.jpg",
953
+ "table_caption": [],
954
+ "table_footnote": [],
955
+ "table_body": "<table><tr><td></td><td colspan=\"3\">T-REx</td><td colspan=\"3\">ZS</td><td colspan=\"3\">NQ</td><td colspan=\"3\">WebQ</td></tr><tr><td></td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td></tr><tr><td>LLaMA-2-7B</td><td>67.19</td><td>38.37</td><td>6.49</td><td>64.41</td><td>12.78</td><td>2.44</td><td>65.54</td><td>16.91</td><td>3.41</td><td>60.64</td><td>25.68</td><td>7.90</td></tr><tr><td>+ INFO-RAG</td><td>79.80</td><td>41.79</td><td>7.04</td><td>68.10</td><td>13.55</td><td>3.26</td><td>64.43</td><td>22.68</td><td>4.70</td><td>62.70</td><td>26.48</td><td>8.96</td></tr><tr><td>LLaMA-2-7B-chat</td><td>73.79</td><td>40.56</td><td>4.87</td><td>66.71</td><td>14.19</td><td>1.63</td><td>68.72</td><td>20.81</td><td>4.50</td><td>66.86</td><td>28.63</td><td>5.62</td></tr><tr><td>+ INFO-RAG</td><td>80.01</td><td>42.92</td><td>5.42</td><td>69.64</td><td>15.02</td><td>2.65</td><td>70.99</td><td>23.14</td><td>5.62</td><td>68.73</td><td>29.74</td><td>9.12</td></tr><tr><td>LLaMA-2-13B</td><td>72.26</td><td>39.47</td><td>7.76</td><td>60.14</td><td>19.71</td><td>4.69</td><td>65.94</td><td>18.45</td><td>4.42</td><td>62.09</td><td>26.63</td><td>9.27</td></tr><tr><td>+ INFO-RAG</td><td>75.80</td><td>44.08</td><td>8.48</td><td>65.94</td><td>23.21</td><td>4.90</td><td>64.98</td><td>27.60</td><td>8.02</td><td>63.51</td><td>28.24</td><td>9.88</td></tr><tr><td>LLaMA-2-13B-chat</td><td>75.96</td><td>43.79</td><td>5.59</td><td>67.03</td><td>16.58</td><td>1.42</td><td>69.37</td><td>30.72</td><td>6.16</td><td>65.07</td><td>31.88</td><td>5.47</td></tr><tr><td>+ INFO-RAG</td><td>79.25</td><td>48.59</td><td>6.67</td><td>70.26</td><td>25.02</td><td>3.87</td><td>73.73</td><td>33.85</td><td>8.39</td><td>70.59</td><td>37.48</td><td>11.25</td></tr></table>",
956
+ "bbox": [
957
+ 119,
958
+ 80,
959
+ 887,
960
+ 234
961
+ ],
962
+ "page_idx": 6
963
+ },
964
+ {
965
+ "type": "text",
966
+ "text": "Results on In-context Learning for RAG Besides, our INFO-RAG allows further improvement cooperating with in-context learning (ICL). ICL (Brown et al., 2020) works by preponding a few examples of the target task before the query, which helps LLMs understand the task. However, ICL may not always help in the RAG setting, mainly due to the confusion between the retrieved texts of the query and the few-shot examples. As shown in Table 3, LLaMA-2 cannot further improve the RAG performance from ICL, even sometimes hurt by the few-shot examples while INFO-RAG can further improve RAG by ICL. This is mainly because INFO-RAG enables LLaMA to understand the task form of RAG, thereby better learning the general task pattern from ICL examples. In this experiment, we construct the ICL example consisting of a query, a relevant passage, and an answer. For a fair comparison, we need to ensure that the performance of our method and the baseline are close in non-ICL setting. Therefore, we select queries for which the baseline gives the same answer as our method (both correct or both incorrect) and evaluate the ICL performance on these queries.",
967
+ "bbox": [
968
+ 115,
969
+ 315,
970
+ 489,
971
+ 717
972
+ ],
973
+ "page_idx": 6
974
+ },
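A sketch of the ICL prompt construction described above; the exact template is an assumption, since the passage only states that each example consists of a query, a relevant passage, and an answer.

```python
# Sketch: prepend (passage, query, answer) demonstrations before the test query.
def build_icl_prompt(demos, test_passages, test_query):
    parts = [
        f"Passage: {passage}\nQuestion: {query}\nAnswer: {answer}"
        for passage, query, answer in demos
    ]
    context = "\n".join(f"Passage: {p}" for p in test_passages)
    parts.append(f"{context}\nQuestion: {test_query}\nAnswer:")
    return "\n\n".join(parts)
```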
975
+ {
976
+ "type": "text",
977
+ "text": "Enhancing Previous SOTA in Open-Retrieval Setting We further show that our INFO-RAG can cooperate well with the recent prompting techniques that perform multi-step reasoning to combine with retrieval to solve questions (Xu et al., 2023; Khattab et al., 2022; Press et al., 2023; Yao et al., 2022). To make a fair comparison, we follow SearChain (Xu et al., 2023) that runs on Multi-Hop QA and Slot-Filling in open-retrieval setting that retrieves passages from the full Wikipedia in each reasoning step. SearChain and other baselines use LLaMA-2-13B-chat as the backbone. Then, we",
978
+ "bbox": [
979
+ 112,
980
+ 728,
981
+ 489,
982
+ 921
983
+ ],
984
+ "page_idx": 6
985
+ },
986
+ {
987
+ "type": "table",
988
+ "img_path": "images/21bb6796610484e18a74f8c26ff4f49cf0521e18b6b8d149c06939928b147bab.jpg",
989
+ "table_caption": [
990
+ "Table 2: Experimental results on three scenarios. \"has-ans.\" is the first scenario that correct answers are in retrieved texts. \"replace\" is the second scenario that correct answers are randomly replaced with other phrases to simulate the incorrect and incomplete knowledge. \"no-ans.\" is the third scenario that retrieval cannot find any answers."
991
+ ],
992
+ "table_footnote": [],
993
+ "table_body": "<table><tr><td rowspan=\"2\">Data</td><td rowspan=\"2\">Model</td><td colspan=\"6\">Number of Examples in ICL</td></tr><tr><td>0</td><td>2</td><td>4</td><td>8</td><td>12</td><td>16</td></tr><tr><td rowspan=\"2\">NQ</td><td>LLaMA-2</td><td>43.36</td><td>23.34</td><td>16.60</td><td>39.22</td><td>44.32</td><td>43.00</td></tr><tr><td>+INFO-RAG</td><td>43.36</td><td>44.35</td><td>45.88</td><td>44.45</td><td>47.75</td><td>46.25</td></tr><tr><td rowspan=\"2\">WebQ</td><td>LLaMA-2</td><td>43.20</td><td>18.36</td><td>9.40</td><td>36.71</td><td>44.80</td><td>44.81</td></tr><tr><td>+INFO-RAG</td><td>43.20</td><td>48.03</td><td>49.82</td><td>48.25</td><td>47.86</td><td>47.29</td></tr><tr><td rowspan=\"2\">T-REx</td><td>LLaMA-2</td><td>59.83</td><td>47.05</td><td>49.11</td><td>56.51</td><td>55.23</td><td>56.31</td></tr><tr><td>+INFO-RAG</td><td>59.83</td><td>63.08</td><td>63.45</td><td>63.54</td><td>63.57</td><td>63.38</td></tr><tr><td rowspan=\"2\">ZS</td><td>LLaMA-2</td><td>52.41</td><td>42.71</td><td>37.05</td><td>50.40</td><td>50.20</td><td>51.01</td></tr><tr><td>+INFO-RAG</td><td>52.41</td><td>56.53</td><td>60.37</td><td>59.86</td><td>59.75</td><td>59.85</td></tr></table>",
994
+ "bbox": [
995
+ 512,
996
+ 312,
997
+ 889,
998
+ 463
999
+ ],
1000
+ "page_idx": 6
1001
+ },
1002
+ {
1003
+ "type": "table",
1004
+ "img_path": "images/1fbb6d8d054861d2b384c91b694fa54305224b7ea17d0c67ce4c673a4072dc49.jpg",
1005
+ "table_caption": [
1006
+ "Table 3: RAG performance changes with number of examples in In-context learning."
1007
+ ],
1008
+ "table_footnote": [],
1009
+ "table_body": "<table><tr><td></td><td colspan=\"2\">Multi-Hop QA</td><td colspan=\"2\">Slot-Filling</td></tr><tr><td></td><td>HotpotQA</td><td>Musique</td><td>T-REx</td><td>zsRE</td></tr><tr><td>Previous SOTA</td><td>28.19</td><td>10.03</td><td>63.10</td><td>57.09</td></tr><tr><td>SearChain</td><td>31.21</td><td>11.27</td><td>64.58</td><td>58.91</td></tr><tr><td>+ INFO-RAG</td><td>33.04</td><td>12.10</td><td>66.95</td><td>60.72</td></tr></table>",
1010
+ "bbox": [
1011
+ 512,
1012
+ 519,
1013
+ 882,
1014
+ 594
1015
+ ],
1016
+ "page_idx": 6
1017
+ },
1018
1029
+ {
1030
+ "type": "text",
1031
+ "text": "perform SearChain based on LLaMA-2-13B-chat trained by INFO-RAG to show the enhancement to SearChain by INFO-RAG. Results in Table 4 show that INFO-RAG can make SearChain achieve better performance. This provides additional support that our unsupervised INFO training fundamentally improves the RAG performance of LLMs.",
1032
+ "bbox": [
1033
+ 507,
1034
+ 662,
1035
+ 882,
1036
+ 776
1037
+ ],
1038
+ "page_idx": 6
1039
+ },
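The open-retrieval, multi-step pattern referred to here can be sketched generically as below; this is our reading of such frameworks, not SearChain's actual algorithm, and `llm`/`retriever` are hypothetical interfaces.

```python
# Generic sketch of multi-step RAG: retrieve from the full Wikipedia at each
# reasoning step and let the (INFO-RAG-trained) LLM refine the retrieved texts.
def multi_step_rag(question: str, llm, retriever, max_steps: int = 5) -> str:
    query, last_step = question, ""
    for _ in range(max_steps):
        passages = retriever.search(query, k=5)       # hypothetical interface
        context = "\n".join(passages)
        last_step = llm.generate(f"{context}\nQuestion: {query}\nNext step:")
        if "Final answer:" in last_step:              # assumed stop marker
            break
        query = last_step                             # continue with a sub-query
    return last_step
```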
1040
+ {
1041
+ "type": "text",
1042
+ "text": "4.4 Analysis",
1043
+ "text_level": 1,
1044
+ "bbox": [
1045
+ 507,
1046
+ 787,
1047
+ 623,
1048
+ 802
1049
+ ],
1050
+ "page_idx": 6
1051
+ },
1052
+ {
1053
+ "type": "text",
1054
+ "text": "Fine-grained Analysis for Three Scenarios As shown in Table 2, our INFO-RAG is effective in all three RAG scenarios and shows better robustness to incorrect, incomplete, and noisy retrieved texts. We propose corresponding unsupervised training tasks for the three scenarios of RAG. This section introduces the fine-grained analysis for each sce",
1055
+ "bbox": [
1056
+ 505,
1057
+ 808,
1058
+ 884,
1059
+ 921
1060
+ ],
1061
+ "page_idx": 6
1062
+ },
1063
+ {
1064
+ "type": "table",
1065
+ "img_path": "images/95d6030e60673e475f81f9352341cc89a49436a597cec43f9ca0a8b230d61805.jpg",
1066
+ "table_caption": [],
1067
+ "table_footnote": [],
1068
+ "table_body": "<table><tr><td></td><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td><td>Hotpot</td><td>Musique</td><td>EI5</td><td>Wow</td><td>WikiText</td><td>Python</td><td>Java</td><td>Overall</td></tr><tr><td>LLaMA-2 w/o RAG</td><td>35.60</td><td>10.99</td><td>32.67</td><td>39.13</td><td>29.16</td><td>5.83</td><td>26.05</td><td>10.71</td><td>41.80</td><td>20.67</td><td>25.87</td><td>25.32</td></tr><tr><td>LLaMA-2 w/ RAG</td><td>62.53</td><td>56.81</td><td>50.36</td><td>45.47</td><td>61.23</td><td>47.06</td><td>27.07</td><td>11.19</td><td>60.52</td><td>22.34</td><td>30.96</td><td>43.23</td></tr><tr><td>+ training on wiki</td><td>62.55</td><td>56.79</td><td>49.23</td><td>45.05</td><td>61.00</td><td>46.95</td><td>26.31</td><td>11.05</td><td>60.84</td><td>22.05</td><td>30.28</td><td>42.92</td></tr><tr><td>+ INFO-RAG</td><td>65.39</td><td>59.05</td><td>54.04</td><td>51.07</td><td>61.91</td><td>47.93</td><td>27.24</td><td>11.38</td><td>63.92</td><td>31.98</td><td>38.12</td><td>46.55</td></tr></table>",
1069
+ "bbox": [
1070
+ 119,
1071
+ 82,
1072
+ 880,
1073
+ 156
1074
+ ],
1075
+ "page_idx": 7
1076
+ },
1077
+ {
1078
+ "type": "table",
1079
+ "img_path": "images/28f005edaeb9a7a5032769b554983c0b32de3faccd261950996019583a1a43b4.jpg",
1080
+ "table_caption": [
1081
+ "Table 5: Analysis on the best-performed model LLaMA-2-13B-chat."
1082
+ ],
1083
+ "table_footnote": [],
1084
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"4\">NQ</td></tr><tr><td colspan=\"4\">original has-ans. replace no-ans.</td></tr><tr><td>Baseline</td><td>50.36</td><td>69.37</td><td>30.72</td><td>6.16</td></tr><tr><td>S1: Select and Copy</td><td>48.77</td><td>69.59</td><td>25.40</td><td>0.11</td></tr><tr><td>S2: Correct and Complete</td><td>51.59</td><td>70.42</td><td>32.71</td><td>4.48</td></tr><tr><td>S3: Contextual Stimulation</td><td>52.75</td><td>72.50</td><td>31.77</td><td>8.86</td></tr><tr><td>S2&amp;S3</td><td>53.73</td><td>73.01</td><td>32.50</td><td>9.01</td></tr><tr><td>INFO-RAG (S1&amp; S2&amp;S3)</td><td>54.04</td><td>73.73</td><td>33.85</td><td>8.39</td></tr></table>",
1085
+ "bbox": [
1086
+ 117,
1087
+ 205,
1088
+ 489,
1089
+ 319
1090
+ ],
1091
+ "page_idx": 7
1092
+ },
1093
+ {
1094
+ "type": "text",
1095
+ "text": "nario. For Scenario 1, we use cover EM to select those samples that already contain the correct answers in the retrieval list. For Scenario 2, we randomly replace the correct answers in the retrieved texts with another phrase with the same properties. For Scenario 3, we use cover EM to select those samples that retrieved texts do not contain any correct answers. We count the accuracy of LLaMA on samples of these three scenarios respectively. Questions in the third scenario are more difficult than in the second scenario because retrieval models cannot find anything to solve them. Table 2 indicates that our method shows advantages in each scenario and is more robust regardless of whether the retrieved texts contain the correct answer.",
1096
+ "bbox": [
1097
+ 112,
1098
+ 374,
1099
+ 489,
1100
+ 615
1101
+ ],
1102
+ "page_idx": 7
1103
+ },
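A minimal sketch of how the three evaluation sets could be derived (the cover-EM check follows the description above; the same-property phrase sampling is simplified to a random choice):

```python
# Sketch of the three-scenario evaluation construction described above.
import random

def cover_em(answers: list[str], passages: list[str]) -> bool:
    """True if any gold answer is covered by the retrieved texts."""
    text = " ".join(passages).lower()
    return any(ans.lower() in text for ans in answers)

def build_scenarios(answers, passages, candidate_phrases):
    if not cover_em(answers, passages):
        return {"no-ans.": passages}            # Scenario 3: retrieval finds nothing useful
    replaced = passages
    for ans in answers:                         # Scenario 2: corrupt the answers
        sub = random.choice(candidate_phrases)  # simplified substitute sampling
        replaced = [p.replace(ans, sub) for p in replaced]
    return {"has-ans.": passages, "replace": replaced}
```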
1104
+ {
1105
+ "type": "text",
1106
+ "text": "Ablation Study We conduct ablation study to explore the effects of the following factors.",
1107
+ "bbox": [
1108
+ 112,
1109
+ 627,
1110
+ 485,
1111
+ 659
1112
+ ],
1113
+ "page_idx": 7
1114
+ },
1115
+ {
1116
+ "type": "text",
1117
+ "text": "(1) Additional Training on Wikipedia. We study whether our improvement is from helping the model to achieve information refinement, or simply because of additional training on Wikipedia. To this end, we train LLaMA-2 on Wikipedia with standard language modeling objective, by setting the same hyperparameters as our INFO-RAG. The results in Table 5 show that this baseline leads to no improvement over the backbone LLaMA-2, confirming the effectiveness of our training method rather than additional training on Wikipedia.",
1118
+ "bbox": [
1119
+ 112,
1120
+ 662,
1121
+ 485,
1122
+ 838
1123
+ ],
1124
+ "page_idx": 7
1125
+ },
1126
+ {
1127
+ "type": "text",
1128
+ "text": "(2) Training tasks. We perform three training tasks proposed in INFO-RAG separately on original data and data constructed for each scenario to explore their effects respectively. Table 6 shows that both S2 and S3 have gains in their scenarios. Although",
1129
+ "bbox": [
1130
+ 112,
1131
+ 841,
1132
+ 485,
1133
+ 921
1134
+ ],
1135
+ "page_idx": 7
1136
+ },
1137
+ {
1138
+ "type": "table",
1139
+ "img_path": "images/0ea8de493ca846fecf20ced3b53fce968c2e7774a272ffc03e1e8fa09ba7b12a.jpg",
1140
+ "table_caption": [
1141
+ "Table 6: Effects of three training tasks."
1142
+ ],
1143
+ "table_footnote": [],
1144
+ "table_body": "<table><tr><td>Datasets</td><td>Method</td><td>Max Δ ratio</td><td>Max Δ position</td><td>Max Δ number</td></tr><tr><td rowspan=\"2\">NQ</td><td>LLaMA-2</td><td>-51.94%</td><td>-16.18%</td><td>-25.43%</td></tr><tr><td>+ INFO-RAG</td><td>-43.48%</td><td>-15.80%</td><td>-17.25%</td></tr><tr><td rowspan=\"2\">WebQ</td><td>LLaMA-2</td><td>-50.57%</td><td>-5.63%</td><td>-22.13%</td></tr><tr><td>+ INFO-RAG</td><td>-45.48%</td><td>-8.72%</td><td>-11.91%</td></tr><tr><td rowspan=\"2\">T-REx</td><td>LLaMA-2</td><td>-46.57%</td><td>-9.45%</td><td>-5.95%</td></tr><tr><td>+ INFO-RAG</td><td>-44.38%</td><td>-8.61%</td><td>-2.99%</td></tr><tr><td rowspan=\"2\">ZS</td><td>LLaMA-2</td><td>-59.25%</td><td>-13.40%</td><td>-12.37%</td></tr><tr><td>+ INFO-RAG</td><td>-50.08%</td><td>-11.11%</td><td>-11.43%</td></tr></table>",
1145
+ "bbox": [
1146
+ 512,
1147
+ 205,
1148
+ 890,
1149
+ 353
1150
+ ],
1151
+ "page_idx": 7
1152
+ },
1153
1164
+ {
1165
+ "type": "text",
1166
+ "text": "S1 has negative effects when performed alone, it can achieve the best results when trained together with S2 and S3. This is mainly because S1 alone is so simple that causes LLM to overfit the data. Adding S2 and S3 allows LLM to learn the task paradigm of information refinement, making LLM better extract the correct answer for Scenario 1.",
1167
+ "bbox": [
1168
+ 505,
1169
+ 420,
1170
+ 882,
1171
+ 533
1172
+ ],
1173
+ "page_idx": 7
1174
+ },
1175
+ {
1176
+ "type": "text",
1177
+ "text": "Robustness to Retrieval Results Table 7 shows INFO-RAG is more robust to changes in retrieval results including the ratio and position of positive passages and number of retrieved passages. More details can be found in Section A of Appendix.",
1178
+ "bbox": [
1179
+ 507,
1180
+ 543,
1181
+ 880,
1182
+ 621
1183
+ ],
1184
+ "page_idx": 7
1185
+ },
1186
+ {
1187
+ "type": "text",
1188
+ "text": "Avoid Catastrophic Forgetting Experiment on MMLU (Hendrycks et al., 2020) without RAG shows that INFO-RAG performs very close to the original LLaMA-2 (7B: 45.0 vs. 45.3; 13B: 54.3 vs. 54.8), which indicates that INFO-RAG enhances RAG while avoiding catastrophic forgetting. More details can be found in Section A.6 of Appendix.",
1189
+ "bbox": [
1190
+ 507,
1191
+ 624,
1192
+ 882,
1193
+ 736
1194
+ ],
1195
+ "page_idx": 7
1196
+ },
1197
+ {
1198
+ "type": "text",
1199
+ "text": "5 Conclusion",
1200
+ "text_level": 1,
1201
+ "bbox": [
1202
+ 507,
1203
+ 750,
1204
+ 640,
1205
+ 765
1206
+ ],
1207
+ "page_idx": 7
1208
+ },
1209
+ {
1210
+ "type": "text",
1211
+ "text": "This paper proposes a novel perspective to reassess the role of LLMs in RAG that considers LLMs as \"Information Refiner\". This means that regardless of the correctness, completeness, or usefulness of the retrieved texts, LLMs can consistently integrate knowledge within model parameters and the retrieved texts to generate texts that are more concise, accurate, and complete. To achieve it, we propose an information refinement training method named",
1212
+ "bbox": [
1213
+ 505,
1214
+ 776,
1215
+ 882,
1216
+ 921
1217
+ ],
1218
+ "page_idx": 7
1219
+ },
1220
+ {
1221
+ "type": "text",
1222
+ "text": "INFO-RAG in an unsupervised manner, which is low-cost and general across various tasks. Extensive experiments across 11 datasets of 7 tasks in zero-shot setting show that INFO-RAG improves the performance of LLMs for RAG. INFO-RAG also shows advantages in ICL and robustness of RAG and can be combined with the SOTA RAG framework to further improve its performance.",
1223
+ "bbox": [
1224
+ 112,
1225
+ 84,
1226
+ 489,
1227
+ 212
1228
+ ],
1229
+ "page_idx": 8
1230
+ },
1231
+ {
1232
+ "type": "text",
1233
+ "text": "Limitations",
1234
+ "text_level": 1,
1235
+ "bbox": [
1236
+ 112,
1237
+ 223,
1238
+ 220,
1239
+ 239
1240
+ ],
1241
+ "page_idx": 8
1242
+ },
1243
+ {
1244
+ "type": "text",
1245
+ "text": "This paper aims to enable LLMs to perform information refinement in RAG by unsupervised training, so as to accurately extract correct information and avoid the interference of incorrect information. The main limitation of this paper is that due to the lack of computing resources, we only conduct experiments on models with 7B and 13B parameter sizes. In the future, we consider using more computing resources to explore the performance of models with larger parameter sizes.",
1246
+ "bbox": [
1247
+ 112,
1248
+ 249,
1249
+ 490,
1250
+ 409
1251
+ ],
1252
+ "page_idx": 8
1253
+ },
1254
+ {
1255
+ "type": "text",
1256
+ "text": "Ethics Statement",
1257
+ "text_level": 1,
1258
+ "bbox": [
1259
+ 114,
1260
+ 420,
1261
+ 265,
1262
+ 435
1263
+ ],
1264
+ "page_idx": 8
1265
+ },
1266
+ {
1267
+ "type": "text",
1268
+ "text": "After careful consideration, we believe that our paper does not introduce additional ethical concerns. We declare that our work complies with the ACL Ethics Policy.",
1269
+ "bbox": [
1270
+ 112,
1271
+ 445,
1272
+ 489,
1273
+ 508
1274
+ ],
1275
+ "page_idx": 8
1276
+ },
1277
+ {
1278
+ "type": "text",
1279
+ "text": "Acknowledgements",
1280
+ "text_level": 1,
1281
+ "bbox": [
1282
+ 114,
1283
+ 520,
1284
+ 285,
1285
+ 537
1286
+ ],
1287
+ "page_idx": 8
1288
+ },
1289
+ {
1290
+ "type": "text",
1291
+ "text": "This work was supported by the National Key R&D Program of China (2022YFB3103700, 2022YFB3103704), the National Natural Science Foundation of China (NSFC) under Grants No. 62276248 and U21B2046, and the Youth Innovation Promotion Association CAS under Grants No. 2023111.",
1292
+ "bbox": [
1293
+ 112,
1294
+ 545,
1295
+ 489,
1296
+ 656
1297
+ ],
1298
+ "page_idx": 8
1299
+ },
1300
+ {
1301
+ "type": "text",
1302
+ "text": "References",
1303
+ "text_level": 1,
1304
+ "bbox": [
1305
+ 114,
1306
+ 683,
1307
+ 213,
1308
+ 697
1309
+ ],
1310
+ "page_idx": 8
1311
+ },
1312
+ {
1313
+ "type": "list",
1314
+ "sub_type": "ref_text",
1315
+ "list_items": [
1316
+ "Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511.",
1317
+ "Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. 2013. Semantic parsing on freebase from question-answer pairs. In Proceedings of the EMNLP 2013, pages 1533-1544.",
1318
+ "Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. 2022. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pages 2206-2240. PMLR."
1319
+ ],
1320
+ "bbox": [
1321
+ 115,
1322
+ 705,
1323
+ 489,
1324
+ 920
1325
+ ],
1326
+ "page_idx": 8
1327
+ },
1328
+ {
1329
+ "type": "list",
1330
+ "sub_type": "ref_text",
1331
+ "list_items": [
1332
+ "Tom B. Brown, Benjamin Mann, Nick Ryder, et al. 2020. Language models are few-shot learners.",
1333
+ "Deng Cai, Yan Wang, Victoria Bi, Zhaopeng Tu, Xiaojiang Liu, Wai Lam, and Shuming Shi. 2018. Skeleton-to-response: Dialogue generation guided by retrieval memory. arXiv preprint arXiv:1809.05296.",
1334
+ "Deng Cai, Yan Wang, Wei Bi, Zhaopeng Tu, Xiaojiang Liu, and Shuming Shi. 2019. Retrieval-guided dialogue response generation via a matching-to-generation framework. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1866-1875.",
1335
+ "Jiawei Chen, Hongyu Lin, Xianpei Han, and Le Sun. 2023. Benchmarking large language models in retrieval-augmented generation. arXiv preprint arXiv:2309.01431.",
1336
+ "Yung-Sung Chuang, Yujia Xie, Hongyin Luo, Yoon Kim, James Glass, and Pengcheng He. 2023. Dola: Decoding by contrasting layers improves factuality in large language models. arXiv preprint arXiv:2309.03883.",
1337
+ "Jingcheng Deng, Liang Pang, Huawei Shen, and Xueqi Cheng. 2023. Regavae: A retrieval-augmented gaussian mixture variational auto-encoder for language modeling. arXiv preprint arXiv:2310.10567.",
1338
+ "Shehzaad Dhuliawala, Mojtaba Komeili, Jing Xu, Roberta Raileanu, Xian Li, Asli Celikyilmaz, and Jason Weston. 2023. Chain-of-verification reduces hallucination in large language models.",
1339
+ "Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2018. Wizard of wikipedia: Knowledge-powered conversational agents. arXiv preprint arXiv:1811.01241.",
1340
+ "Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. 2018. T-rex: A large scale alignment of natural language with knowledge base triples. In Proceedings of LREC 2018.",
1341
+ "Angela Fan, Yacine Jernite, Ethan Perez, David Grangier, Jason Weston, and Michael Auli. 2019. Eli5: Long form question answering. arXiv preprint arXiv:1907.09190.",
1342
+ "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR.",
1343
+ "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2020. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300."
1344
+ ],
1345
+ "bbox": [
1346
+ 510,
1347
+ 85,
1348
+ 884,
1349
+ 920
1350
+ ],
1351
+ "page_idx": 8
1352
+ },
1353
+ {
1354
+ "type": "list",
1355
+ "sub_type": "ref_text",
1356
+ "list_items": [
1357
+ "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685.",
1358
+ "Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. 2019. Code-searchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436.",
1359
+ "Srinivasan Iyer, Ioannis Konstas, Alvin Cheung, and Luke Zettlemoyer. 2018. Mapping language to code in programmatic context. arXiv preprint arXiv:1808.09588.",
1360
+ "Gautier Izacard, Patrick Lewis, Maria Lomeli, Lucas Hosseini, Fabio Petroni, Timo Schick, Jane Dwivedi-Yu, Armand Joulin, Sebastian Riedel, and Edouard Grave. 2022. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299.",
1361
+ "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6769-6781.",
1362
+ "Omar Khattab, Keshav Santhanam, Xiang Lisa Li, David Hall, Percy Liang, Christopher Potts, and Matei Zaharia. 2022. Demonstrate-search-predict: Composing retrieval and language models for knowledge-intensive nlp. arXiv preprint arXiv:2212.14024.",
1363
+ "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466.",
1364
+ "Omer Levy, Minjoon Seo, Eunsol Choi, and Luke Zettlemoyer. 2017. Zero-shot relation extraction via reading comprehension. arXiv preprint arXiv:1706.04115.",
1365
+ "Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474.",
1366
+ "Yun Luo, Zhen Yang, Fandong Meng, Yafu Li, Jie Zhou, and Yue Zhang. 2023. An empirical study of catastrophic forgetting in large language models during continual fine-tuning. arXiv preprint arXiv:2308.08747.",
1367
+ "Stephen Merity. 2016. The wikitext long term dependency language modeling dataset. Salesforce MetaMind, 9."
1368
+ ],
1369
+ "bbox": [
1370
+ 115,
1371
+ 85,
1372
+ 485,
1373
+ 919
1374
+ ],
1375
+ "page_idx": 9
1376
+ },
1377
+ {
1378
+ "type": "list",
1379
+ "sub_type": "ref_text",
1380
+ "list_items": [
1381
+ "Tomáš Mikolov et al. 2012. Statistical language models based on neural networks. Presentation at Google, Mountain View, 2nd April, 80(26).",
1382
+ "Md Rizwan Parvez, Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. 2021. Retrieval augmented code generation and summarization. arXiv preprint arXiv:2108.11601.",
1383
+ "Baolin Peng, Michel Galley, Pengcheng He, Hao Cheng, Yujia Xie, Yu Hu, Qiuyuan Huang, Lars Liden, Zhou Yu, Weizhu Chen, et al. 2023. Check your facts and try again: Improving large language models with external knowledge and automated feedback. arXiv preprint arXiv:2302.12813.",
1384
+ "Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. Kilt: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252.",
1385
+ "Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A. Smith, and Mike Lewis. 2023. Measuring and narrowing the compositionality gap in language models.",
1386
+ "Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. 2018. Improving language understanding by generative pre-training.",
1387
+ "Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. 2023. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083.",
1388
+ "Ruiyang Ren, Yuhao Wang, Yingqi Qu, Wayne Xin Zhao, Jing Liu, Hao Tian, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2023. Investigating the factual knowledge boundary of large language models with retrieval augmentation. arXiv preprint arXiv:2307.11019.",
1389
+ "Shuo Ren, Daya Guo, Shuai Lu, Long Zhou, Shujie Liu, Duyu Tang, Neel Sundaresan, Ming Zhou, Ambrosio Blanco, and Shuai Ma. 2020. Codebleu: a method for automatic evaluation of code synthesis. arXiv preprint arXiv:2009.10297.",
1390
+ "Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, Christopher Potts, and Matei Zaharia. 2022. Colbertv2: Effective and efficient retrieval via lightweight late interaction.",
1391
+ "Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761.",
1392
+ "Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Rich James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2023. Replug: Retrievalaugmented black-box language models. arXiv preprint arXiv:2301.12652."
1393
+ ],
1394
+ "bbox": [
1395
+ 510,
1396
+ 85,
1397
+ 880,
1398
+ 919
1399
+ ],
1400
+ "page_idx": 9
1401
+ },
1402
+ {
1403
+ "type": "text",
1404
+ "text": "Tiening Sun, Zhong Qian, Sujun Dong, Peifeng Li, and Qiaoming Zhu. 2022. Rumor detection on social media with graph adversarial contrastive learning. In Proceedings of the WWW 2022, pages 2789-2797.",
1405
+ "bbox": [
1406
+ 115,
1407
+ 85,
1408
+ 487,
1409
+ 139
1410
+ ],
1411
+ "page_idx": 10
1412
+ },
1413
+ {
1414
+ "type": "text",
1415
+ "text": "Nandan Thakur, Nils Reimers, Andreas Rückle, Abhishek Srivastava, and Iryna Gurevych. 2021. Beir: A heterogenous benchmark for zero-shot evaluation of information retrieval models. arXiv preprint arXiv:2104.08663.",
1416
+ "bbox": [
1417
+ 115,
1418
+ 152,
1419
+ 487,
1420
+ 217
1421
+ ],
1422
+ "page_idx": 10
1423
+ },
1424
+ {
1425
+ "type": "text",
1426
+ "text": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023a. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.",
1427
+ "bbox": [
1428
+ 115,
1429
+ 231,
1430
+ 487,
1431
+ 309
1432
+ ],
1433
+ "page_idx": 10
1434
+ },
1435
+ {
1436
+ "type": "text",
1437
+ "text": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023b. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.",
1438
+ "bbox": [
1439
+ 115,
1440
+ 323,
1441
+ 487,
1442
+ 401
1443
+ ],
1444
+ "page_idx": 10
1445
+ },
1446
+ {
1447
+ "type": "text",
1448
+ "text": "Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022a. Interleaving retrieval with chain-of-thought reasoning for knowledge-intensive multi-step questions. arXiv preprint arXiv:2212.10509.",
1449
+ "bbox": [
1450
+ 115,
1451
+ 414,
1452
+ 487,
1453
+ 482
1454
+ ],
1455
+ "page_idx": 10
1456
+ },
1457
+ {
1458
+ "type": "text",
1459
+ "text": "Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022b. Musique: Multi-hop questions via single-hop question composition. Transactions of the Association for Computational Linguistics, 10:539-554.",
1460
+ "bbox": [
1461
+ 115,
1462
+ 495,
1463
+ 487,
1464
+ 561
1465
+ ],
1466
+ "page_idx": 10
1467
+ },
1468
+ {
1469
+ "type": "text",
1470
+ "text": "Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002.",
1471
+ "bbox": [
1472
+ 115,
1473
+ 575,
1474
+ 487,
1475
+ 627
1476
+ ],
1477
+ "page_idx": 10
1478
+ },
1479
+ {
1480
+ "type": "text",
1481
+ "text": "Shicheng Xu, Liang Pang, Huawei Shen, Xueqi Cheng, and Tat-seng Chua. 2023. Search-in-the-chain: Towards the accurate, credible and traceable content generation for complex knowledge-intensive tasks. arXiv preprint arXiv:2304.14732.",
1482
+ "bbox": [
1483
+ 115,
1484
+ 640,
1485
+ 487,
1486
+ 707
1487
+ ],
1488
+ "page_idx": 10
1489
+ },
1490
+ {
1491
+ "type": "text",
1492
+ "text": "Shicheng Xu, Liang Pang, Jun Xu, Huawei Shen, and Xueqi Cheng. 2024. List-aware reranking-truncation joint model for search and retrieval-augmented generation. arXiv preprint arXiv:2402.02764.",
1493
+ "bbox": [
1494
+ 115,
1495
+ 720,
1496
+ 487,
1497
+ 774
1498
+ ],
1499
+ "page_idx": 10
1500
+ },
1501
+ {
1502
+ "type": "text",
1503
+ "text": "Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William W Cohen, Ruslan Salakhutdinov, and Christopher D Manning. 2018. Hotpotqa: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600.",
1504
+ "bbox": [
1505
+ 115,
1506
+ 787,
1507
+ 487,
1508
+ 853
1509
+ ],
1510
+ "page_idx": 10
1511
+ },
1512
+ {
1513
+ "type": "text",
1514
+ "text": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2022. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629.",
1515
+ "bbox": [
1516
+ 115,
1517
+ 866,
1518
+ 487,
1519
+ 919
1520
+ ],
1521
+ "page_idx": 10
1522
+ },
1523
+ {
1524
+ "type": "text",
1525
+ "text": "Ori Yoran, Tomer Wolfson, Ori Ram, and Jonathan Berant. 2023. Making retrieval-augmented language models robust to irrelevant context. arXiv preprint arXiv:2310.01558.",
1526
+ "bbox": [
1527
+ 509,
1528
+ 85,
1529
+ 880,
1530
+ 137
1531
+ ],
1532
+ "page_idx": 10
1533
+ },
1534
+ {
1535
+ "type": "text",
1536
+ "text": "Wenhao Yu, Hongming Zhang, Xiaoman Pan, Kaixin Ma, Hongwei Wang, and Dong Yu. 2023. Chain-of-note: Enhancing robustness in retrieval-augmented language models.",
1537
+ "bbox": [
1538
+ 509,
1539
+ 151,
1540
+ 880,
1541
+ 204
1542
+ ],
1543
+ "page_idx": 10
1544
+ },
1545
+ {
1546
+ "type": "text",
1547
+ "text": "A More Analysis",
1548
+ "text_level": 1,
1549
+ "bbox": [
1550
+ 509,
1551
+ 221,
1552
+ 672,
1553
+ 237
1554
+ ],
1555
+ "page_idx": 10
1556
+ },
1557
+ {
1558
+ "type": "text",
1559
+ "text": "A.1 Robustness to ratio of Positive Passages",
1560
+ "text_level": 1,
1561
+ "bbox": [
1562
+ 509,
1563
+ 249,
1564
+ 867,
1565
+ 263
1566
+ ],
1567
+ "page_idx": 10
1568
+ },
1569
+ {
1570
+ "type": "text",
1571
+ "text": "Our INFO-RAG improves the robustness of RAG performance to retrieval performance. The performance of the retriever greatly affects the performance of LLM in RAG (Chen et al., 2023). We explore this in this section. Specifically, we simulate changes in retrieval performance by varying the ratio of positive and negative passages in the retrieved list and report the RAG performance with different ratios. Table 8 shows INFO-RAG performs better when the ratio is low and the performance is more stable than baseline when the ratio changes from $100\\%$ to $0\\%$ (Max $\\Delta$ ). The model in this experiment is LLaMA-2-13B-chat.",
1572
+ "bbox": [
1573
+ 507,
1574
+ 269,
1575
+ 882,
1576
+ 479
1577
+ ],
1578
+ "page_idx": 10
1579
+ },
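The Max $\Delta$ statistic here and in Tables 7-10 can be read as the largest relative drop from the best value over the sweep; a small sketch of that reading:

```python
# Max Δ as the largest relative performance change across a sweep of settings.
def max_delta(scores: list[float]) -> float:
    best = max(scores)
    return min((s - best) / best for s in scores)  # most negative relative change

# The NQ / LLaMA-2 row of Table 8 reproduces the reported -51.94%:
print(f"{max_delta([88.11, 82.71, 80.81, 77.62, 69.73, 42.35]):.2%}")
```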
1580
+ {
1581
+ "type": "text",
1582
+ "text": "A.2 Robustness to Positive Passage Position",
1583
+ "text_level": 1,
1584
+ "bbox": [
1585
+ 509,
1586
+ 493,
1587
+ 865,
1588
+ 508
1589
+ ],
1590
+ "page_idx": 10
1591
+ },
1592
+ {
1593
+ "type": "text",
1594
+ "text": "Experimental results in Table 9 show that our INFO-RAG consistently outperforms the baseline (LLaMA-2) regardless of where the positive passage (passage contains the correct answers) appears in the retrieved list. Specifically, we mix positive and negative passages in a ratio of 1:9 to simulate the retrieved passage list, vary the position of the positive passage in the retrieved list from 0 to 9, and evaluate the corresponding RAG performance respectively. The model in this experiment is LLaMA-2-13B-chat. Experimental results show that our INFO-RAG not only outperforms the baseline at every position but also achieves more stable performance varying with the position (Max $\\Delta$ ).",
1595
+ "bbox": [
1596
+ 507,
1597
+ 514,
1598
+ 882,
1599
+ 741
1600
+ ],
1601
+ "page_idx": 10
1602
+ },
1603
+ {
1604
+ "type": "text",
1605
+ "text": "A.3 Robustness to Number of Retrieved Passages",
1606
+ "text_level": 1,
1607
+ "bbox": [
1608
+ 509,
1609
+ 753,
1610
+ 836,
1611
+ 785
1612
+ ],
1613
+ "page_idx": 10
1614
+ },
1615
+ {
1616
+ "type": "text",
1617
+ "text": "Experimental results in Table 10 show that our INFO-RAG consistently outperforms the baseline with the different number of retrieved passages (from 1 to 10) and is robust to the change of the number. In this experiment, we use LLaMA-2-13B-chat as the base model, change the number of retrieved passages from 1 to 10, and evaluate the corresponding performance.",
1618
+ "bbox": [
1619
+ 507,
1620
+ 791,
1621
+ 882,
1622
+ 921
1623
+ ],
1624
+ "page_idx": 10
1625
+ },
1626
+ {
1627
+ "type": "table",
1628
+ "img_path": "images/7239483bcd2771891babf9173eb8d58060e7919fc2e15ae5a05cca5883d6446d.jpg",
1629
+ "table_caption": [],
1630
+ "table_footnote": [],
1631
+ "table_body": "<table><tr><td rowspan=\"2\">Data</td><td rowspan=\"2\">Model</td><td colspan=\"6\">ratio of Positive Passages</td><td rowspan=\"2\">Max Δ</td></tr><tr><td>100%</td><td>80%</td><td>60%</td><td>40%</td><td>20%</td><td>0%</td></tr><tr><td rowspan=\"2\">NQ</td><td>LLaMA-2</td><td>88.11</td><td>82.71</td><td>80.81</td><td>77.62</td><td>69.73</td><td>42.35</td><td>-51.94%</td></tr><tr><td>+ INFO-RAG</td><td>90.31</td><td>83.72</td><td>81.72</td><td>79.72</td><td>71.52</td><td>51.04</td><td>-43.48%</td></tr><tr><td rowspan=\"2\">WebQ</td><td>LLaMA-2</td><td>79.41</td><td>75.43</td><td>71.63</td><td>65.53</td><td>63.39</td><td>39.25</td><td>-50.57%</td></tr><tr><td>+ INFO-RAG</td><td>83.66</td><td>76.23</td><td>74.23</td><td>69.05</td><td>65.74</td><td>45.61</td><td>-45.48%</td></tr><tr><td rowspan=\"2\">T-REx</td><td>LLaMA-2</td><td>80.01</td><td>70.05</td><td>71.52</td><td>68.53</td><td>66.23</td><td>42.75</td><td>-46.57%</td></tr><tr><td>+ INFO-RAG</td><td>83.52</td><td>73.22</td><td>74.93</td><td>72.32</td><td>70.12</td><td>46.45</td><td>-44.38%</td></tr><tr><td rowspan=\"2\">ZS</td><td>LLaMA-2</td><td>69.52</td><td>65.48</td><td>63.81</td><td>60.95</td><td>57.14</td><td>28.33</td><td>-59.25%</td></tr><tr><td>+ INFO-RAG</td><td>72.50</td><td>72.62</td><td>67.62</td><td>67.86</td><td>60.48</td><td>36.19</td><td>-50.08%</td></tr></table>",
1632
+ "bbox": [
1633
+ 122,
1634
+ 82,
1635
+ 873,
1636
+ 250
1637
+ ],
1638
+ "page_idx": 11
1639
+ },
1640
+ {
1641
+ "type": "table",
1642
+ "img_path": "images/0f7d5ceead4a8400f3a401529fba3801ce7674cf81e9bf0689477440d3b9255c.jpg",
1643
+ "table_caption": [
1644
+ "Table 8: RAG performance changes with the ratio of positive passages (randomly select 500 samples)."
1645
+ ],
1646
+ "table_footnote": [],
1647
+ "table_body": "<table><tr><td rowspan=\"2\">Datasets</td><td rowspan=\"2\">Method</td><td colspan=\"11\">Position of Positive Passage</td><td>Max Δ</td></tr><tr><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td></td><td></td></tr><tr><td rowspan=\"2\">NQ</td><td>LLaMA-2</td><td>54.94</td><td>48.05</td><td>46.05</td><td>46.45</td><td>46.35</td><td>48.30</td><td>48.35</td><td>47.15</td><td>51.64</td><td>50.44</td><td>-16.18%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>63.23</td><td>58.34</td><td>54.54</td><td>54.44</td><td>53.54</td><td>53.24</td><td>53.84</td><td>54.44</td><td>53.34</td><td>53.34</td><td>-15.80%</td><td></td></tr><tr><td rowspan=\"2\">WebQ</td><td>LLaMA-2</td><td>66.13</td><td>63.21</td><td>62.54</td><td>62.68</td><td>64.01</td><td>62.41</td><td>63.21</td><td>64.54</td><td>63.87</td><td>64.14</td><td>-5.63%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>71.58</td><td>68.39</td><td>66.26</td><td>65.34</td><td>67.19</td><td>65.73</td><td>65.73</td><td>65.81</td><td>65.54</td><td>66.72</td><td>-8.72%</td><td></td></tr><tr><td rowspan=\"2\">T-REx</td><td>LLaMA-2</td><td>64.43</td><td>60.13</td><td>58.34</td><td>60.23</td><td>58.54</td><td>59.14</td><td>59.74</td><td>60.53</td><td>63.53</td><td>63.23</td><td>-9.45%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>70.72</td><td>66.23</td><td>64.93</td><td>65.23</td><td>65.43</td><td>64.83</td><td>66.03</td><td>67.23</td><td>64.63</td><td>66.83</td><td>-8.61%</td><td></td></tr><tr><td rowspan=\"2\">ZS</td><td>LLaMA-2</td><td>63.04</td><td>59.04</td><td>54.59</td><td>55.03</td><td>55.17</td><td>57.15</td><td>56.42</td><td>57.89</td><td>58.04</td><td>59.47</td><td>-13.40%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>66.42</td><td>63.33</td><td>59.04</td><td>60.23</td><td>61.42</td><td>61.66</td><td>60.00</td><td>61.19</td><td>60.23</td><td>62.14</td><td>-11.11%</td><td></td></tr></table>",
1648
+ "bbox": [
1649
+ 119,
1650
+ 291,
1651
+ 878,
1652
+ 460
1653
+ ],
1654
+ "page_idx": 11
1655
+ },
1656
+ {
1657
+ "type": "table",
1658
+ "img_path": "images/de9e0c5c6994e6bc25ee778e0b96090407f55c72b26772b023dd21644cca31b3.jpg",
1659
+ "table_caption": [
1660
+ "Table 9: RAG performance changes with the position of positive passage (randomly select 500 samples)."
1661
+ ],
1662
+ "table_footnote": [],
1663
+ "table_body": "<table><tr><td></td><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td></tr><tr><td>Baseline</td><td>51.47</td><td>40.26</td><td>45.05</td><td>41.78</td></tr><tr><td>+ INFO-RAG</td><td>55.67</td><td>43.29</td><td>49.76</td><td>44.02</td></tr></table>",
1664
+ "bbox": [
1665
+ 126,
1666
+ 510,
1667
+ 473,
1668
+ 565
1669
+ ],
1670
+ "page_idx": 11
1671
+ },
1672
+ {
1673
+ "type": "table",
1674
+ "img_path": "images/3e5215a12ce51db09f1b80a49f5a4e22fd7c0deb31b58922cfcb6fcb72b13b91.jpg",
1675
+ "table_caption": [
1676
+ "Table 11: Works based on BM25."
1677
+ ],
1678
+ "table_footnote": [],
1679
+ "table_body": "<table><tr><td>Method</td><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td></tr><tr><td>Baseline</td><td>62.53</td><td>56.81</td><td>50.36</td><td>45.47</td></tr><tr><td>Simple Mask</td><td>64.05</td><td>58.91</td><td>53.80</td><td>50.55</td></tr><tr><td>Our method</td><td>65.39</td><td>59.05</td><td>54.04</td><td>51.07</td></tr></table>",
1680
+ "bbox": [
1681
+ 524,
1682
+ 511,
1683
+ 867,
1684
+ 581
1685
+ ],
1686
+ "page_idx": 11
1687
+ },
1688
1699
+ {
1700
+ "type": "text",
1701
+ "text": "A.4 Ablation Study on Masking Strategy",
1702
+ "text_level": 1,
1703
+ "bbox": [
1704
+ 112,
1705
+ 630,
1706
+ 450,
1707
+ 646
1708
+ ],
1709
+ "page_idx": 11
1710
+ },
1711
+ {
1712
+ "type": "text",
1713
+ "text": "In general, Table 13 and 12 show our masking strategy in Scenario 3 is more effective than simple and straightforward masking. Specifically, our method is more significantly effective in the scenarios that correct answers are randomly replaced with other phrases (replace) and retrieval cannot find any answers (no answer).",
1714
+ "bbox": [
1715
+ 112,
1716
+ 663,
1717
+ 489,
1718
+ 776
1719
+ ],
1720
+ "page_idx": 11
1721
+ },
1722
+ {
1723
+ "type": "text",
1724
+ "text": "A.5 Works with Different Retriever",
1725
+ "text_level": 1,
1726
+ "bbox": [
1727
+ 112,
1728
+ 807,
1729
+ 410,
1730
+ 822
1731
+ ],
1732
+ "page_idx": 11
1733
+ },
1734
+ {
1735
+ "type": "text",
1736
+ "text": "We evaluate our method and baseline (LLaMA2-13B-chat) with BM25 as the retriever, the experimental results shown in Table 11 indicate that our method still performs better than baseline when the retriever as BM25.",
1737
+ "bbox": [
1738
+ 112,
1739
+ 840,
1740
+ 489,
1741
+ 920
1742
+ ],
1743
+ "page_idx": 11
1744
+ },
1745
+ {
1746
+ "type": "text",
1747
+ "text": "A.6 Performance on MMLU",
1748
+ "text_level": 1,
1749
+ "bbox": [
1750
+ 507,
1751
+ 627,
1752
+ 749,
1753
+ 640
1754
+ ],
1755
+ "page_idx": 11
1756
+ },
1757
+ {
1758
+ "type": "text",
1759
+ "text": "Experimental results on MMLU benchmark in the setting without RAG shown in Table 14 show that our INFO-RAG significantly improves the performance of LLMs in RAG, while still maintaining its versatility and avoiding catastrophic forgetting. MMLU is a benchmark that measures massive multitask language understanding ability of LLMs. It covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem-solving ability (Hendrycks et al., 2020). Experiments show that our INFO-RAG performs very close to the original LLaMA-2 on MMLU, which shows that our INFO-RAG does not damage the basic language understanding ability of LLMs. This is mainly because the prefix language model",
1760
+ "bbox": [
1761
+ 507,
1762
+ 646,
1763
+ 884,
1764
+ 921
1765
+ ],
1766
+ "page_idx": 11
1767
+ },
1768
+ {
1769
+ "type": "table",
1770
+ "img_path": "images/0b25ceee50e0437b19c72a17485410a26c0d09423e313bfe7102299781fdf5bb.jpg",
1771
+ "table_caption": [],
1772
+ "table_footnote": [],
1773
+ "table_body": "<table><tr><td rowspan=\"2\">Datasets</td><td rowspan=\"2\">Method</td><td colspan=\"11\">Number of Retrieved Passages</td><td>Max Δ</td></tr><tr><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td><td></td><td></td></tr><tr><td rowspan=\"2\">NQ</td><td>LLaMA-2</td><td>38.80</td><td>43.21</td><td>46.62</td><td>47.84</td><td>48.61</td><td>49.42</td><td>52.03</td><td>50.23</td><td>50.40</td><td>50.20</td><td>-25.43%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>45.18</td><td>46.80</td><td>51.44</td><td>51.23</td><td>51.00</td><td>53.21</td><td>54.03</td><td>53.44</td><td>53.82</td><td>54.60</td><td>-17.25%</td><td></td></tr><tr><td rowspan=\"2\">WebQ</td><td>LLaMA-2</td><td>40.22</td><td>43.63</td><td>48.20</td><td>46.61</td><td>48.32</td><td>49.11</td><td>49.40</td><td>50.22</td><td>51.65</td><td>50.43</td><td>-22.13%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>50.21</td><td>53.84</td><td>54.41</td><td>55.07</td><td>55.25</td><td>55.27</td><td>57.00</td><td>55.45</td><td>56.62</td><td>56.03</td><td>-11.91%</td><td></td></tr><tr><td rowspan=\"2\">T-REx</td><td>LLaMA-2</td><td>66.20</td><td>63.45</td><td>67.22</td><td>64.45</td><td>64.43</td><td>65.40</td><td>64.41</td><td>65.22</td><td>63.22</td><td>65.01</td><td>-5.95%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>66.25</td><td>66.03</td><td>66.31</td><td>65.80</td><td>67.23</td><td>67.22</td><td>66.65</td><td>67.83</td><td>67.03</td><td>67.40</td><td>-2.99%</td><td></td></tr><tr><td rowspan=\"2\">ZS</td><td>LLaMA-2</td><td>49.25</td><td>50.01</td><td>52.38</td><td>54.09</td><td>56.12</td><td>56.20</td><td>56.13</td><td>56.05</td><td>55.95</td><td>56.11</td><td>-12.37%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>53.17</td><td>54.08</td><td>56.35</td><td>58.01</td><td>59.45</td><td>59.12</td><td>59.40</td><td>58.55</td><td>60.03</td><td>59.08</td><td>-11.43%</td><td></td></tr></table>",
1774
+ "bbox": [
1775
+ 119,
1776
+ 80,
1777
+ 884,
1778
+ 252
1779
+ ],
1780
+ "page_idx": 12
1781
+ },
1782
+ {
1783
+ "type": "table",
1784
+ "img_path": "images/fd8725babf689e60c28c5ed5ea2b8c5dfccc4c280368d4337acd98d75942c75a.jpg",
1785
+ "table_caption": [
1786
+ "Table 10: RAG performance changes with the number of retrieved passages (randomly select 500 samples)."
1787
+ ],
1788
+ "table_footnote": [],
1789
+ "table_body": "<table><tr><td></td><td colspan=\"3\">T-REx</td><td colspan=\"3\">ZS</td><td colspan=\"3\">NQ</td><td colspan=\"3\">WebQ</td></tr><tr><td></td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td></tr><tr><td>Baseline</td><td>75.96</td><td>43.79</td><td>5.59</td><td>67.03</td><td>16.58</td><td>1.42</td><td>69.37</td><td>30.72</td><td>6.16</td><td>65.07</td><td>31.88</td><td>5.47</td></tr><tr><td>Simple Mask</td><td>78.43</td><td>44.05</td><td>5.75</td><td>70.30</td><td>19.45</td><td>1.96</td><td>73.59</td><td>31.05</td><td>6.51</td><td>70.55</td><td>32.96</td><td>6.83</td></tr><tr><td>Our method</td><td>79.25</td><td>48.59</td><td>6.67</td><td>70.26</td><td>25.02</td><td>3.87</td><td>73.73</td><td>33.85</td><td>8.39</td><td>70.59</td><td>37.48</td><td>11.25</td></tr></table>",
1790
+ "bbox": [
1791
+ 136,
1792
+ 291,
1793
+ 862,
1794
+ 372
1795
+ ],
1796
+ "page_idx": 12
1797
+ },
1798
+ {
1799
+ "type": "text",
1800
+ "text": "ing training paradigm of our method is consistent with the pre-training task of LLMs. The difference is that in the training of prefix language modeling, our method learns to perform information refinement that utilizes the retrieved texts for the next token prediction.",
1801
+ "bbox": [
1802
+ 112,
1803
+ 467,
1804
+ 489,
1805
+ 563
1806
+ ],
1807
+ "page_idx": 12
1808
+ },
1809
+ {
1810
+ "type": "table",
1811
+ "img_path": "images/7169d58dd058c5ae35f82d63f9d3e7c7f08dcc1b7a2e127366682efcb2473278.jpg",
1812
+ "table_caption": [
1813
+ "Table 12: Ablation study of masking strategy on three scenarios. \"has-ans.\" is the first scenario that correct answers are in retrieved texts. \"replace\" is the second scenario that correct answers are randomly replaced with other phrases to simulate the incorrect and incomplete knowledge. \"no-ans.\" is the third scenario that retrieval cannot find any answers."
1814
+ ],
1815
+ "table_footnote": [],
1816
+ "table_body": "<table><tr><td></td><td>Humanities</td><td>STEM</td><td>Social-Sciences</td><td>Other</td><td>Average</td></tr><tr><td>LLaMA-2-7B w/o RAG</td><td>42.9</td><td>36.4</td><td>51.2</td><td>52.2</td><td>45.3</td></tr><tr><td>+ INFO-RAG w/o RAG</td><td>42.8</td><td>36.1</td><td>50.8</td><td>52.0</td><td>45.0</td></tr><tr><td>LLaMA-2-13B w/o RAG</td><td>52.8</td><td>44.1</td><td>62.6</td><td>61.1</td><td>54.8</td></tr><tr><td>+ INFO-RAG w/o RAG</td><td>52.5</td><td>43.7</td><td>62.1</td><td>60.9</td><td>54.3</td></tr></table>",
1817
+ "bbox": [
1818
+ 117,
1819
+ 576,
1820
+ 500,
1821
+ 642
1822
+ ],
1823
+ "page_idx": 12
1824
+ }
1836
+ ]
2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18150/47bd3316-0d2d-47e9-a05e-e8b0d9228014_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b727ab1c7f0801fa46cddd755bb9fbd975a7b62fb663ec774ffdfb30ae53560
3
+ size 560206
2402.18xxx/2402.18150/full.md ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Unsupervised Information Refinement Training of Large Language Models for Retrieval-Augmented Generation
2
+
3
+ Shicheng Xu $^{1,2*}$ Liang Pang $^{1\dagger}$ Mo Yu $^{3\dagger}$ Fandong Meng $^{3}$ Huawei Shen $^{1}$ Xueqi Cheng $^{1}$ Jie Zhou $^{3}$
4
+
5
+ $^{1}$ CAS Key Laboratory of AI Safety, Institute of Computing Technology, CAS $^{2}$ University of Chinese Academy of Sciences $^{3}$ Pattern Recognition Center, WeChat AI
6
+
7
+ {xushicheng21s,pangliang,shenhuawei,cxq}@ict.ac.cn
8
+
9
+ moyumyu@global.tencent.com {fandongmeng,withtomzhou}@tencent.com
10
+
11
+ # Abstract
12
+
13
+ Retrieval-augmented generation (RAG) enhances large language models (LLMs) by incorporating additional information from retrieval. However, studies have shown that LLMs still face challenges in effectively using the retrieved information, even ignoring it or being misled by it. The key reason is that the training of LLMs does not clearly make LLMs learn how to utilize input retrieved texts with varied quality. In this paper, we propose a novel perspective that considers the role of LLMs in RAG as "Information Refiner", which means that regardless of correctness, completeness, or usefulness of retrieved texts, LLMs can consistently integrate knowledge within the retrieved texts and model parameters to generate the texts that are more concise, accurate, and complete than the retrieved texts. To this end, we propose an information refinement training method named INFO-RAG that optimizes LLMs for RAG in an unsupervised manner. INFO-RAG is low-cost and general across various tasks. Extensive experiments on zero-shot prediction of 11 datasets in diverse tasks including Question Answering, Slot-Filling, Language Modeling, Dialogue, and Code Generation show that INFO-RAG improves the performance of LLaMA2 by an average of $9.39\%$ relative points. INFO-RAG also shows advantages in in-context learning and robustness of RAG.
14
+
15
+ # 1 Introduction
16
+
17
+ Retrieval-augmented generation (RAG) is a popular framework in modern NLP systems that equips neural models with retrieved information for text generation tasks such as open-domain question answering and dialogue (Lewis et al., 2020; Guu et al., 2020). Recently, RAG has been applied to large language models (LLMs) to provide additional knowledge
18
+
19
+ ![](images/6f62f172aacff9a1c2d1f982b7395b0f00fb794514c65747538065b9076b4f48.jpg)
20
+
21
+ Figure 1: We consider the role of LLMs in RAG as "Information Refiner" that can generate more concise, accurate, and complete texts than the input retrieved texts. In this way, the LLM can consistently make the RAG system produce positive information gain.
22
+
23
+ and mitigate issues such as hallucination (Peng et al., 2023; Shi et al., 2023; Ren et al., 2023).
24
+
25
+ Despite the improved performance of retrieval models, the internet continues to be inundated with fake news, rumors, and fragmented, noisy information, posing challenges for retrieval models to reliably identify and shield against such content (Sun et al., 2022; Thakur et al., 2021). Consequently, not all retrieved texts are beneficial, necessitating that LLMs determine how to judiciously utilize them. However, pre-training tasks do not explicitly enable LLMs to learn how to utilize retrieved texts of varied quality for generation. Given a question and its retrieved texts as the input sequence, RAG aims to minimize the negative log-likelihood (NLL) of the sub-sequence (question and generated answer) by referring to the retrieved texts. However, mainstream pre-training for LLMs with decoder-only architecture is language modeling based on the prefix (Radford et al., 2018; Touvron et al., 2023a), whose training objective minimizes the NLL of the entire input sequence (retrieved texts, question, and generated answer) (Mikolov et al., 2012). This gap causes LLMs to only regard the input retrieved texts as a part of the prefix for language modeling rather than as an additional reference, which leads to the following problems. Firstly, for long and complex retrieved texts, LLMs struggle to accurately extract the correct answers (Deng et al., 2023). Secondly, in situations where the retrieved texts cannot address the task, LLMs lack the capability to integrate the knowledge within model parameters with the retrieved texts to generate improved texts. Thirdly, LLMs are susceptible to incorrect and noisy information in retrieved texts, posing a risk of being misled (Chen et al., 2023; Yoran et al., 2023).
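Written out (notation ours), with retrieved texts $r$, question $q$, and answer $a$ concatenated into one sequence $x$, the two objectives differ only in which tokens are scored:

$$
\mathcal{L}_{\mathrm{LM}} = -\sum_{t=1}^{|r|+|q|+|a|} \log P\left(x_{t} \mid x_{<t}\right), \qquad \mathcal{L}_{\mathrm{RAG}} = -\sum_{t=|r|+1}^{|r|+|q|+|a|} \log P\left(x_{t} \mid x_{<t}\right),
$$

so standard pre-training also spends capacity on modeling $r$ itself instead of treating it as a reference for generating $q$ and $a$.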
28
+
29
+ To solve the above problems, some previous methods explore strategies for how or when to perform retrieval for LLMs via prompt techniques (Press et al., 2023; Khattab et al., 2022; Xu et al., 2023; Asai et al., 2023). However, prompts cannot materially change the ability of LLMs to utilize retrieved texts because model parameters are not updated for this ability. Some methods fine-tune LLMs on RAG data constructed for a specific task such as QA (Yoran et al., 2023; Yu et al., 2023). However, under the trend that LLMs are regarded as foundation models for various tasks in the zero-shot setting, fine-tuning LLMs on only a few tasks makes LLMs limited to the RAG of the training tasks and lose their generalizability, because catastrophic forgetting still exists in supervised fine-tuning of LLMs (Luo et al., 2023). Although constructing data for a large number of tasks can alleviate this, it is hard to design data for the wide variety of RAG tasks and doing so requires high annotation costs. Our paper aims to fundamentally improve the ability of LLMs to utilize retrieved texts while preserving the generalizability of LLMs for various RAG tasks in the zero-shot setting, which is orthogonal to prompt techniques and can be combined with them for better performance.
30
+
31
+ In this paper, considering that LLMs have a certain ability to use their own knowledge to examine information (Dhuliawala et al., 2023), we introduce a novel perspective to reassess the role of LLMs in RAG. Specifically, we propose considering LLMs as "Information Refiner". The key idea is to continue training the pre-trained LLMs with an information refinement objective: regardless of the correctness, completeness, or usefulness of the input retrieved texts, LLMs can consistently integrate knowledge within the retrieved texts and model parameters to generate texts that are more concise, accurate, and complete than the retrieved texts (Figure 1). We term this process "Positive Information Gain". This enables LLMs to extract correct information from complex texts as well as resist and rectify retrieved erroneous information and noise, thereby improving the information bottleneck of RAG and allowing the knowledge capacity of RAG to approximate the combined knowledge of IR and LLMs.
+
+ We design the information refinement training to work in a completely unsupervised manner, which makes large-scale training data easy to obtain and preserves the generalizability of the trained LLMs across various RAG tasks in the zero-shot setting. Specifically, we propose an unsupervised training method named INFO-RAG. INFO-RAG classifies the retrieved texts into three scenarios (shown in Figure 1) and proposes an unsupervised training task for each. In the first scenario, all knowledge needed for the question is already in the retrieved texts; LLMs need to accurately extract the relevant knowledge from the complex retrieved texts and generate more concise texts. In the second scenario, the retrieved texts are incomplete or incorrect for the question; LLMs need to combine the knowledge within their parameters to verify the retrieved texts, correct the wrong knowledge, and complete the missing knowledge. In the third scenario, the retrieved texts are relevant but do not contain any answer; LLMs need to find the knowledge within their parameters based on the relevant context to generate correct answers. We mix these three tasks to train INFO-RAG in an unsupervised manner.
+
+ Main contributions of this paper are as follows: (1) We introduce a novel perspective to reassess the role of LLMs in the RAG system, considering LLMs as an "Information Refiner" that can produce positive information gain in RAG scenarios.
+ (2) We propose an unsupervised training method named INFO-RAG that enables LLMs to perform information refinement in RAG. INFO-RAG is low-cost and general across RAG tasks.
+ (3) Extensive experiments show that INFO-RAG enhances the zero-shot RAG performance of LLaMA-2 across Question Answering, Slot-Filling, Language Modeling, Dialogue, and Code Generation. INFO-RAG also shows advantages in in-context learning and robustness of RAG. Code is released at https://github.com/xsc1234/INFO-RAG/.
+
+ # 2 Related Work
+
+ Retrieval Augmented Generation Retrieval augmented generation (RAG) aims to provide additional knowledge for language models by retrieving information from external databases (Lewis et al., 2020; Guu et al., 2020; Borgeaud et al., 2022; Izacard et al., 2022). RAG makes the text generated by LLMs more accurate and credible, and it is widely used in open-domain QA (Karpukhin et al., 2020; Trivedi et al., 2022a), dialogue (Cai et al., 2018, 2019), and code generation (Parvez et al., 2021). Recently, RAG has also been widely applied to LLMs (Peng et al., 2023; Shi et al., 2023; Ren et al., 2023), typically by using the retrieved texts as contexts (Ram et al., 2023).
+
+ Some studies have noted that noise in retrieved texts can interfere with the performance of language models or even mislead them (Xu et al., 2023; Wang et al., 2023; Chen et al., 2023; Xu et al., 2024). These works tackle the problem at the level of the interaction framework between IR and the LM, while our work takes a more fundamental view: previous studies on RAG do not clearly define the role that LLMs should play in RAG. Our paper introduces a novel perspective that reassesses this role, considering LLMs as an "Information Refiner".
+
+ Unsupervised Learning of RAG Unsupervised learning of RAG can be divided into the training of retrievers and the training of language models. As for retrievers, REALM (Guu et al., 2020) uses masked language modeling to pre-train a knowledge retriever, and REPLUG (Shi et al., 2023) trains the retriever according to feedback from a black-box LM. As for language models, RETRO (Borgeaud et al., 2022) improves language models by retrieving tokens, and Atlas (Izacard et al., 2022) proposes pretext tasks to jointly train the retriever and the language model. However, these two methods focus on encoder-decoder architectures, which is inconsistent with the current mainstream decoder-only LLMs.
+
+ Previous unsupervised training methods do not consider the specific role that language models should play in RAG. In this paper, we focus on training the language model as an "Information Refiner" that can widen the information bottleneck of RAG and be robust to the retrieved texts.
+
+ # 3 Our INFO-RAG
+
+ This section introduces our INFO-RAG, an unsupervised training method that enables LLMs to perform information refinement in RAG. Firstly, we summarize the retrieved texts in RAG into three scenarios and define the positive information gain for each scenario. Secondly, we construct sample pairs in which the output has information gain over the input for these three scenarios and design three training tasks. Thirdly, we train LLMs on the unsupervised samples under the designed tasks. Unsupervised training makes INFO-RAG low-cost and general for RAG across various tasks.
+
+ # 3.1 Positive Information Gain in RAG
+
+ In this paper, we introduce a novel perspective that reassesses the role of LLMs in RAG: LLMs should be the "Information Refiner" that produces "Positive Information Gain" in the information flow of RAG. This section details the scenarios of retrieved texts and defines the specific information gain LLMs should produce in each scenario.
+
+ Scenario 1. The first scenario is that all knowledge needed for the question is already in the retrieved texts. Even so, complex and lengthy retrieved texts are not conducive for users to directly obtain that knowledge. The positive information gain in this scenario is therefore that LLMs extract as much correct knowledge as possible while removing irrelevant information, generating more direct and concise texts for users.
+
+ Scenario 2. The second scenario is that although the retrieved texts contain some usable knowledge, they also contain incomplete or incorrect knowledge. This scenario is very common, especially given the current proliferation of fake news, misinformation, and fragmented knowledge on the Internet. Studies have shown that noise and erroneous knowledge in retrieved texts can greatly mislead the generation of LLMs (Xu et al., 2023). The positive information gain in this scenario is that LLMs exploit the knowledge within their parameters to verify the knowledge in the retrieved texts: utilize accurate knowledge, rectify incorrect knowledge, and complete missing knowledge.
+
+ Scenario 3. The third scenario is that the retrieved texts do not contain any answer that can solve the question. This means the question is very difficult or the target knowledge is very long-tail for information retrieval systems. Even in this case, the retrieval model's ability to model semantics allows it to provide texts that are semantically related to the question (Karpukhin et al., 2020). The positive information gain in this scenario is therefore that LLMs stimulate the knowledge within their parameters based on the semantically relevant context to solve the question.
+
+ ![](images/179a27e5d46bb133682c03cd9a5e1c035af9d24e41253d3eb5b4d7279d96937d.jpg)
+ Figure 2: Overview of our INFO-RAG. Each sample is only processed for a single scenario to avoid data leakage.
+
+ ![](images/c111599f5429af9a9f4678a128b14ce4afd9e9079f1486644b4f42367d017038.jpg)
+
+ # 3.2 Unsupervised Learning
+
+ This section introduces the unsupervised learning in INFO-RAG. We construct input-output pairs that satisfy the information gain of the above three scenarios on Wikipedia, and we continue to train pre-trained LLMs on the constructed data to perform information refinement in the form of next-token prediction under prefix language modeling, which is general across tasks. The pipeline is shown in Figure 2.
+
+ # 3.2.1 Data Collection
+
+ The data construction is performed on English Wikipedia. Specifically, for each document $d$ in Wikipedia, we intercept $k$ consecutive sentences from $d$ to get the sentence set $S = [s_1, s_2, \dots, s_k]$. Our method randomly selects $s_l$ from $S$ and uses it as the object for language modeling. The first $\frac{1}{3}$ to $\frac{2}{3}$ of the tokens of $s_l$ are randomly intercepted as the prefix $s_l^p$, and the remaining tokens of $s_l$ are used as the prediction target $s_l^t$. We also process the sentence set $S$ (Section 3.2.2) so that it can simulate the retrieved texts $\mathcal{R}(s_l^p)$ for the prefix $s_l^p$ in the three scenarios, conditioning the generation of $s_l^t$. We thereby obtain an unsupervised training sample for prefix language modeling that predicts $s_l^t$ given the prefix $s_l^p$ and the retrieved texts $\mathcal{R}(s_l^p)$. This can be formulated as:
+
+ $$
+ p(s_l^t) = p_\theta([\mathcal{R}(s_l^p); s_l^p]), \tag{1}
+ $$
+
+ where $\theta$ denotes the parameters of the LLM and $[\mathcal{R}(s_l^p); s_l^p]$ is the concatenation of $\mathcal{R}(s_l^p)$ and $s_l^p$ via a special token.
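+
+ For concreteness, the sampling step can be sketched in Python as below; the regex-based sentence splitter and all names are illustrative assumptions, not the released implementation.
+
+ ```python
+ import random
+ import re
+
+ def make_sample(document: str, k: int = 15):
+     """Cut k consecutive sentences from a document, pick s_l, and split it
+     into a prefix (first 1/3 to 2/3 of its tokens) and a prediction target."""
+     sents = re.split(r"(?<=[.!?])\s+", document)
+     start = random.randrange(max(1, len(sents) - k + 1))
+     S = sents[start:start + k]            # sentence set S = [s_1, ..., s_k]
+     s_l = random.choice(S)                # the object for language modeling
+     toks = s_l.split()
+     lo, hi = len(toks) // 3, max(len(toks) // 3, 2 * len(toks) // 3)
+     cut = random.randint(lo, hi)          # random split point in [1/3, 2/3]
+     return S, " ".join(toks[:cut]), " ".join(toks[cut:])  # S, s_l^p, s_l^t
+ ```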
+
+ # 3.2.2 Data Construction and Training Tasks
+
+ This section details our data construction and training tasks for the three scenarios in Section 3.1.
+
+ For Scenario 1, which needs LLMs to extract the correct knowledge from complex texts, we propose the training task named Select and Copy. Specifically, given the sentence set $S$ for a sample, Select and Copy directly uses all sentences in $S$ as the retrieved texts conditioning LLMs to predict $s_l^t$ for the given prefix $s_l^p$. This can be formulated as:
+
+ $$
+ p(s_l^t) = p_\theta([S; s_l^p]). \tag{2}
+ $$
+
+ In Select and Copy, $s_l$ (both $s_l^p$ and $s_l^t$) is already contained in the retrieved texts $S$, so LLMs need to select the texts matching the prefix $s_l^p$ from the complex retrieved texts $S$ and directly copy the target $s_l^t$ as the generation. The information gain between $s_l^t$ and the input retrieved texts $S$ is that $s_l^t$ is a more concise continuation of $s_l^p$.
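+
+ Assembling a Select and Copy input is a simple concatenation, mirroring Eq. (2); the separator token below is an assumption.
+
+ ```python
+ SEP = "[SEP]"  # assumption: any special separator token works
+
+ def select_and_copy_input(S: list[str], prefix: str) -> str:
+     # Scenario 1: the full sentence set, including s_l itself, plays the
+     # role of the retrieved texts; the model must locate and copy s_l^t.
+     return " ".join(S) + f" {SEP} " + prefix
+ ```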
+
+ For Scenario 2, which needs LLMs to verify the knowledge in the retrieved texts, utilize accurate knowledge, rectify incorrect knowledge, and complete missing knowledge, we propose the training task named Correct and Complete. Given a sentence set $S$, this task first uses the stability of the word distribution across layers to identify informative tokens, the intuition being that the more unstable the word distribution of a token among the topmost layers, the more informative the token is. We follow (Chuang et al., 2023) to achieve this. Specifically, for each sentence $s_i$ in $S$, our method obtains the next-word distribution of the $a$-th token $s_i^{[a]}$ given the prefix $s_i^{<a}$ of $s_i$ in each layer of the LLM as:
+
+ $$
+ d_j(s_i^{[a]} \mid s_i^{<a}) = \operatorname{softmax}(\mathbf{W}\mathbf{H}_j^{[a]}), \tag{3}
+ $$
+
+ in which $j$ indicates the $j$-th layer of the LLM, $\mathbf{H}_j^{[a]} \in \mathbb{R}^h$ is the hidden state of token $s_i^{[a]}$ in the $j$-th layer, and $\mathbf{W} \in \mathbb{R}^{h \times v}$ is the vocabulary head that maps the hidden state $\mathbf{H}_j^{[a]}$ to a word distribution over the vocabulary of size $v$. Then, for an LLM with $N$ layers, our method uses the Jensen-Shannon Divergence (JSD) to measure the difference in word distribution between layers and gets the word distribution stability of token $s_i^{[a]}$ as:
+
+ $$
+ O_i^{[a]} = \max_{j \in J} \mathrm{JSD}\big(d_N(s_i^{[a]} \mid s_i^{<a}) \,\big\|\, d_j(s_i^{[a]} \mid s_i^{<a})\big),
+ $$
+
+ in which $J$ is the set of candidate layers (the 0-th to $\frac{N}{2}$-th layers) and $d_N(s_i^{[a]} \mid s_i^{<a})$ is the word distribution of the last layer. This design finds the candidate layer whose word distribution differs most from that of the last layer and uses the JSD between the two as the word distribution stability of token $s_i^{[a]}$ (Chuang et al., 2023). We obtain the stability of each token of $s_i$ in parallel and get the set of word distribution stabilities for $s_i$ as:
+
+ $$
+ \mathbb{O}_i = \left\{ O_i^{[0]}, O_i^{[1]}, \dots, O_i^{[n]} \right\}. \tag{4}
+ $$
+
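+ A minimal PyTorch sketch of this stability score (Eqs. (3)-(4)) is given below, assuming a Hugging Face causal LM whose `lm_head` can be applied to every layer's hidden states; the released code may normalize hidden states and batch differently, so treat this as illustrative only.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ name = "meta-llama/Llama-2-7b-hf"  # assumption: any causal LM exposing hidden states
+ tok = AutoTokenizer.from_pretrained(name)
+ model = AutoModelForCausalLM.from_pretrained(name, output_hidden_states=True).eval()
+
+ def kl(p, q, eps=1e-10):
+     return (p * (p.clamp_min(eps).log() - q.clamp_min(eps).log())).sum(-1)
+
+ @torch.no_grad()
+ def stability_scores(sentence: str) -> torch.Tensor:
+     """One max-JSD score per token; higher means less stable, i.e., more informative."""
+     ids = tok(sentence, return_tensors="pt").input_ids
+     hs = torch.stack(model(ids).hidden_states)    # (N+1, 1, seq, h) for all layers
+     probs = F.softmax(model.lm_head(hs), dim=-1)  # word distribution per layer (Eq. 3)
+     last = probs[-1]                              # d_N: final-layer distribution
+     cand = probs[: hs.shape[0] // 2]              # candidate layer set J (0-th to N/2-th)
+     m = 0.5 * (cand + last)                       # JSD via its two-KL decomposition
+     jsd = 0.5 * kl(last, m) + 0.5 * kl(cand, m)   # shape (|J|, 1, seq)
+     return jsd.max(dim=0).values.squeeze(0)       # max over candidate layers per token
+ ```
+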
+ We choose the tokens corresponding to the top $50\%$ of the elements in $\mathbb{O}_i$ as the informative tokens of sentence $s_i$. Subsequently, we apply random masking and replacement to a specific percentage ($30\%$) of these tokens: each selected token is replaced with [MASK] with a $50\%$ probability to simulate incomplete knowledge, randomly replaced with another token with a $40\%$ probability to simulate incorrect knowledge, and kept unchanged with a $10\%$ probability to simulate correct knowledge. We apply this pipeline to each sentence in the set $S$ to get the processed set $S'$. RAG in Correct and Complete can be formulated as:
+
+ $$
+ p(s_l^t) = p_\theta([S'; s_l^p]). \tag{5}
+ $$
+
+ In Correct and Complete, the corrupted, noisy version of $s_l$ is already in $S'$. The information gain in this task requires the LLM to extract, correct, and complete the knowledge of $s_l$ from $S'$ to generate $s_l^t$.
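+
+ The corruption step can be sketched as follows; the [MASK] string and sampling replacement tokens from a flat vocabulary list are simplifying assumptions.
+
+ ```python
+ import random
+
+ MASK = "[MASK]"
+
+ def perturb(tokens: list[str], scores: list[float], vocab: list[str]) -> list[str]:
+     """Corrupt one sentence for Correct and Complete: take the top-50% most
+     informative tokens, touch 30% of them, then mask/replace/keep at 50/40/10."""
+     k = max(1, len(tokens) // 2)
+     informative = sorted(range(len(tokens)), key=lambda i: scores[i], reverse=True)[:k]
+     out = list(tokens)
+     for i in informative:
+         if random.random() < 0.3:              # 30% of informative tokens are touched
+             r = random.random()
+             if r < 0.5:
+                 out[i] = MASK                  # simulate incomplete knowledge
+             elif r < 0.9:
+                 out[i] = random.choice(vocab)  # simulate incorrect knowledge
+             # else: keep unchanged (10%) to simulate correct knowledge
+     return out
+ ```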
+
+ For Scenario 3, which needs LLMs to find answers from their own knowledge based on relevant texts in context, we propose the training task named Contextual Stimulation. Contextual Stimulation eliminates $s_l$ (both $s_l^p$ and $s_l^t$) from the set $S$ and uses the remaining sentences as the retrieved texts for generation, which can be formulated as:
+
+ $$
+ p(s_l^t) = p_\theta([S - \{s_l\}; s_l^p]). \tag{6}
+ $$
+
+ In Contextual Stimulation, each sentence in the retrieved texts $S - \{s_l\}$ is semantically relevant to $s_l^p$ but cannot help LLMs directly generate $s_l^t$. LLMs need to be stimulated by the relevant information to generate $s_l^t$ from their own knowledge.
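+
+ A corresponding input-assembly sketch, reusing the SEP assumption from the Select and Copy sketch above:
+
+ ```python
+ def contextual_stimulation_input(S: list[str], s_l: str, prefix: str) -> str:
+     # Scenario 3: drop s_l entirely; the remaining, semantically related
+     # sentences must stimulate the model's parametric knowledge.
+     retrieved = [s for s in S if s != s_l]
+     return " ".join(retrieved) + f" {SEP} " + prefix
+ ```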
+
+ # 3.2.3 Training Strategy
+
+ After constructing the data for the three training tasks, we mix them for multi-task training. Specifically, we use LoRA (Hu et al., 2021) to train the pre-trained LLMs on the mixed dataset, with the three tasks trained alternately across batches. Since Select and Copy is relatively simple for LLMs, it accounts for only $20\%$ of the batches, while Correct and Complete and Contextual Stimulation each account for $40\%$. Using LoRA not only reduces training costs but also makes our method plug-and-play: the trained LoRA parameters are loaded when LLMs need to perform RAG and unloaded when RAG is not needed.
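+
+ A minimal sketch of this setup with the peft library; the LoRA rank and alpha here are illustrative defaults, not the paper's settings.
+
+ ```python
+ import random
+ from peft import LoraConfig, get_peft_model
+ from transformers import AutoModelForCausalLM
+
+ base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
+ model = get_peft_model(base, LoraConfig(task_type="CAUSAL_LM", r=8, lora_alpha=16))
+
+ # 20% Select and Copy, 40% Correct and Complete, 40% Contextual Stimulation.
+ TASKS = ["select_copy"] * 2 + ["correct_complete"] * 4 + ["contextual_stimulation"] * 4
+
+ def next_task() -> str:
+     return random.choice(TASKS)  # sample which task the next batch comes from
+ ```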
+
+ # 4 Experiments
+
+ # 4.1 Datasets and Evaluation Metrics
+
+ To demonstrate the generality of our unsupervised training method, we evaluate the performance of INFO-RAG on eleven datasets across seven tasks.
+
+ Open-domain Question Answering Open-domain QA is a typical knowledge-intensive task that directly evaluates the knowledge of LLMs. We use Natural Questions (Kwiatkowski et al., 2019) (NQ) and WebQuestions (Berant et al., 2013) (WebQ) as the datasets. Following (Schick et al., 2023), we use cover Exact Match (EM), which determines whether a ground-truth answer appears verbatim in the output, and report accuracy as the evaluation metric.
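+
+ Cover EM can be sketched in a few lines; the lowercasing is an assumption about normalization.
+
+ ```python
+ def cover_em(output: str, answers: list[str]) -> bool:
+     """True if any ground-truth answer appears verbatim in the generation."""
+     o = output.lower()
+     return any(a.lower() in o for a in answers)
+ ```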
+
+ Slot Filling Slot filling requires LLMs to output the object entity for an input subject entity and relation. We use two knowledge-intensive datasets, Zero Shot RE (Levy et al., 2017) (ZS) and T-REx (Elsahar et al., 2018), with the same evaluation metric as open-domain QA.
+
+ Long-Form Question Answering Compared with open-domain QA, LFQA is a QA task whose ground-truth answer is a relatively long text. We use ELI5 (Fan et al., 2019), a knowledge-intensive dataset for LFQA, with ROUGE-L as the evaluation metric (Petroni et al., 2020).
+
+ Dialogue Dialogue in our experiments focuses on factual knowledge. We use Wizard of Wikipedia (Dinan et al., 2018) (WoW), a knowledge-powered dialogue dataset whose conversations are grounded in knowledge, with F1 as the evaluation metric (Petroni et al., 2020).
+
+ Language Modeling We use WikiText-103 (Merity, 2016), a popular dataset for language modeling, with ROUGE-L as the evaluation metric.
+
+ <table><tr><td rowspan="2"></td><td colspan="2">Slot-Filling Accuracy</td><td colspan="2">ODQA Accuracy</td><td colspan="2">Multi-Hop QA Accuracy</td><td rowspan="2">LFQA ROUGE ELI5</td><td rowspan="2">Dialog F1 WoW</td><td rowspan="2">LM ROUGE WikiText</td><td rowspan="2" colspan="2">Code Gen CodeBLEU Python Java</td><td rowspan="2">Overall</td></tr><tr><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td><td>Hotpot</td><td>Musique</td></tr><tr><td>LLaMA-2-7B</td><td>55.60</td><td>54.08</td><td>46.82</td><td>43.52</td><td>39.40</td><td>25.95</td><td>15.18</td><td>7.85</td><td>60.77</td><td>21.44</td><td>22.99</td><td>35.78</td></tr><tr><td>+ INFO-RAG</td><td>65.91</td><td>57.01</td><td>45.74</td><td>44.68</td><td>46.56</td><td>30.19</td><td>17.18</td><td>9.09</td><td>62.91</td><td>26.75</td><td>32.06</td><td>39.83</td></tr><tr><td>LLaMA-2-7B-chat</td><td>60.63</td><td>55.03</td><td>49.42</td><td>46.72</td><td>50.03</td><td>42.69</td><td>27.81</td><td>10.21</td><td>60.26</td><td>22.46</td><td>23.90</td><td>40.83</td></tr><tr><td>+ INFO-RAG</td><td>65.77</td><td>58.32</td><td>53.93</td><td>49.13</td><td>52.01</td><td>44.45</td><td>28.15</td><td>10.49</td><td>63.24</td><td>27.25</td><td>28.79</td><td>43.78</td></tr><tr><td>LLaMA-2-13B</td><td>60.08</td><td>50.77</td><td>47.40</td><td>44.62</td><td>42.12</td><td>25.78</td><td>14.80</td><td>7.04</td><td>62.20</td><td>21.52</td><td>29.16</td><td>36.86</td></tr><tr><td>+ INFO-RAG</td><td>62.80</td><td>55.63</td><td>47.82</td><td>45.42</td><td>51.48</td><td>35.02</td><td>17.48</td><td>7.20</td><td>64.14</td><td>29.00</td><td>35.50</td><td>41.04</td></tr><tr><td>LLaMA-2-13B-chat</td><td>62.53</td><td>56.81</td><td>50.36</td><td>45.47</td><td>61.23</td><td>47.06</td><td>27.07</td><td>11.19</td><td>60.52</td><td>22.34</td><td>30.96</td><td>43.23</td></tr><tr><td>+ INFO-RAG</td><td>65.39</td><td>59.05</td><td>54.04</td><td>51.07</td><td>61.91</td><td>47.93</td><td>27.24</td><td>11.38</td><td>63.92</td><td>31.98</td><td>38.12</td><td>46.55</td></tr></table>
+
+ Table 1: Overall performance on retrieval-augmented generation on 11 datasets across 7 tasks in zero-shot setting.
+
+ Multi-Hop Question Answering Multi-hop QA measures the ability of LLMs to reason over multiple pieces of knowledge. We use HotpotQA (Yang et al., 2018) and Musique (Trivedi et al., 2022b) for this task, with the same evaluation metric as open-domain QA.
+
+ Code Generation Code generation aims to generate code for a given natural language description. We use the Java and Python subsets of CodeXGLUE (Iyer et al., 2018) for this task, with CodeBLEU (Ren et al., 2020) as the evaluation metric.
+
+ # 4.2 Experimental Settings
+
+ LLMs in our paper include LLaMA-2-7B, LLaMA-2-13B, and their chat versions (Touvron et al., 2023b). We use LoRA to fine-tune these pre-trained LLMs on four A100 GPUs for 5K steps with a learning rate of 1e-5 and a per-GPU batch size of 4 (for 7B) or 2 (for 13B). As for the training data, we intercept 15 consecutive sentences for each example.
+
+ As for the retrieval model and retrieval database: for open-domain QA, slot filling, and language modeling, we use ColBERTv2 (Santhanam et al., 2022), a late-interaction model with excellent generalization ability, as the retriever, and Wikipedia consisting of 21,015,324 passages (Karpukhin et al., 2020) as the retrieval database. For code generation, we use SCODE-R (Parvez et al., 2021) as the code retriever and deduplicated source code in CodeSearchNET (Husain et al., 2019) as the retrieval database. For all the above tasks, we give the top-5 retrieved passages to each example. For LFQA, dialogue, and multi-hop QA, we use the list of contextual passages provided by the datasets as the retrieved list (distractor setting). In each experiment, all baselines and our method share the same retrieved documents.
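+
+ At inference time, the input follows the same $[\mathcal{R}(q); q]$ concatenation as training; a sketch of the prompt assembly (the exact formatting is an assumption):
+
+ ```python
+ def rag_prompt(question: str, passages: list[str], k: int = 5) -> str:
+     """Concatenate the top-k retrieved passages before the question."""
+     ctx = "\n".join(f"Passage {i + 1}: {p}" for i, p in enumerate(passages[:k]))
+     return f"{ctx}\n{question}"
+ ```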
+
+ # 4.3 Experimental Results
+
+ Main Results (Zero-Shot Setting) Experimental results in Table 1 show the improvement of our method in utilizing retrieved knowledge ($9.39\%$ on average) from four aspects.
+
+ (1) Short and Direct Knowledge. Our method significantly improves the RAG performance of LLaMA on ODQA and slot-filling tasks. The answers in ODQA and slot filling are short and direct, so these tasks directly reflect the ability of LLMs to utilize the knowledge in retrieved texts.
+ (2) Reasoning on Multiple Knowledge. INFO-RAG shows advantages in cross-passage reasoning over the retrieved list. Questions in both HotpotQA and Musique are complex and need multiple pieces of knowledge from different passages; they require LLMs not only to extract correct knowledge from the retrieved passage list but also to combine the knowledge of different passages for reasoning to reach the final answer.
+ (3) Long and Complex Knowledge. INFO-RAG improves the RAG performance of LLaMA on LFQA, dialogue, and language modeling, tasks that require outputting long and complex texts grounded in intensive knowledge.
+ (4) Code Knowledge. INFO-RAG also improves the RAG performance of LLaMA on code generation, which further demonstrates its cross-task generality. Although trained only on natural language, our method also shows advantages on programming-language tasks, indicating that INFO-RAG teaches LLMs how to exploit retrieved information rather than merely fitting the data. The unsupervised, prefix language modeling training paradigm makes INFO-RAG general across tasks.
+
+ <table><tr><td></td><td colspan="3">T-REx</td><td colspan="3">ZS</td><td colspan="3">NQ</td><td colspan="3">WebQ</td></tr><tr><td></td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td></tr><tr><td>LLaMA-2-7B</td><td>67.19</td><td>38.37</td><td>6.49</td><td>64.41</td><td>12.78</td><td>2.44</td><td>65.54</td><td>16.91</td><td>3.41</td><td>60.64</td><td>25.68</td><td>7.90</td></tr><tr><td>+ INFO-RAG</td><td>79.80</td><td>41.79</td><td>7.04</td><td>68.10</td><td>13.55</td><td>3.26</td><td>64.43</td><td>22.68</td><td>4.70</td><td>62.70</td><td>26.48</td><td>8.96</td></tr><tr><td>LLaMA-2-7B-chat</td><td>73.79</td><td>40.56</td><td>4.87</td><td>66.71</td><td>14.19</td><td>1.63</td><td>68.72</td><td>20.81</td><td>4.50</td><td>66.86</td><td>28.63</td><td>5.62</td></tr><tr><td>+ INFO-RAG</td><td>80.01</td><td>42.92</td><td>5.42</td><td>69.64</td><td>15.02</td><td>2.65</td><td>70.99</td><td>23.14</td><td>5.62</td><td>68.73</td><td>29.74</td><td>9.12</td></tr><tr><td>LLaMA-2-13B</td><td>72.26</td><td>39.47</td><td>7.76</td><td>60.14</td><td>19.71</td><td>4.69</td><td>65.94</td><td>18.45</td><td>4.42</td><td>62.09</td><td>26.63</td><td>9.27</td></tr><tr><td>+ INFO-RAG</td><td>75.80</td><td>44.08</td><td>8.48</td><td>65.94</td><td>23.21</td><td>4.90</td><td>64.98</td><td>27.60</td><td>8.02</td><td>63.51</td><td>28.24</td><td>9.88</td></tr><tr><td>LLaMA-2-13B-chat</td><td>75.96</td><td>43.79</td><td>5.59</td><td>67.03</td><td>16.58</td><td>1.42</td><td>69.37</td><td>30.72</td><td>6.16</td><td>65.07</td><td>31.88</td><td>5.47</td></tr><tr><td>+ INFO-RAG</td><td>79.25</td><td>48.59</td><td>6.67</td><td>70.26</td><td>25.02</td><td>3.87</td><td>73.73</td><td>33.85</td><td>8.39</td><td>70.59</td><td>37.48</td><td>11.25</td></tr></table>
+
+ Results on In-context Learning for RAG Besides, INFO-RAG allows further improvement when cooperating with in-context learning (ICL). ICL (Brown et al., 2020) works by prepending a few examples of the target task before the query, which helps LLMs understand the task. However, ICL does not always help in the RAG setting, mainly because the retrieved texts of the query can be confused with the few-shot examples. As shown in Table 3, LLaMA-2 cannot further improve its RAG performance through ICL and is sometimes even hurt by the few-shot examples, while INFO-RAG improves further with ICL. This is mainly because INFO-RAG enables LLaMA to understand the task form of RAG and thus better learn the general task pattern from the ICL examples. In this experiment, each ICL example consists of a query, a relevant passage, and an answer. For a fair comparison, the performance of our method and the baseline must be close in the non-ICL setting, so we select queries for which the baseline gives the same answer as our method (both correct or both incorrect) and evaluate ICL performance on these queries.
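+
+ The few-shot prompt construction can be sketched as follows, reusing the rag_prompt helper from the sketch above; the exact example format is an assumption.
+
+ ```python
+ def icl_prompt(examples: list[tuple[str, str, str]],
+                question: str, passages: list[str]) -> str:
+     """Prepend (query, relevant passage, answer) demonstrations before the
+     retrieval-augmented query."""
+     shots = "\n\n".join(
+         f"Passage: {p}\nQuestion: {q}\nAnswer: {a}" for q, p, a in examples
+     )
+     return f"{shots}\n\n{rag_prompt(question, passages)}"
+ ```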
+
+ Enhancing Previous SOTA in Open-Retrieval Setting We further show that INFO-RAG cooperates well with recent prompting techniques that perform multi-step reasoning combined with retrieval (Xu et al., 2023; Khattab et al., 2022; Press et al., 2023; Yao et al., 2022). For a fair comparison, we follow SearChain (Xu et al., 2023), which runs on multi-hop QA and slot filling in the open-retrieval setting, retrieving passages from the full Wikipedia at each reasoning step. SearChain and the other baselines use LLaMA-2-13B-chat as the backbone.
+
+ Table 2: Experimental results on the three scenarios. "has-ans." is the first scenario, in which correct answers are in the retrieved texts. "replace" is the second scenario, in which correct answers are randomly replaced with other phrases to simulate incorrect and incomplete knowledge. "no-ans." is the third scenario, in which retrieval cannot find any answers.
+
+ <table><tr><td rowspan="2">Data</td><td rowspan="2">Model</td><td colspan="6">Number of Examples in ICL</td></tr><tr><td>0</td><td>2</td><td>4</td><td>8</td><td>12</td><td>16</td></tr><tr><td rowspan="2">NQ</td><td>LLaMA-2</td><td>43.36</td><td>23.34</td><td>16.60</td><td>39.22</td><td>44.32</td><td>43.00</td></tr><tr><td>+INFO-RAG</td><td>43.36</td><td>44.35</td><td>45.88</td><td>44.45</td><td>47.75</td><td>46.25</td></tr><tr><td rowspan="2">WebQ</td><td>LLaMA-2</td><td>43.20</td><td>18.36</td><td>9.40</td><td>36.71</td><td>44.80</td><td>44.81</td></tr><tr><td>+INFO-RAG</td><td>43.20</td><td>48.03</td><td>49.82</td><td>48.25</td><td>47.86</td><td>47.29</td></tr><tr><td rowspan="2">T-REx</td><td>LLaMA-2</td><td>59.83</td><td>47.05</td><td>49.11</td><td>56.51</td><td>55.23</td><td>56.31</td></tr><tr><td>+INFO-RAG</td><td>59.83</td><td>63.08</td><td>63.45</td><td>63.54</td><td>63.57</td><td>63.38</td></tr><tr><td rowspan="2">ZS</td><td>LLaMA-2</td><td>52.41</td><td>42.71</td><td>37.05</td><td>50.40</td><td>50.20</td><td>51.01</td></tr><tr><td>+INFO-RAG</td><td>52.41</td><td>56.53</td><td>60.37</td><td>59.86</td><td>59.75</td><td>59.85</td></tr></table>
+
+ Table 3: RAG performance changes with the number of examples in in-context learning.
+
+ <table><tr><td></td><td colspan="2">Multi-Hop QA</td><td colspan="2">Slot-Filling</td></tr><tr><td></td><td>HotpotQA</td><td>Musique</td><td>T-REx</td><td>zsRE</td></tr><tr><td>Previous SOTA</td><td>28.19</td><td>10.03</td><td>63.10</td><td>57.09</td></tr><tr><td>SearChain</td><td>31.21</td><td>11.27</td><td>64.58</td><td>58.91</td></tr><tr><td>+ INFO-RAG</td><td>33.04</td><td>12.10</td><td>66.95</td><td>60.72</td></tr></table>
+
+ Table 4: Enhancement to the state-of-the-art RAG framework. Previous SOTA includes DSP, Self-Ask, and ReAct.
+
+ Then, we perform SearChain based on LLaMA-2-13B-chat trained with INFO-RAG to show the enhancement INFO-RAG brings to SearChain. Results in Table 4 show that INFO-RAG makes SearChain achieve better performance, providing additional support that our unsupervised information refinement training fundamentally improves the RAG performance of LLMs.
+
+ # 4.4 Analysis
+
+ Fine-grained Analysis for Three Scenarios As shown in Table 2, INFO-RAG is effective in all three RAG scenarios and shows better robustness to incorrect, incomplete, and noisy retrieved texts. We proposed a corresponding unsupervised training task for each of the three RAG scenarios; this section gives a fine-grained analysis for each scenario.
+
+ <table><tr><td></td><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td><td>Hotpot</td><td>Musique</td><td>ELI5</td><td>WoW</td><td>WikiText</td><td>Python</td><td>Java</td><td>Overall</td></tr><tr><td>LLaMA-2 w/o RAG</td><td>35.60</td><td>10.99</td><td>32.67</td><td>39.13</td><td>29.16</td><td>5.83</td><td>26.05</td><td>10.71</td><td>41.80</td><td>20.67</td><td>25.87</td><td>25.32</td></tr><tr><td>LLaMA-2 w/ RAG</td><td>62.53</td><td>56.81</td><td>50.36</td><td>45.47</td><td>61.23</td><td>47.06</td><td>27.07</td><td>11.19</td><td>60.52</td><td>22.34</td><td>30.96</td><td>43.23</td></tr><tr><td>+ training on wiki</td><td>62.55</td><td>56.79</td><td>49.23</td><td>45.05</td><td>61.00</td><td>46.95</td><td>26.31</td><td>11.05</td><td>60.84</td><td>22.05</td><td>30.28</td><td>42.92</td></tr><tr><td>+ INFO-RAG</td><td>65.39</td><td>59.05</td><td>54.04</td><td>51.07</td><td>61.91</td><td>47.93</td><td>27.24</td><td>11.38</td><td>63.92</td><td>31.98</td><td>38.12</td><td>46.55</td></tr></table>
+
+ Table 5: Analysis on the best-performed model LLaMA-2-13B-chat.
+
+ <table><tr><td rowspan="2">Method</td><td colspan="4">NQ</td></tr><tr><td>original</td><td>has-ans.</td><td>replace</td><td>no-ans.</td></tr><tr><td>Baseline</td><td>50.36</td><td>69.37</td><td>30.72</td><td>6.16</td></tr><tr><td>S1: Select and Copy</td><td>48.77</td><td>69.59</td><td>25.40</td><td>0.11</td></tr><tr><td>S2: Correct and Complete</td><td>51.59</td><td>70.42</td><td>32.71</td><td>4.48</td></tr><tr><td>S3: Contextual Stimulation</td><td>52.75</td><td>72.50</td><td>31.77</td><td>8.86</td></tr><tr><td>S2&amp;S3</td><td>53.73</td><td>73.01</td><td>32.50</td><td>9.01</td></tr><tr><td>INFO-RAG (S1&amp;S2&amp;S3)</td><td>54.04</td><td>73.73</td><td>33.85</td><td>8.39</td></tr></table>
+
+ For Scenario 1, we use cover EM to select the samples whose retrieved lists already contain the correct answers. For Scenario 2, we randomly replace the correct answers in the retrieved texts with other phrases of the same type. For Scenario 3, we use cover EM to select the samples whose retrieved texts do not contain any correct answer. We report the accuracy of LLaMA on the samples of each scenario. Questions in the third scenario are more difficult than in the second because the retrieval model cannot find anything that solves them. Table 2 indicates that our method shows advantages in every scenario and is more robust regardless of whether the retrieved texts contain the correct answer.
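+
+ The bucketing and the answer-replacement perturbation can be sketched as follows; same_type_phrase is a hypothetical helper that samples a replacement phrase with the same properties as the answer.
+
+ ```python
+ def scenario_of(retrieved: list[str], answers: list[str]) -> str:
+     """Bucket an evaluation sample by whether its retrieved list covers an answer."""
+     text = " ".join(retrieved).lower()
+     covered = any(a.lower() in text for a in answers)
+     return "has-ans." if covered else "no-ans."
+
+ def make_replace_scenario(retrieved: list[str], answers: list[str],
+                           same_type_phrase) -> list[str]:
+     """Simulate Scenario 2 by swapping every covered answer for a same-type phrase."""
+     out = []
+     for p in retrieved:
+         for a in answers:
+             p = p.replace(a, same_type_phrase(a))
+         out.append(p)
+     return out
+ ```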
+
+ Ablation Study We conduct an ablation study to explore the effects of the following factors.
+
+ (1) Additional Training on Wikipedia. We study whether our improvement comes from teaching the model information refinement or simply from additional training on Wikipedia. To this end, we train LLaMA-2 on Wikipedia with the standard language modeling objective, using the same hyperparameters as INFO-RAG. The results in Table 5 show that this baseline brings no improvement over the backbone LLaMA-2, confirming that the gains come from our training method rather than from additional training on Wikipedia.
+
+ (2) Training Tasks. We perform the three training tasks proposed in INFO-RAG separately, each on the data constructed for its scenario, to explore their individual effects. Table 6 shows that both S2 and S3 bring gains in their scenarios.
+
+ Table 6: Effects of three training tasks.
+
+ <table><tr><td>Datasets</td><td>Method</td><td>Max Δ ratio</td><td>Max Δ position</td><td>Max Δ number</td></tr><tr><td rowspan="2">NQ</td><td>LLaMA-2</td><td>-51.94%</td><td>-16.18%</td><td>-25.43%</td></tr><tr><td>+ INFO-RAG</td><td>-43.48%</td><td>-15.80%</td><td>-17.25%</td></tr><tr><td rowspan="2">WebQ</td><td>LLaMA-2</td><td>-50.57%</td><td>-5.63%</td><td>-22.13%</td></tr><tr><td>+ INFO-RAG</td><td>-45.48%</td><td>-8.72%</td><td>-11.91%</td></tr><tr><td rowspan="2">T-REx</td><td>LLaMA-2</td><td>-46.57%</td><td>-9.45%</td><td>-5.95%</td></tr><tr><td>+ INFO-RAG</td><td>-44.38%</td><td>-8.61%</td><td>-2.99%</td></tr><tr><td rowspan="2">ZS</td><td>LLaMA-2</td><td>-59.25%</td><td>-13.40%</td><td>-12.37%</td></tr><tr><td>+ INFO-RAG</td><td>-50.08%</td><td>-11.11%</td><td>-11.43%</td></tr></table>
+
+ Table 7: Maximum relative performance change caused by changes in retrieval results.
+
+ Although S1 has negative effects when performed alone, it achieves the best results when trained together with S2 and S3. This is mainly because S1 alone is so simple that it causes the LLM to overfit the data; adding S2 and S3 lets the LLM learn the task paradigm of information refinement, making it better at extracting the correct answer in Scenario 1.
+
+ Robustness to Retrieval Results Table 7 shows that INFO-RAG is more robust to changes in retrieval results, including the ratio and position of positive passages and the number of retrieved passages. More details can be found in Section A of the Appendix.
+
+ Avoiding Catastrophic Forgetting Experiments on MMLU (Hendrycks et al., 2020) without RAG show that INFO-RAG performs very close to the original LLaMA-2 (7B: 45.0 vs. 45.3; 13B: 54.3 vs. 54.8), indicating that INFO-RAG enhances RAG while avoiding catastrophic forgetting. More details can be found in Section A.6 of the Appendix.
+
+ # 5 Conclusion
+
+ This paper proposes a novel perspective that reassesses the role of LLMs in RAG, considering LLMs as an "Information Refiner": regardless of the correctness, completeness, or usefulness of the retrieved texts, LLMs can consistently integrate knowledge within model parameters and the retrieved texts to generate texts that are more concise, accurate, and complete. To achieve this, we propose INFO-RAG, an unsupervised information refinement training method that is low-cost and general across various tasks. Extensive experiments on 11 datasets across 7 tasks in the zero-shot setting show that INFO-RAG improves the performance of LLMs for RAG. INFO-RAG also shows advantages in ICL and in the robustness of RAG, and it can be combined with the SOTA RAG framework to further improve its performance.
+
+ # Limitations
+
+ This paper aims to enable LLMs to perform information refinement in RAG through unsupervised training, so that they accurately extract correct information and avoid the interference of incorrect information. The main limitation of this paper is that, due to limited computing resources, we only conduct experiments on models with 7B and 13B parameters. In the future, we plan to use more computing resources to explore the performance of models with larger parameter sizes.
+
+ # Ethics Statement
+
+ After careful consideration, we believe that our paper does not introduce additional ethical concerns. We declare that our work complies with the ACL Ethics Policy.
+
+ # Acknowledgements
+
+ This work was supported by the National Key R&D Program of China (2022YFB3103700, 2022YFB3103704), the National Natural Science Foundation of China (NSFC) under Grants No. 62276248 and U21B2046, and the Youth Innovation Promotion Association CAS under Grants No. 2023111.
+
+ # References
+
+ Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511.
+ Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. 2013. Semantic parsing on freebase from question-answer pairs. In Proceedings of the EMNLP 2013, pages 1533-1544.
+ Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. 2022. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pages 2206-2240. PMLR.
+ Tom B. Brown, Benjamin Mann, Nick Ryder, et al. 2020. Language models are few-shot learners.
+ Deng Cai, Yan Wang, Victoria Bi, Zhaopeng Tu, Xiaojiang Liu, Wai Lam, and Shuming Shi. 2018. Skeleton-to-response: Dialogue generation guided by retrieval memory. arXiv preprint arXiv:1809.05296.
+ Deng Cai, Yan Wang, Wei Bi, Zhaopeng Tu, Xiaojiang Liu, and Shuming Shi. 2019. Retrieval-guided dialogue response generation via a matching-to-generation framework. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1866-1875.
+ Jiawei Chen, Hongyu Lin, Xianpei Han, and Le Sun. 2023. Benchmarking large language models in retrieval-augmented generation. arXiv preprint arXiv:2309.01431.
+ Yung-Sung Chuang, Yujia Xie, Hongyin Luo, Yoon Kim, James Glass, and Pengcheng He. 2023. Dola: Decoding by contrasting layers improves factuality in large language models. arXiv preprint arXiv:2309.03883.
+ Jingcheng Deng, Liang Pang, Huawei Shen, and Xueqi Cheng. 2023. Regavae: A retrieval-augmented gaussian mixture variational auto-encoder for language modeling. arXiv preprint arXiv:2310.10567.
+ Shehzaad Dhuliawala, Mojtaba Komeili, Jing Xu, Roberta Raileanu, Xian Li, Asli Celikyilmaz, and Jason Weston. 2023. Chain-of-verification reduces hallucination in large language models.
+ Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2018. Wizard of wikipedia: Knowledge-powered conversational agents. arXiv preprint arXiv:1811.01241.
+ Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. 2018. T-rex: A large scale alignment of natural language with knowledge base triples. In Proceedings of LREC 2018.
+ Angela Fan, Yacine Jernite, Ethan Perez, David Grangier, Jason Weston, and Michael Auli. 2019. Eli5: Long form question answering. arXiv preprint arXiv:1907.09190.
+ Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR.
+ Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2020. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300.
+ Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685.
+ Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. 2019. Code-searchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436.
+ Srinivasan Iyer, Ioannis Konstas, Alvin Cheung, and Luke Zettlemoyer. 2018. Mapping language to code in programmatic context. arXiv preprint arXiv:1808.09588.
+ Gautier Izacard, Patrick Lewis, Maria Lomeli, Lucas Hosseini, Fabio Petroni, Timo Schick, Jane Dwivedi-Yu, Armand Joulin, Sebastian Riedel, and Edouard Grave. 2022. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299.
+ Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6769-6781.
+ Omar Khattab, Keshav Santhanam, Xiang Lisa Li, David Hall, Percy Liang, Christopher Potts, and Matei Zaharia. 2022. Demonstrate-search-predict: Composing retrieval and language models for knowledge-intensive nlp. arXiv preprint arXiv:2212.14024.
+ Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453-466.
+ Omer Levy, Minjoon Seo, Eunsol Choi, and Luke Zettlemoyer. 2017. Zero-shot relation extraction via reading comprehension. arXiv preprint arXiv:1706.04115.
+ Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474.
+ Yun Luo, Zhen Yang, Fandong Meng, Yafu Li, Jie Zhou, and Yue Zhang. 2023. An empirical study of catastrophic forgetting in large language models during continual fine-tuning. arXiv preprint arXiv:2308.08747.
+ Stephen Merity. 2016. The wikitext long term dependency language modeling dataset. Salesforce MetaMind, 9.
+ Tomáš Mikolov et al. 2012. Statistical language models based on neural networks. Presentation at Google, Mountain View, 2nd April, 80(26).
+ Md Rizwan Parvez, Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. 2021. Retrieval augmented code generation and summarization. arXiv preprint arXiv:2108.11601.
+ Baolin Peng, Michel Galley, Pengcheng He, Hao Cheng, Yujia Xie, Yu Hu, Qiuyuan Huang, Lars Liden, Zhou Yu, Weizhu Chen, et al. 2023. Check your facts and try again: Improving large language models with external knowledge and automated feedback. arXiv preprint arXiv:2302.12813.
+ Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. Kilt: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252.
+ Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A. Smith, and Mike Lewis. 2023. Measuring and narrowing the compositionality gap in language models.
+ Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. 2018. Improving language understanding by generative pre-training.
+ Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. 2023. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083.
+ Ruiyang Ren, Yuhao Wang, Yingqi Qu, Wayne Xin Zhao, Jing Liu, Hao Tian, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2023. Investigating the factual knowledge boundary of large language models with retrieval augmentation. arXiv preprint arXiv:2307.11019.
+ Shuo Ren, Daya Guo, Shuai Lu, Long Zhou, Shujie Liu, Duyu Tang, Neel Sundaresan, Ming Zhou, Ambrosio Blanco, and Shuai Ma. 2020. Codebleu: a method for automatic evaluation of code synthesis. arXiv preprint arXiv:2009.10297.
+ Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, Christopher Potts, and Matei Zaharia. 2022. Colbertv2: Effective and efficient retrieval via lightweight late interaction.
+ Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761.
+ Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Rich James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2023. Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652.
+ Tiening Sun, Zhong Qian, Sujun Dong, Peifeng Li, and Qiaoming Zhu. 2022. Rumor detection on social media with graph adversarial contrastive learning. In Proceedings of the WWW 2022, pages 2789-2797.
+ Nandan Thakur, Nils Reimers, Andreas Rückle, Abhishek Srivastava, and Iryna Gurevych. 2021. Beir: A heterogenous benchmark for zero-shot evaluation of information retrieval models. arXiv preprint arXiv:2104.08663.
+ Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023a. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023b. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
+ Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022a. Interleaving retrieval with chain-of-thought reasoning for knowledge-intensive multi-step questions. arXiv preprint arXiv:2212.10509.
+ Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022b. Musique: Multi-hop questions via single-hop question composition. Transactions of the Association for Computational Linguistics, 10:539-554.
+ Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002.
+ Shicheng Xu, Liang Pang, Huawei Shen, Xueqi Cheng, and Tat-seng Chua. 2023. Search-in-the-chain: Towards the accurate, credible and traceable content generation for complex knowledge-intensive tasks. arXiv preprint arXiv:2304.14732.
+ Shicheng Xu, Liang Pang, Jun Xu, Huawei Shen, and Xueqi Cheng. 2024. List-aware reranking-truncation joint model for search and retrieval-augmented generation. arXiv preprint arXiv:2402.02764.
+ Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William W Cohen, Ruslan Salakhutdinov, and Christopher D Manning. 2018. Hotpotqa: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600.
+ Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2022. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629.
+ Ori Yoran, Tomer Wolfson, Ori Ram, and Jonathan Berant. 2023. Making retrieval-augmented language models robust to irrelevant context. arXiv preprint arXiv:2310.01558.
+ Wenhao Yu, Hongming Zhang, Xiaoman Pan, Kaixin Ma, Hongwei Wang, and Dong Yu. 2023. Chain-of-note: Enhancing robustness in retrieval-augmented language models.
+
+ # A More Analysis
+
+ # A.1 Robustness to Ratio of Positive Passages
+
+ INFO-RAG improves the robustness of RAG performance to retrieval performance, which greatly affects the performance of LLMs in RAG (Chen et al., 2023). Specifically, we simulate changes in retrieval performance by varying the ratio of positive to negative passages in the retrieved list and report the RAG performance at each ratio. Table 8 shows that INFO-RAG performs better when the ratio is low, and its performance is more stable than the baseline's as the ratio changes from $100\%$ to $0\%$ (Max $\Delta$). The model in this experiment is LLaMA-2-13B-chat.
+
+ # A.2 Robustness to Positive Passage Position
+
+ Experimental results in Table 9 show that INFO-RAG consistently outperforms the baseline (LLaMA-2) regardless of where the positive passage (the passage that contains the correct answers) appears in the retrieved list. Specifically, we mix positive and negative passages at a ratio of 1:9 to simulate the retrieved passage list, vary the position of the positive passage in the list from 0 to 9, and evaluate the corresponding RAG performance. The model in this experiment is LLaMA-2-13B-chat. INFO-RAG not only outperforms the baseline at every position but also achieves performance that varies less with the position (Max $\Delta$).
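+
+ Constructing such a controlled list is straightforward; a sketch:
+
+ ```python
+ def build_list(positive: str, negatives: list[str], position: int) -> list[str]:
+     """Place one positive passage among nine negatives at a controlled
+     index (0-9), as in the position-robustness experiment."""
+     lst = negatives[:9]
+     lst.insert(position, positive)
+     return lst
+ ```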
+
+ # A.3 Robustness to Number of Retrieved Passages
+
+ Experimental results in Table 10 show that INFO-RAG consistently outperforms the baseline for every number of retrieved passages (from 1 to 10) and is robust to changes in this number. In this experiment, we use LLaMA-2-13B-chat as the base model and vary the number of retrieved passages from 1 to 10, evaluating the corresponding performance.
+
+ <table><tr><td rowspan="2">Data</td><td rowspan="2">Model</td><td colspan="6">ratio of Positive Passages</td><td rowspan="2">Max Δ</td></tr><tr><td>100%</td><td>80%</td><td>60%</td><td>40%</td><td>20%</td><td>0%</td></tr><tr><td rowspan="2">NQ</td><td>LLaMA-2</td><td>88.11</td><td>82.71</td><td>80.81</td><td>77.62</td><td>69.73</td><td>42.35</td><td>-51.94%</td></tr><tr><td>+ INFO-RAG</td><td>90.31</td><td>83.72</td><td>81.72</td><td>79.72</td><td>71.52</td><td>51.04</td><td>-43.48%</td></tr><tr><td rowspan="2">WebQ</td><td>LLaMA-2</td><td>79.41</td><td>75.43</td><td>71.63</td><td>65.53</td><td>63.39</td><td>39.25</td><td>-50.57%</td></tr><tr><td>+ INFO-RAG</td><td>83.66</td><td>76.23</td><td>74.23</td><td>69.05</td><td>65.74</td><td>45.61</td><td>-45.48%</td></tr><tr><td rowspan="2">T-REx</td><td>LLaMA-2</td><td>80.01</td><td>70.05</td><td>71.52</td><td>68.53</td><td>66.23</td><td>42.75</td><td>-46.57%</td></tr><tr><td>+ INFO-RAG</td><td>83.52</td><td>73.22</td><td>74.93</td><td>72.32</td><td>70.12</td><td>46.45</td><td>-44.38%</td></tr><tr><td rowspan="2">ZS</td><td>LLaMA-2</td><td>69.52</td><td>65.48</td><td>63.81</td><td>60.95</td><td>57.14</td><td>28.33</td><td>-59.25%</td></tr><tr><td>+ INFO-RAG</td><td>72.50</td><td>72.62</td><td>67.62</td><td>67.86</td><td>60.48</td><td>36.19</td><td>-50.08%</td></tr></table>
+
+ Table 8: RAG performance changes with the ratio of positive passages (randomly select 500 samples).
+
+ <table><tr><td rowspan="2">Datasets</td><td rowspan="2">Method</td><td colspan="11">Position of Positive Passage</td><td>Max Δ</td></tr><tr><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td></td><td></td></tr><tr><td rowspan="2">NQ</td><td>LLaMA-2</td><td>54.94</td><td>48.05</td><td>46.05</td><td>46.45</td><td>46.35</td><td>48.30</td><td>48.35</td><td>47.15</td><td>51.64</td><td>50.44</td><td>-16.18%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>63.23</td><td>58.34</td><td>54.54</td><td>54.44</td><td>53.54</td><td>53.24</td><td>53.84</td><td>54.44</td><td>53.34</td><td>53.34</td><td>-15.80%</td><td></td></tr><tr><td rowspan="2">WebQ</td><td>LLaMA-2</td><td>66.13</td><td>63.21</td><td>62.54</td><td>62.68</td><td>64.01</td><td>62.41</td><td>63.21</td><td>64.54</td><td>63.87</td><td>64.14</td><td>-5.63%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>71.58</td><td>68.39</td><td>66.26</td><td>65.34</td><td>67.19</td><td>65.73</td><td>65.73</td><td>65.81</td><td>65.54</td><td>66.72</td><td>-8.72%</td><td></td></tr><tr><td rowspan="2">T-REx</td><td>LLaMA-2</td><td>64.43</td><td>60.13</td><td>58.34</td><td>60.23</td><td>58.54</td><td>59.14</td><td>59.74</td><td>60.53</td><td>63.53</td><td>63.23</td><td>-9.45%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>70.72</td><td>66.23</td><td>64.93</td><td>65.23</td><td>65.43</td><td>64.83</td><td>66.03</td><td>67.23</td><td>64.63</td><td>66.83</td><td>-8.61%</td><td></td></tr><tr><td rowspan="2">ZS</td><td>LLaMA-2</td><td>63.04</td><td>59.04</td><td>54.59</td><td>55.03</td><td>55.17</td><td>57.15</td><td>56.42</td><td>57.89</td><td>58.04</td><td>59.47</td><td>-13.40%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>66.42</td><td>63.33</td><td>59.04</td><td>60.23</td><td>61.42</td><td>61.66</td><td>60.00</td><td>61.19</td><td>60.23</td><td>62.14</td><td>-11.11%</td><td></td></tr></table>
+
+ Table 9: RAG performance changes with the position of positive passage (randomly select 500 samples).
+
+ <table><tr><td></td><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td></tr><tr><td>Baseline</td><td>51.47</td><td>40.26</td><td>45.05</td><td>41.78</td></tr><tr><td>+ INFO-RAG</td><td>55.67</td><td>43.29</td><td>49.76</td><td>44.02</td></tr></table>
+
+ Table 11: Performance with BM25 as the retriever.
+
+ <table><tr><td>Method</td><td>T-REx</td><td>ZS</td><td>NQ</td><td>WebQ</td></tr><tr><td>Baseline</td><td>62.53</td><td>56.81</td><td>50.36</td><td>45.47</td></tr><tr><td>Simple Mask</td><td>64.05</td><td>58.91</td><td>53.80</td><td>50.55</td></tr><tr><td>Our method</td><td>65.39</td><td>59.05</td><td>54.04</td><td>51.07</td></tr></table>
+
+ Table 13: Ablation study of masking strategy.
+
+ # A.4 Ablation Study on Masking Strategy
+
+ In general, Tables 13 and 12 show that our masking strategy in Scenario 2 is more effective than simple and straightforward masking. In particular, our method is significantly more effective in the settings where correct answers are randomly replaced with other phrases (replace) and where retrieval cannot find any answers (no answer).
+
+ # A.5 Works with Different Retriever
+
+ We evaluate our method and the baseline (LLaMA-2-13B-chat) with BM25 as the retriever. The experimental results in Table 11 indicate that our method still outperforms the baseline when the retriever is BM25.
+
+ # A.6 Performance on MMLU
+
+ Experimental results on the MMLU benchmark in the setting without RAG, shown in Table 14, indicate that INFO-RAG significantly improves the performance of LLMs in RAG while maintaining their versatility and avoiding catastrophic forgetting. MMLU is a benchmark that measures the massive multitask language understanding ability of LLMs. It covers 57 subjects across STEM, the humanities, the social sciences, and more; it ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem-solving ability (Hendrycks et al., 2020). INFO-RAG performs very close to the original LLaMA-2 on MMLU, which shows that it does not damage the basic language understanding ability of LLMs.
+
+ <table><tr><td rowspan="2">Datasets</td><td rowspan="2">Method</td><td colspan="11">Number of Retrieved Passages</td><td>Max Δ</td></tr><tr><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td><td></td><td></td></tr><tr><td rowspan="2">NQ</td><td>LLaMA-2</td><td>38.80</td><td>43.21</td><td>46.62</td><td>47.84</td><td>48.61</td><td>49.42</td><td>52.03</td><td>50.23</td><td>50.40</td><td>50.20</td><td>-25.43%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>45.18</td><td>46.80</td><td>51.44</td><td>51.23</td><td>51.00</td><td>53.21</td><td>54.03</td><td>53.44</td><td>53.82</td><td>54.60</td><td>-17.25%</td><td></td></tr><tr><td rowspan="2">WebQ</td><td>LLaMA-2</td><td>40.22</td><td>43.63</td><td>48.20</td><td>46.61</td><td>48.32</td><td>49.11</td><td>49.40</td><td>50.22</td><td>51.65</td><td>50.43</td><td>-22.13%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>50.21</td><td>53.84</td><td>54.41</td><td>55.07</td><td>55.25</td><td>55.27</td><td>57.00</td><td>55.45</td><td>56.62</td><td>56.03</td><td>-11.91%</td><td></td></tr><tr><td rowspan="2">T-REx</td><td>LLaMA-2</td><td>66.20</td><td>63.45</td><td>67.22</td><td>64.45</td><td>64.43</td><td>65.40</td><td>64.41</td><td>65.22</td><td>63.22</td><td>65.01</td><td>-5.95%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>66.25</td><td>66.03</td><td>66.31</td><td>65.80</td><td>67.23</td><td>67.22</td><td>66.65</td><td>67.83</td><td>67.03</td><td>67.40</td><td>-2.99%</td><td></td></tr><tr><td rowspan="2">ZS</td><td>LLaMA-2</td><td>49.25</td><td>50.01</td><td>52.38</td><td>54.09</td><td>56.12</td><td>56.20</td><td>56.13</td><td>56.05</td><td>55.95</td><td>56.11</td><td>-12.37%</td><td></td></tr><tr><td>+ INFO-RAG</td><td>53.17</td><td>54.08</td><td>56.35</td><td>58.01</td><td>59.45</td><td>59.12</td><td>59.40</td><td>58.55</td><td>60.03</td><td>59.08</td><td>-11.43%</td><td></td></tr></table>
360
+
361
+ Table 10: RAG performance changes with the number of retrieved passages (randomly select 500 samples).
362
+
363
+ <table><tr><td rowspan="2">Method</td><td colspan="3">T-REx</td><td colspan="3">ZS</td><td colspan="3">NQ</td><td colspan="3">WebQ</td></tr><tr><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td><td>has-ans.</td><td>replace</td><td>no-ans.</td></tr><tr><td>Baseline</td><td>75.96</td><td>43.79</td><td>5.59</td><td>67.03</td><td>16.58</td><td>1.42</td><td>69.37</td><td>30.72</td><td>6.16</td><td>65.07</td><td>31.88</td><td>5.47</td></tr><tr><td>Simple Mask</td><td>78.43</td><td>44.05</td><td>5.75</td><td>70.30</td><td>19.45</td><td>1.96</td><td>73.59</td><td>31.05</td><td>6.51</td><td>70.55</td><td>32.96</td><td>6.83</td></tr><tr><td>Our method</td><td>79.25</td><td>48.59</td><td>6.67</td><td>70.26</td><td>25.02</td><td>3.87</td><td>73.73</td><td>33.85</td><td>8.39</td><td>70.59</td><td>37.48</td><td>11.25</td></tr></table>
364
+
365
+ Table 12: Ablation study of masking strategy in three scenarios. "has-ans." is the first scenario, where correct answers appear in the retrieved texts. "replace" is the second scenario, where correct answers are randomly replaced with other phrases to simulate incorrect and incomplete knowledge. "no-ans." is the third scenario, where retrieval cannot find any answer.
366
+
367
+ This is mainly because the prefix language modeling training paradigm of our method is consistent with the pre-training task of LLMs. The difference is that during prefix language modeling training, our method learns to perform information refinement, utilizing the retrieved texts for next-token prediction.
368
+
369
+ <table><tr><td></td><td>Humanities</td><td>STEM</td><td>Social-Sciences</td><td>Other</td><td>Average</td></tr><tr><td>LLaMA-2-7B w/o RAG</td><td>42.9</td><td>36.4</td><td>51.2</td><td>52.2</td><td>45.3</td></tr><tr><td>+ INFO-RAG w/o RAG</td><td>42.8</td><td>36.1</td><td>50.8</td><td>52.0</td><td>45.0</td></tr><tr><td>LLaMA-2-13B w/o RAG</td><td>52.8</td><td>44.1</td><td>62.6</td><td>61.1</td><td>54.8</td></tr><tr><td>+ INFO-RAG w/o RAG</td><td>52.5</td><td>43.7</td><td>62.1</td><td>60.9</td><td>54.3</td></tr></table>
370
+
371
+ Table 14: Performance on MMLU in the setting without retrieval-augmented generation.
2402.18xxx/2402.18150/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75314508efdcc2b5fd2c62c859f2c89bdfaa821d226f135e48c6c266544de67a
3
+ size 902814
2402.18xxx/2402.18150/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18153/c9db1710-6005-4fc2-8761-4e676be14cdc_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09096d69044c3a1bbb48b50414b76a6f708879aa1ea67ed88e05bd3d3d0b1880
3
+ size 4797654
2402.18xxx/2402.18153/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18153/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bdf9c307e464cd09dfb804b5f5961fe197fc79f7881a3687e12317e6083e7d3
3
+ size 1750387
2402.18xxx/2402.18153/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18154/3d2cad04-baae-42a1-98cf-38bf4b8d4e7b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fec093aa12818b730ac79a4c5b958fdb1e89810190b37fe214711cd3eede702f
3
+ size 1583610
2402.18xxx/2402.18154/full.md ADDED
@@ -0,0 +1,571 @@
1
+ # Cutting Off the Head Ends the Conflict: A Mechanism for Interpreting and Mitigating Knowledge Conflicts in Language Models
2
+
3
+ Zhuoran Jin $^{1,2}$ , Pengfei Cao $^{1,2}$ , Hongbang Yuan $^{1,2}$ , Yubo Chen $^{1,2}$ , Jiexin Xu $^{3}$ , Huaijun Li $^{3}$ , Xiaojian Jiang $^{3}$ , Kang Liu $^{1,2}$ , Jun Zhao $^{1,2}$
4
+
5
+ $^{1}$ School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China
6
+
7
+ $^{2}$ The Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China
8
+
9
+ $^{3}$ China Merchants Bank
+ {zhuoran.jin, pengfei.cao, yubo.chen, kliu, jzhao}@nlpr.ia.ac.cn
10
+
11
+ # Abstract
12
+
13
+ Recently, retrieval augmentation and tool augmentation have demonstrated a remarkable capability to expand the internal memory boundaries of language models (LMs) by providing external context. However, internal memory and external context inevitably clash, leading to knowledge conflicts within LMs. In this paper, we aim to interpret the mechanism of knowledge conflicts through the lens of information flow, and then mitigate conflicts by precise interventions at the pivotal point. We find that there are some attention heads with opposite effects in the later layers, where memory heads can recall knowledge from internal memory, and context heads can retrieve knowledge from external context. Moreover, we reveal that the pivotal point at which knowledge conflicts emerge in LMs is the integration of inconsistent information flows by memory heads and context heads. Inspired by the insights, we propose a novel method called Pruning Head via PatH PatcHing (PH3), which can efficiently mitigate knowledge conflicts by pruning conflicting attention heads without updating model parameters. PH3 can flexibly control eight LMs to use internal memory ($\uparrow 44.0\%$) or external context ($\uparrow 38.5\%$). Moreover, PH3 can also improve the performance of LMs on open-domain QA tasks. We also conduct extensive experiments to demonstrate the cross-model, cross-relation, and cross-format generalization of our method.
14
+
15
+ # 1 Introduction
16
+
17
+ Language models (LMs) (Brown et al., 2020; Touvron et al., 2023; OpenAI, 2023) have memorized a substantial amount of factual knowledge during pre-training, and stored the knowledge within their parameters as internal memory (i.e., parametric knowledge) (Meng et al., 2022). During the inference phase, LMs rely on their internal memory to understand and generate text. However, the internal memory may be limited or outdated, making LMs prone to producing factually incorrect content.
18
+
19
+ To alleviate the problem, one promising solution is to employ additional retrievers or tools to augment LMs by providing external context (i.e., non-parametric knowledge). Nevertheless, internal memory and external context can often contradict each other, which is known as knowledge conflicts (Longpre et al., 2021; Chen et al., 2022; Xie et al., 2023; Yu et al., 2023). Recent works have mainly investigated the behavior and preference of LMs, attempting to determine whether these models are more inclined towards internal memory or external context when faced with knowledge conflicts. However, there is a limited understanding of the underlying mechanism of knowledge conflicts. Insights into the mechanism will facilitate precise interventions at the pivotal point to mitigate knowledge conflicts, which can not only empower LMs to more reliably adhere to internal memory (e.g., ignoring misleading external context) but also enhance faithfulness in generating text based on external context (e.g., correcting outdated memory).
20
+
21
+ In this paper, we reveal that the pivotal point at which knowledge conflicts emerge in LMs is the integration of inconsistent information flows by various attention heads in later layers. To investigate this, we consider a simple factual recall task (i.e., subject attribute prediction) inspired by the work of Yu et al. (2023). As illustrated in Figure 1, given the question (i.e., "What is the capital of France?") and the conflicting external context (i.e., "The capital of France is Rome"), the model can either use internal memory (i.e., "Paris") or external context (i.e., "Rome") to predict the subject's attribute. Following this, we present a set of "top-down" analyses to locate the pivotal point where conflicts emerge and to identify the model components that are significant in knowledge conflicts, which primarily involves the following three steps:
22
+
23
+ Step 1: We start by answering the first question "What function do model components serve in knowledge conflicts?"
24
+
25
+ ![](images/005a4b11b1d6705799b174fb79bf420882ec056f0f0c1b7fe577955735a0a008.jpg)
26
+ Figure 1: An illustration of the mechanism of knowledge conflicts in LMs: (1) Enriching the semantic information of context subject and context attribute; (2) Propagating question information to the last token through MHAs; (3) Extracting attribute information through memory attention heads and context attention heads at later layers.
27
+
28
+ We knock out the activations to examine the functionality of multi-head attention (MHA) blocks and feed-forward network (FFN) blocks. We find that FFNs enrich the semantic information of input elements in early layers, while MHAs play an important role in passing information to the last token in later layers; Step 2: Based on this, the second question naturally arises, namely "When and where do MHAs pass information to the last token?". We investigate the MHAs by knocking out the attention weights from the last token to other input elements. Results reveal that the question information is first propagated to the last token, and then the last token extracts attribute information from the subject and the attribute in the context; Step 3: Inspired by this, we aim to answer the final question "How do MHAs extract attribute information under knowledge conflicts?". We find that some attention heads in late MHAs play opposite roles, where memory heads can recall attributes from internal memory, and context heads can retrieve attributes from external context. According to our findings, the mechanism by which LMs use both internal memory and external context can be summarized as three stages in Figure 1: (1) Enriching semantic information; (2) Propagating question information; and (3) Extracting attribute information, where knowledge conflicts arise at the third stage, due to the inconsistent information flows between memory heads and context heads.
29
+
30
+ Inspired by our insights into knowledge conflicts, we propose a minimally-invasive control method called Pruning Head via PatH PatcHing (PH3), which can efficiently mitigate knowledge conflicts by intervening on attention heads without updating model parameters. First, we use the path patching technique (Goldowsky-Dill et al., 2023; Wang et al., 2023a) to localize important memory heads and context heads.
31
+
32
+ Our method can avoid the noise interference of other heads, enabling a more accurate calculation of the importance score for the target head. Then, we perform structured pruning on those negative attention heads to mitigate conflicts. In this way, our method can flexibly control LMs to use internal memory or external context. Experimental results on the World Capital dataset show that our method can not only reliably and consistently increase the average internal memory usage rate of eight LMs by $44.0\%$ (from $49.7\%$ to $93.7\%$) but also increase the external context usage rate by $38.5\%$ (from $50.3\%$ to $88.8\%$). PH3 also enables LMs to generate answers more faithfully according to retrieved passages in open-domain QA tasks. We conduct extensive experiments to demonstrate the cross-model (e.g., from GPT series to LLaMA2 series), cross-relation (e.g., from World Capital to Official Language), and cross-format (e.g., from triple format to document format) generalization. Our contributions are summarized as follows:
33
+
34
+ - We perform an exploration into the mechanism underlying knowledge conflicts, and reveal that memory heads and context heads at later layers can cause knowledge conflicts when inconsistent information flows merge.
35
+ - We propose a novel method called Pruning Head via PatH PatcHing (PH3), which can efficiently mitigate knowledge conflicts by pruning those conflicting attention heads.
36
+ - We demonstrate that our PH3 can flexibly control LMs to use internal memory ( $\uparrow 44.0\%$ ) or external context ( $\uparrow 38.5\%$ ). We also prove the cross-model, cross-relation, and cross-format generalization ability of our method.
37
+
38
+ # 2 Background
39
+
40
+ In this work, we mainly focus on the autoregressive transformer-based language models. Given a sequence of input tokens $x = [x_{1},\dots ,x_{N}]$ , the LM $\mathcal{G}$ first embeds each token $x_{i}$ into a vector $\mathbf{x}_i^0\in \mathbb{R}^d$ using an embedding matrix $E\in \mathbb{R}^{|\mathcal{V}|\times d}$ , over a vocabulary $\mathcal{V}$ . The input embeddings are processed by $L$ transformer layers. Each layer consists of an MHA and an FFN. Formally, the hidden state $\mathbf{x}_i^\ell$ of token $x_{i}$ at layer $\ell$ is calculated as:
41
+
42
+ $$
43
+ \mathbf {x} _ {i} ^ {\ell} = \mathbf {x} _ {i} ^ {\ell - 1} + \mathbf {a} _ {i} ^ {\ell} + \mathbf {m} _ {i} ^ {\ell}, \tag {1}
44
+ $$
45
+
46
+ where $\mathbf{a}_i^\ell$ and $\mathbf{m}_i^\ell$ are the outputs from the MHA block and the FFN block in the $\ell$ -th layer. Then, the vocabulary head $\phi(\cdot)$ and the softmax function $\sigma(\cdot)$ predict the output probability:
47
+
48
+ $$
49
+ \mathbf {p} _ {i} ^ {L} = \sigma (\phi (\mathbf {x} _ {i} ^ {L})). \tag {2}
50
+ $$
51
+
52
+ MHA. A MHA block consists of $M$ attention heads, which are capable of aggregating global information from the hidden states (Halawi et al., 2023; Wang et al., 2023b). An individual attention head $h$ in layer $\ell$ consists of three learnable matrices, $\mathbf{W}_Q^{\ell,h}, \mathbf{W}_K^{\ell,h}, \mathbf{W}_V^{\ell,h} \in \mathbb{R}^{d \times \frac{d}{M}}$ . Formally, for the input $\mathbf{X}^{\ell-1} = \left[\mathbf{x}_1^{\ell-1}, \dots, \mathbf{x}_N^{\ell-1}\right]$ in layer $\ell$ :
53
+
54
+ $$
55
+ \mathbf{A}^{\ell} = \left[\mathbf{H}^{\ell,1}; \dots; \mathbf{H}^{\ell,M}\right]\mathbf{W}_{O}^{\ell}, \tag{3}
56
+ $$
57
+
58
+ $$
59
+ \mathbf{H}^{\ell,h} = \mathbf{s}^{\ell,h}\mathbf{X}^{\ell-1}\mathbf{W}_{V}^{\ell,h}, \tag{4}
60
+ $$
61
+
62
+ $$
63
+ \mathbf {s} ^ {\ell , h} = \sigma \left(\frac {\left(\mathbf {X} ^ {\ell - 1} \mathbf {W} _ {Q} ^ {\ell , h}\right) \left(\mathbf {X} ^ {\ell - 1} \mathbf {W} _ {K} ^ {\ell , h}\right) ^ {T}}{\sqrt {d / M}}\right) \tag {5}
64
+ $$
65
+
66
+ where $\mathbf{A}^{\ell} = \left[\mathbf{a}_{1}^{\ell},\dots ,\mathbf{a}_{N}^{\ell}\right]$ is the MHA block's output, and $\mathbf{W}_O^{\ell}\in \mathbb{R}^{d\times d}$ is a learnable output matrix.
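+
+ As a concrete illustration of Eqs. (3)-(5), the following minimal PyTorch sketch computes one MHA block; the causal mask and biases are omitted for brevity, and all names are illustrative.
+
+ ```python
+ import torch
+
+ def attention_head(X, W_Q, W_K, W_V):
+     """One attention head (Eqs. 4-5). X: [N, d]; W_Q, W_K, W_V: [d, d/M]."""
+     d_head = W_Q.shape[1]
+     scores = (X @ W_Q) @ (X @ W_K).T / d_head ** 0.5  # [N, N]
+     s = torch.softmax(scores, dim=-1)                 # attention weights s^{l,h}
+     return s @ (X @ W_V)                              # head output H^{l,h}
+
+ def mha(X, head_weights, W_O):
+     """Eq. 3: concatenate the M head outputs and project with W_O ([d, d])."""
+     H = torch.cat([attention_head(X, *w) for w in head_weights], dim=-1)
+     return H @ W_O                                    # MHA output A^l
+ ```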
67
+
68
+ FFN. An FFN block can work as a key-value memory that stores factual knowledge (Geva et al., 2021), enriching the hidden state of token $i$:
69
+
70
+ $$
71
+ \mathbf {m} _ {i} ^ {\ell} = f \left(\left(\mathbf {x} _ {i} ^ {\ell - 1} + \mathbf {a} _ {i} ^ {\ell}\right) \mathbf {W} _ {1} ^ {\ell}\right) \mathbf {W} _ {2} ^ {\ell}. \tag {6}
72
+ $$
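+
+ Putting Eqs. (1) and (6) together, a single layer's residual-stream update can be sketched as follows (layer norms are omitted, matching the simplified formulation above; `mha` is the sketch from the previous block).
+
+ ```python
+ import torch.nn.functional as F
+
+ def transformer_layer(X, head_weights, W_O, W_1, W_2):
+     """One layer's update: x^l = x^{l-1} + a^l + m^l (Eqs. 1 and 6)."""
+     A = mha(X, head_weights, W_O)     # MHA output a^l for every position
+     M = F.gelu((X + A) @ W_1) @ W_2   # FFN output m^l (Eq. 6)
+     return X + A + M
+ ```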
73
+
74
+ # 3 Experimental Setup
75
+
76
+ # 3.1 Tasks
77
+
78
+ In this paper, we conduct controlled experiments to construct knowledge conflicts, wherein the internal memory is factual while the external context is counterfactual.
79
+
80
+ To avoid the LM being influenced by other irrelevant factors (i.e., reasoning ability), we adopt a simple factual recall task (Geva et al., 2023), which requires predicting the corresponding attribute $a_{m}$ based on the given subject $s$ and relation $r$. Building on previous work (Yu et al., 2023), we use the World Capital dataset to interpret this problem in §4, where the LM needs to predict the capital city of the country based on the question $q$:
81
+
82
+ $$
83
+ Q: \text{What is the capital of } \{s\}?\ A:
84
+ $$
85
+
86
+ We retain those questions that the LM can correctly predict the factual attributes $a_{m}$ based on internal memory, then provide the counterfactual attributes $a_{c}$ in the external context $c$ to construct conflicts:
87
+
88
+ $$
89
+ \text{The capital of } \{s\} \text{ is } \{a_c\}.\ \{q\}
90
+ $$
91
+
92
+ To mitigate knowledge conflicts, we further construct three datasets for verifying the generalization of our method in §5, including the Official Language, Country, and Continent datasets. We also generate a more complex World Capital D dataset based on the World Capital dataset, using gpt-3.5-turbo to rewrite the external context from triplet form into document form. More details about these datasets are shown in Appendix B.
93
+
94
+ # 3.2 Models
95
+
96
+ We analyze two GPT-series LMs: GPT-2 XL (Radford et al., 2019) and GPT-J (Wang and Komatsuzaki, 2021) in §4. Additionally, we also validate the effectiveness of our method on six LMs: OPT-1.3B, OPT-2.7B (Zhang et al., 2022), Pythia-6.9B, Pythia-12B (Biderman et al., 2023), LLaMA2-7B and LLaMA2-13B (Touvron et al., 2023) in §5.
97
+
98
+ # 4 Interpreting Knowledge Conflicts
99
+
100
+ We utilize a "top-down" analysis approach to locate the pivotal point where conflicts emerge and to identify the model components that are significant in knowledge conflicts. We start by examining the functionality of model components by knocking out activations, and reveal that MHAs in the middle and late layers play a crucial role in passing information to the last token (§4.1). Then, we further investigate MHAs by knocking out the attention weights. We find the question information is first passed to the last token, then the last token extracts information from the subject and the attribute in the context (§4.2). Last, we discover that some attention heads in later MHAs play opposite roles in conflicts, where memory heads can recall knowledge from internal memory, and context heads can retrieve knowledge from external context (§4.3).
101
+
102
+ ![](images/53e47830ecddab6cf7a64ab7f55c2d695474a84bff3430e2255ca1fd5d436122.jpg)
103
+ (a) Effect of FFNs on internal memory.
104
+
105
+ ![](images/fe039e36809e6297917cd9a22dee84f2bc607757c55d9772cebaa095619cb861.jpg)
106
+ (b) Effect of MHAs on internal memory.
107
+
108
+ ![](images/e8af759244540db9d9dbe968b9d0f8d900190682d6a1fe9aaef31e7aef28f0bd.jpg)
109
+ (c) Extraction rate of internal memory.
110
+
111
+ ![](images/6b3182be1352ec856e3e0caac5f7bd2c3074e3319994209452681d3941dd1a6f.jpg)
112
+ (d) Effect of FFNs on external context.
113
+
114
+ ![](images/2f704cf087a252b3ab574183d8a66cfe7d10dca2acfc419386322d487b1a723d.jpg)
115
+ (e) Effect of MHAs on external context.
116
+
117
+ ![](images/07df1849c0bdbf89109192d36234406c209f8e6f3582820dff4594baac7ede96.jpg)
118
+ (f) Extraction rate of external context.
119
+ Figure 2: Effect of model components (FFNs and MHAs) in GPT-2 XL on the final prediction probability. Figures 2a and 2b (Figures 2d and 2e) show the effect of different model components and input elements when the model predicts based on internal memory (external context). The deeper color indicates the greater the impact of knocking out this part on the original prediction probability. Figure 2c (Figure 2f) shows the effect of MHAs and FFNs on the last token's attribute extraction rate when the model predicts based on internal memory (external context).
120
+
121
+ # 4.1 Examining Component Functionality
122
+
123
+ We start by exploring the functionality of model components (including FFNs and MHAs across various layers) in knowledge conflicts.
124
+
125
+ Experiment 1: Knocking Out Component. We examine which component in the transformer layer is critical for the attribute prediction by knocking out activations. Then, we divide the input into six elements for analysis: context subject $s_c$ , context relation $r_c$ , context attribute $a_c$ , question subject $s_q$ , question relation $r_q$ , and the last token $x_N$ . To measure the impact on the final prediction results, we zero-out the updates to the specified input element from the MHA and FFN blocks within each layer. For example, to intervene in the update of the $\ell$ -th MHA (FFN) to the input element $s_c$ , we set $\mathbf{a}_i^{\ell'} = \mathbf{0}$ ( $\mathbf{m}_i^{\ell'} = \mathbf{0}$ ) for $i$ in the token range of $s_c$ and $\ell' = \max(1, \ell - W/2), \dots, \min(L, \ell + W/2)$ , where $W$ denotes the window size. We define the effect of a model component as the change in the original prediction probability after knocking it out.
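+
+ A minimal sketch of this knockout, assuming a HuggingFace GPT-2-style model: the module names (transformer.h, attn, mlp) follow the GPT-2 implementation and would differ for other architectures.
+
+ ```python
+ def knockout_hook(token_range):
+     """Zero a block's update (a^l or m^l) for the tokens in token_range."""
+     def hook(module, inputs, output):
+         hidden = output[0] if isinstance(output, tuple) else output
+         hidden[:, token_range, :] = 0.0  # knock out this block's update
+         return output
+     return hook
+
+ def knock_out(model, layer, window, token_range, block="attn"):
+     """Attach knockout hooks to `block` in the layers around `layer`."""
+     L = len(model.transformer.h)
+     handles = []
+     for l in range(max(0, layer - window // 2), min(L, layer + window // 2 + 1)):
+         target = getattr(model.transformer.h[l], block)  # "attn" or "mlp"
+         handles.append(target.register_forward_hook(knockout_hook(token_range)))
+     return handles  # call handle.remove() on each to restore the model
+ ```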
126
+
127
+ Results. Figure 2 illustrates the effect of model components (FFNs and MHAs) in GPT-2 XL with the window size $W = 5$. Our observation reveals that destroying the FFN blocks in the early layers has a significant effect on the prediction probability, while destroying the FFN blocks at the late layers shows minimal or no impact (Figures 2a and 2d). Moreover, the MHA blocks at the middle and late layers are crucial for the last token (Figures 2b and 2e).
128
+
129
+ A possible explanation of the model's behavior on the factual recall task is that the early FFNs first enrich the semantic information of input elements, and then the enriched semantic information about attributes is extracted to the last token via late MHAs, where knowledge conflicts may arise at the later stage. To verify this hypothesis, we will examine the attribute extraction function of MHAs.
130
+
131
+ Experiment 2: Extracting Attributes via MHAs. We adopt the extraction rate (Geva et al., 2023) to examine the attribute extraction function of MHAs. We apply the early exit (Schuster et al., 2021; Geva et al., 2022) to project the MHA update $\mathbf{a}_N^\ell$ for the last token $x_N$ over the vocabulary. Then we check whether the top token $t^\ell$ of each update aligns with the attribute $t^*$ predicted at the final layer $L$ :
132
+
133
+ $$
134
+ t ^ {*} = \arg \max \left(\mathbf {p} _ {N} ^ {L}\right), \tag {7}
135
+ $$
136
+
137
+ $$
138
+ t ^ {\ell} = \arg \max \left(\sigma \left(\phi \left(\mathbf {a} _ {N} ^ {\ell}\right)\right)\right). \tag {8}
139
+ $$
140
+
141
+ We consider that the MHA correctly performs attribute extraction when $t^{*} = t^{\ell}$ . For comparison, we also examine the extraction rate of FFNs.
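+
+ A sketch of this early-exit check, assuming the MHA update $\mathbf{a}_N^\ell$ has been cached with a forward hook and that `unembed` applies the vocabulary head $\phi$ (the softmax can be skipped because it does not change the argmax):
+
+ ```python
+ def extraction_correct(a_N_l, x_N_L, unembed):
+     """Eqs. 7-8: does the early-exit token from a^l_N match the final token?"""
+     t_star = unembed(x_N_L).argmax(-1)  # t* from the final hidden state
+     t_l = unembed(a_N_l).argmax(-1)     # t^l from the layer-l MHA update
+     return bool(t_l == t_star)
+
+ # Extraction rate of layer l = fraction of examples with extraction_correct True.
+ ```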
142
+
143
+ Results. As illustrated in Figures 2c and 2f, it is evident that the attribute extraction rate of MHAs significantly exceeds that of FFNs. Moreover, attribute extraction mainly takes place in layers 24-48. Results for GPT-J show similar trends in Appendix C. The above findings motivate us to conduct an in-depth study on the information flows of MHAs from input elements to the last token.
144
+
145
+ ![](images/f634868d53c6676bb53f55ee73ffcb0d6d65f0a99c1afa14620e103f66c3746f.jpg)
146
+ (a) Prediction based on internal memory.
147
+
148
+ ![](images/219e0e8ff25d7b85565f8815bd4c4a1a3e3b1d15f50de40b1dd14bd45672c323.jpg)
149
+ (b) Prediction based on external context.
150
+ ![](images/754d53fc754af053075e8833ea90dc57a4ebecdc74b29eaf336f989fb3e25545.jpg)
+ (c) Prediction based on internal memory.
+ Figure 3: Relative change in the prediction probability when blocking the information flow from the input elements to the last token. Figures 3a and 3b only provide conflicting context. Figure 3c provides both supporting and conflicting context to internal memory, where C1 denotes the supporting context and C2 denotes the conflicting context.
151
+
152
154
+
155
+ # 4.2 Tracing Information Flow
156
+
157
+ The analysis presented above confirms that the last token extracts attribute information for prediction through MHA blocks. Following this, we explore the order and importance of the information flow from the various elements to the last token.
158
+
159
+ Experiment 3: Blocking Information Flow. We localize the information propagation from the input elements (including $s_c$, $r_c$, $a_c$, $s_q$ and $r_q$) to the last token by knocking out attention edges between them. For example, to block the information flow from the input element $s_c$ to the last token $x_N$ in the layer $\ell$, we set the attention weight $\mathbf{s}^{\ell',h}[N,i] = 0$ for $i$ in the token range of $s_c$, $h = 1, \dots, M$, and $\ell' = \max(1,\ell - W/2), \dots, \min(L,\ell + W/2)$. In this way, we can restrict the last token from attending to the target element. If blocking the information propagation between them has a significant impact on the original prediction probability, this indicates that it is a crucial information flow.
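+
+ Conceptually, the intervention edits the attention-weight matrix before the value aggregation; in practice this is applied inside the attention module (e.g., by patching its forward pass). A minimal sketch:
+
+ ```python
+ def block_flow(attn_weights, src_range):
+     """Knock out the attention edges from the last token to src_range.
+     attn_weights: [batch, heads, N, N]; sets s^{l,h}[N, i] = 0 for all heads."""
+     blocked = attn_weights.clone()
+     blocked[:, :, -1, src_range] = 0.0
+     return blocked
+ ```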
160
+
161
+ Results. Figure 3 illustrates the information flow in GPT-2 XL with the window size $W = 9$ . We can observe that in the early to middle layers, blocking the attention to the question relation leads to a decrease in the prediction probability. Similarly, in the subsequent layers, blocking the attention to the question subject also results in a decrease in the prediction probability. This suggests that the critical relation and subject information in the question are sequentially transmitted to the last token.
162
+
163
+ Then, in the middle to late layers, blocking the attention to the context subject and context attribute has the opposite effect on the final prediction probability. Taking Figure 3a as an example (when the model predicts the attribute based on internal memory), blocking the attention to the context attribute can improve the prediction probability; however, blocking the attention to the context subject can reduce the prediction probability.
164
+
165
+ This suggests that the last token can extract the internal knowledge from the context subject, and extract the external knowledge from the context attribute. In addition, the last token also extracts a certain degree of internal knowledge from the question subject. Results for GPT-J show consistent trends in Appendix C.
166
+
167
+ Overall, this shows that there are two specific stages in the process of information flow passing to the last token: (1) the question information is first passed to the last token; (2) the last token extracts or copies the attribute from the context subject or the context attribute. In the later stage, knowledge conflicts arise during the process of merging inconsistent information flows from MHAs.
168
+
169
+ Experiment 4: Extending to Conflicts between Contexts. We extend our analysis to a more complex scenario in which the model is presented with both supporting context and conflicting context relative to internal memory. Supporting context and conflicting context contain $a_{m}$ and $a_{c}$ respectively:
170
+
171
+ C1: The capital of $\{s\}$ is $\{a_m\}$.
+ C2: The capital of $\{s\}$ is $\{a_c\}$. $\{q\}$
173
+
174
+ We find that GPT-2 XL prefers to choose attributes consistent with internal memory $97.6\%$ of the time. Hence, we only analyze the cases where the model makes predictions based on its internal memory.
175
+
176
+ Results. As illustrated in Figure 3c, we can observe that the question information is first passed to the last token in the first stage, which is consistent with the trend of a single conflicting context. In the second stage, a notable distinction is that the model no longer extracts the memory attribute from the subject; instead, it opts for a more straightforward approach of copying the memory attribute from the context. The above findings indicate that there exists a mechanism within MHAs capable of distinguishing and selecting between internal knowledge and external knowledge. This motivates us to conduct further analysis of MHAs.
177
+
178
+ ![](images/457da2b53d3009b2a09426024cc09a2e3e8c994ed5404e3414ca99b7da200afe.jpg)
179
+ (a) Memory head.
180
+
181
+ ![](images/78b25d38cbd3d3b22811e07e15549d423cac091572461f57560d629ef1f69119.jpg)
182
+ (b) Context head.
183
+
184
+ ![](images/d5577bf6dddcff39d64214968d76ccacd506b9e8ae37d26f1d4a6637af05ee96.jpg)
185
+ (c) Extraction rate of attention heads.
186
+ Figure 4: Memory heads and context heads in GPT-2 XL. Figure 4a shows the importance score heatmap for predicting based on internal memory. Figure 4b shows the importance score heatmap for predicting based on external context. Figure 4c illustrates the memory and context attribute extraction rate of different attention heads.
187
+
188
189
+
190
+ # 4.3 Looking Deeper into Attention Heads
191
+
192
+ Attention heads serve as the fundamental component of an MHA block. For example, GPT-2 XL contains a total of 1,200 attention heads. This motivates us to conduct an investigation into the role of attention heads in handling knowledge conflicts.
193
+
194
+ Experiment 5: Discovering Important Heads. To discover the attention heads that are crucial for predicting memory attributes or context attributes, we compute the gradient-based importance score (Michel et al., 2019; Bansal et al., 2023) for each head. Given a dataset $\mathcal{D}$ with a set of inputs $x$ and outputs $y$ , the importance score of an attention head $h$ captures the expected sensitivity of the model to $h$ and is computed as follows:
195
+
196
+ $$
197
+ I^{l,h}(\mathcal{D}) = \mathbb{E}_{(x,y)}\left|\left(\mathbf{H}^{l,h}\right)^{T}\frac{\partial \mathcal{L}(y\mid x)}{\partial \mathbf{H}^{l,h}}\right|, \tag{9}
198
+ $$
199
+
200
+ where $\mathcal{L}(\cdot)$ is the loss function of conditional autoregressive generation. The proxy score of head $h$ for predicting internal memory is calculated as:
201
+
202
+ $$
203
+ S _ {m} ^ {l, h} \left(\mathcal {D} _ {m}, \mathcal {D} _ {m} ^ {\prime}\right) = I ^ {l, h} \left(\mathcal {D} _ {m}\right) - I ^ {l, h} \left(\mathcal {D} _ {m} ^ {\prime}\right), \tag {10}
204
+ $$
205
+
206
+ where $(x, a_{m}) \in \mathcal{D}_{m}$ denotes that the original outputs are memory attributes, and $(x, a_{c}) \in \mathcal{D}_{m}^{\prime}$ denotes that the original outputs are replaced with context attributes. In this way, we can also calculate the proxy score of head $h$ for predicting external context as:
207
+
208
+ $$
209
+ S _ {c} ^ {l, h} \left(\mathcal {D} _ {c}, \mathcal {D} _ {c} ^ {\prime}\right) = I ^ {l, h} \left(\mathcal {D} _ {c}\right) - I ^ {l, h} \left(\mathcal {D} _ {c} ^ {\prime}\right). \tag {11}
210
+ $$
211
+
212
+ We compute the proxy score of each head across different layers to discover important heads.
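+
+ A sketch of Eqs. (9)-(10), assuming forward hooks have cached each head's output $\mathbf{H}^{l,h}$ (with retain_grad() called) in a dictionary head_out; loss_fn is a stand-in for the autoregressive loss $\mathcal{L}(y \mid x)$.
+
+ ```python
+ import torch
+
+ def head_importance(model, dataset, n_layers, n_heads, loss_fn, head_out):
+     """Gradient-based importance I^{l,h} (Eq. 9), averaged over the dataset."""
+     scores = torch.zeros(n_layers, n_heads)
+     for x, y in dataset:
+         model.zero_grad()
+         loss_fn(model, x, y).backward()  # fills head_out[(l, h)].grad
+         for l in range(n_layers):
+             for h in range(n_heads):
+                 H = head_out[(l, h)]
+                 scores[l, h] += (H * H.grad).sum().abs().item()
+     return scores / len(dataset)
+
+ # Proxy score for internal memory (Eq. 10): importance with memory-attribute
+ # targets minus importance with the targets swapped to context attributes.
+ # S_m = head_importance(model, D_m, ...) - head_importance(model, D_m_prime, ...)
+ ```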
213
+
214
+ Results. As shown in Figure 4a (Figure 4b), the deeper color of the red square indicates a more significant contribution from this attention head to the model's predictions based on internal memory (external context). We can observe that a specific set of attention heads within the middle-to-late layers play opposite roles in predicting attributes. Accordingly, we refer to those heads that contribute to the prediction of memory attributes as memory heads, and those that facilitate predicting context attributes as context heads. Therefore, we claim that they may serve in a mutually exclusive capacity during knowledge conflicts. The heatmaps of GPT-J are provided in Appendix C.
215
+
216
+ Experiment 6: Extracting Specific Attributes via Heads. We further analyze the two types of heads discovered above to verify their role in knowledge conflicts. We rank the attention heads in descending order based on their importance scores, $S_{m}^{l,h}$ for memory and $S_{c}^{l,h}$ for context, subsequently identifying the top-5% of heads as memory heads and context heads, respectively. For comparison, we also randomly choose an additional 5% of the attention heads as other heads. Then, we examine their memory extraction rate when $t^{\ell} = a_{m}$, and context extraction rate when $t^{\ell} = a_{c}$.
217
+
218
+ Results. As shown in Figure 4c, memory heads and context heads are responsible for extracting different attribute information to the last token with a significant difference between memory and context extraction rates. Therefore, we discern that the pivotal point at which knowledge conflicts emerge in LMs is the integration of inconsistent information flows by memory heads and context heads.
219
+
220
+ # 5 Mitigating Knowledge Conflicts
221
+
222
+ Building on the above insights, we propose a novel method called Pruning Head via PatH PatcHing (PH3) to efficiently mitigate knowledge conflicts by intervening on attention heads without the need to update model parameters (§5.1). Then, we conduct extensive experiments to show that our method can flexibly control LMs to use internal memory or external context (§5.2). Moreover, we analyze the generalization capability of our method (§5.3).
223
+
224
+ # 5.1 Method
225
+
226
+ Our method consists of two stages: first identifying the important heads through path patching, and then intervening on these heads via structured pruning.
227
+
228
+ Localizing Memory Heads and Context Heads via Path Patching. When we use the gradient-based method in §4.3 to estimate the importance score of the target head $h$, it is subject to interference from other heads. The calculated gradients may not fully reflect the contribution of the target head, but rather a mixture of the influences from other heads. Therefore, we adopt the path patching technique (Goldowsky-Dill et al., 2023; Wang et al., 2023a) to analyze the causal relationship between the head $h$ and the output attribute (including $a_{m}$ and $a_{c}$) in conflicts. To calculate the importance score $S_{c}^{\ell,h}$ of the target head $h$, our path patching method consists of three steps, shown in Figure 14:
229
+
230
+ 1. Run on the original input $x \in \mathcal{D}_c$ to record the original activations of all heads;
231
+ 2. Run on the corrupted input $\tilde{x}$ to record the corrupted activations of all heads, where $\tilde{x}$ is:
232
+
233
+ The capital of $\{s\}$ is $\langle\mathrm{unk}\rangle$. $\{q\}$
234
+
235
+ where $\langle \mathrm{unk}\rangle$ is the special token;
236
+
237
+ 3. Run on the original input $x$, keeping all the heads frozen to their activations on $x$, except for the target head $h$, whose activation is set to its activation on the corrupted input $\tilde{x}$. Then measure the importance score as the change in the output logits.
238
+
239
+ The importance score $S_{c}^{\ell,h}$ of head $h$ is computed as:
240
+
241
+ $$
242
+ S_{c}^{\ell,h}\left(\mathcal{D}_{c}\right) = \mathbb{E}_{x}\left[\left(\mathbb{P}_{x}\left(a_{c}\right) - \mathbb{P}_{x}\left(a_{m}\right)\right) - \left(\mathbb{P}_{\tilde{x}}\left(a_{c}\right) - \mathbb{P}_{\tilde{x}}\left(a_{m}\right)\right)\right]. \tag{12}
243
+ $$
244
+
245
+ We adopt similar steps to calculate the importance score $S_{m}^{\ell,h}$ of the target head $h$ for memory attribute prediction in Appendix D. We also provide the importance score heatmaps of memory and context heads for various models in Appendix E, and our method can clearly distinguish between them.
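+
+ Schematically, the three-step procedure for a single target head can be written as follows; cache_head_activations and run_with_heads are hypothetical helpers standing in for a hook-based implementation that records or overrides per-head activations.
+
+ ```python
+ def path_patch_score(model, x, x_corrupt, target_head, a_c, a_m):
+     """Three-step path patching for one head (l, h), following Eq. 12."""
+     clean_acts = cache_head_activations(model, x)            # step 1
+     corrupt_acts = cache_head_activations(model, x_corrupt)  # step 2
+     overrides = dict(clean_acts)
+     overrides[target_head] = corrupt_acts[target_head]       # patch only (l, h)
+     p_patched = run_with_heads(model, x, overrides)          # step 3
+     p_clean = run_with_heads(model, x, clean_acts)           # original run
+     # Change in the context-vs-memory margin caused by corrupting this head.
+     return (p_clean[a_c] - p_clean[a_m]) - (p_patched[a_c] - p_patched[a_m])
+ ```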
246
+
247
+ Pruning Attention Heads to Mitigate Knowledge Conflicts. By ranking all the attention heads in ascending order based on the importance score $S_{c}^{l,h}$ ( $S_{m}^{l,h}$ ), we can prune the top- $k\%$ attention heads that negatively impact the model's capability to predict context (memory) attributes, thereby enhancing the model's ability to utilize external context (internal memory). To prune a head $h$ in layer $\ell$ in practice, we set $\mathbf{H}^{\ell,h}$ to be the zero matrix.
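+
+ In a GPT-2-style implementation, setting $\mathbf{H}^{\ell,h}$ to zero can be done with a forward pre-hook on the attention output projection, which sees the concatenated per-head outputs; the module names below are assumptions.
+
+ ```python
+ def make_prune_pre_hook(heads, d_head):
+     """Zero the slices of [H^{l,1}; ...; H^{l,M}] belonging to pruned heads."""
+     def pre_hook(module, inputs):
+         (hidden,) = inputs
+         for h in heads:
+             hidden[..., h * d_head:(h + 1) * d_head] = 0.0
+         return (hidden,)
+     return pre_hook
+
+ # Hypothetical usage: heads_to_prune maps layer index -> list of head indices.
+ # for l, heads in heads_to_prune.items():
+ #     proj = model.transformer.h[l].attn.c_proj  # output projection W_O
+ #     proj.register_forward_pre_hook(make_prune_pre_hook(heads, d_head))
+ ```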
248
+
249
+ # 5.2 Experiment
250
+
251
+ Setup. We evaluate our method on five datasets, including World Capital, World Capital D, Official Language, Country, and Continent. To verify the generalization of PH3, we only calculate the importance scores of the attention heads on the World Capital dataset, and then directly evaluate PH3 on other datasets. We also select 1,000 test samples from an open-domain QA dataset NQ (Kwiatkowski et al., 2019), providing the LM with the top-5 retrieved passages, and ensuring that at least one relevant passage is among them. We validate the effectiveness of PH3 on eight LMs.
252
+
253
+ Metrics. We use the internal memory usage rate $RM = \frac{f_m}{f_m + f_c + f_o}$ and the external context usage rate $RC = \frac{f_c}{f_m + f_c + f_o}$ to assess how effectively the method controls the reliance of LMs on either internal memory or external context, where $f_m$ is the frequency of relying on internal memory, $f_c$ is the frequency of relying on external context, and $f_o$ is the frequency of other answers. For the open-domain QA task, we use Recall to evaluate whether the model can provide correct answers based on the retrieved passages following Adlakha et al. (2023).
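+
+ The usage rates translate directly into code; the exact-match comparison below is a simplifying assumption (answer normalization may be applied in practice).
+
+ ```python
+ def usage_rates(preds, memory_answers, context_answers):
+     """RM = f_m / (f_m + f_c + f_o); RC = f_c / (f_m + f_c + f_o)."""
+     f_m = f_c = f_o = 0
+     for pred, a_m, a_c in zip(preds, memory_answers, context_answers):
+         if pred == a_m:
+             f_m += 1
+         elif pred == a_c:
+             f_c += 1
+         else:
+             f_o += 1
+     total = f_m + f_c + f_o
+     return f_m / total, f_c / total
+ ```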
254
+
255
+ Baselines. We compare with the following baselines: (1) Prompt: We instruct the LM to generate answers based on internal memory or external context through specific prompts; (2) CAD: Shi et al. (2023) leverage contrastive decoding (Li et al., 2023b) to encourage the LM to attend to its context during generation; (3) Gradient: We replace our path patching method with the gradient-based method to discover the attention heads. We select the optimal pruning rate $k$ on the development set for both Gradient and PH3. More details about hyperparameter settings are in Appendix B.2.
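+
+ For intuition, CAD adjusts the next-token logits roughly as sketched below, where alpha controls how strongly the context is amplified; see Shi et al. (2023) for the exact formulation.
+
+ ```python
+ def cad_logits(logits_with_ctx, logits_without_ctx, alpha=0.5):
+     """Context-aware decoding: contrast predictions with and without context."""
+     return (1 + alpha) * logits_with_ctx - alpha * logits_without_ctx
+ ```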
256
+
257
+ Results. Table 1 shows the results of GPT-2 XL, GPT-J and LLaMA2-7B; results for the other models are in Table 2. Throughout our experiments, we note the following key observations:
258
+
259
+ <table><tr><td rowspan="2">Model</td><td rowspan="2" colspan="2">Method</td><td colspan="2">World Capital</td><td colspan="2">World Capital D</td><td colspan="2">Official Language</td><td colspan="2">Country</td><td colspan="2">Continent</td></tr><tr><td>RM</td><td>RC</td><td>RM</td><td>RC</td><td>RM</td><td>RC</td><td>RM</td><td>RC</td><td>RM</td><td>RC</td></tr><tr><td rowspan="9">GPT-2 XL</td><td colspan="2">Base</td><td>59.2</td><td>40.8</td><td>47.2</td><td>52.8</td><td>42.2</td><td>57.8</td><td>37.2</td><td>62.8</td><td>41.5</td><td>58.5</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>12.5</td><td>81.2</td><td>21.3</td><td>71.2</td><td>20.3</td><td>74.4</td><td>24.5</td><td>75.5</td><td>16.2</td><td>43.4</td></tr><tr><td>Gradient</td><td>72.4</td><td>9.8</td><td>78.6</td><td>10.5</td><td>41.5</td><td>40.5</td><td>39.1</td><td>60.2</td><td>42.7</td><td>46.6</td></tr><tr><td>PH3 (Ours)</td><td>97.9</td><td>0.6</td><td>93.3</td><td>2.5</td><td>74.4</td><td>9.8</td><td>50.9</td><td>36.3</td><td>53.1</td><td>38.1</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>9.3</td><td>87.5</td><td>18.9</td><td>75.2</td><td>17.1</td><td>80.7</td><td>18.5</td><td>81.4</td><td>25.5</td><td>58.3</td></tr><tr><td>CAD</td><td>25.0</td><td>65.6</td><td>12.5</td><td>63.6</td><td>9.1</td><td>80.5</td><td>27.2</td><td>72.5</td><td>22.9</td><td>60.4</td></tr><tr><td>Gradient</td><td>44.4</td><td>49.0</td><td>28.0</td><td>58.7</td><td>29.6</td><td>59.5</td><td>36.4</td><td>63.4</td><td>18.4</td><td>51.5</td></tr><tr><td>PH3 (Ours)</td><td>27.5</td><td>68.9</td><td>7.7</td><td>91.3</td><td>20.7</td><td>74.8</td><td>22.7</td><td>75.7</td><td>27.7</td><td>66.7</td></tr><tr><td>+ Prompt</td><td>3.6</td><td>95.1</td><td>5.2</td><td>94.4</td><td>9.6</td><td>88.7</td><td>12.6</td><td>86.0</td><td>20.9</td><td>63.8</td></tr><tr><td rowspan="9">GPT-J</td><td colspan="2">Base</td><td>37.5</td><td>62.5</td><td>43.1</td><td>56.9</td><td>41.5</td><td>58.5</td><td>54.0</td><td>46.0</td><td>43.2</td><td>56.8</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>29.8</td><td>67.1</td><td>31.6</td><td>62.1</td><td>23.1</td><td>69.1</td><td>22.4</td><td>77.6</td><td>12.5</td><td>86.0</td></tr><tr><td>Gradient</td><td>67.9</td><td>8.3</td><td>67.6</td><td>6.7</td><td>39.4</td><td>53.4</td><td>54.4</td><td>45.5</td><td>57.2</td><td>30.0</td></tr><tr><td>PH3 (Ours)</td><td>93.3</td><td>1.6</td><td>76.5</td><td>10.8</td><td>63.3</td><td>25.3</td><td>58.9</td><td>40.5</td><td>75.1</td><td>17.6</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>31.9</td><td>64.5</td><td>15.8</td><td>76.4</td><td>16.2</td><td>70.6</td><td>17.9</td><td>82.1</td><td>7.2</td><td>91.4</td></tr><tr><td>CAD</td><td>2.5</td><td>89.9</td><td>13.4</td><td>68.2</td><td>4.7</td><td>89.9</td><td>17.0</td><td>81.8</td><td>13.0</td><td>80.3</td></tr><tr><td>Gradient</td><td>6.1</td><td>88.3</td><td>7.9</td><td>67.8</td><td>5.7</td><td>76.4</td><td>29.2</td><td>70.5</td><td>36.8</td><td>60.7</td></tr><tr><td>PH3 (Ours)</td><td>0.2</td><td>99.3</td><td>0.1</td><td>98.4</td><td>2.3</td><td>90.6</td><td>9.5</td><td>86.7</td><td>8.0</td><td>64.9</td></tr><tr><td>+ Prompt</td><td>0.1</td><td>99.5</td><td>0.2</td><td>97.8</td><td>2.0</td><td>81.9</td><td>1.4</td><td>98.6</td><td>1.4</td><td>90.9</td></tr><tr><td rowspan="9">LLaMA2-7B</td><td colspan="2">Base</td><td>46.3</td><td>53.7</td><td>95.5</td><td>4.0</td><td>18.8</td><td>80.3</td><td>52.9</td><td>46.8</td><td>30.9</td><td>69.1</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>36.0</td><td>63.2</td><td>96.0</td><td>3.7</td><td>40.0</td><td>59.1</td><td>68.2</td><td>31.6</td><td>77.4</td><td>22.6</td></tr><tr><td>Gradient</td><td>81.0</td><td>5.8</td><td>95.1</td><td>1.6</td><td>50.1</td><td>47.4</td><td>60.0</td><td>38.0</td><td>64.5</td><td>24.5</td></tr><tr><td>PH3 (Ours)</td><td>98.1</td><td>1.2</td><td>98.0</td><td>1.3</td><td>73.7</td><td>17.8</td><td>76.9</td><td>20.6</td><td>90.5</td><td>8.8</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>3.2</td><td>96.6</td><td>92.4</td><td>2.2</td><td>25.5</td><td>73.8</td><td>58.2</td><td>41.5</td><td>19.2</td><td>80.3</td></tr><tr><td>CAD</td><td>1.4</td><td>95.5</td><td>29.1</td><td>70.6</td><td>0.0</td><td>100.0</td><td>13.6</td><td>86.1</td><td>0.2</td><td>98.2</td></tr><tr><td>Gradient</td><td>23.6</td><td>63.2</td><td>40.1</td><td>58.8</td><td>25.7</td><td>74.6</td><td>17.6</td><td>82.2</td><td>27.1</td><td>72.9</td></tr><tr><td>PH3 (Ours)</td><td>1.6</td><td>97.4</td><td>19.1</td><td>73.4</td><td>0.1</td><td>99.9</td><td>5.2</td><td>94.7</td><td>0.5</td><td>99.4</td></tr><tr><td>+ Prompt</td><td>0.4</td><td>98.8</td><td>10.6</td><td>85.3</td><td>0.0</td><td>100.0</td><td>2.8</td><td>97.0</td><td>0.0</td><td>100.0</td></tr></table>
260
+
261
+ Table 1: Experimental results of GPT-2 XL, GPT-J and LLaMA2-7B on five datasets. Bolds denote the best results.
262
+
263
+ (1) PH3 significantly outperforms other baselines. Experimental results show that PH3 can not only increase the average internal memory usage rate of eight LMs by $44.0\%$ , but also increase the average external context usage rate by $38.5\%$ . When PH3 is combined with Prompt, it can more effectively control the LMs to use external context.
264
+ (2) As shown in Table 3, PH3 can also achieve an average $6.2\%$ Recall improvement on open-domain QA tasks. By pruning a small number of negative context heads, PH3 can make LMs generate answers more faithfully based on retrieved passages.
265
+ (3) Although Prompt and CAD can effectively increase the external context usage rate, there are limitations. CAD cannot directly enhance internal memory, and Prompt may even have the opposite effect. In contrast, our method offers a viable solution to enhance the internal memory usage rate.
266
+
267
+ # 5.3 Analysis
268
+
269
+ We conduct a thorough analysis of the generalization ability of PH3. For cross-model generalization, PH3 is effective across a wide range of models. This shows that our method is not limited to small models, but can also be adopted on relatively large models, including the popular LLaMA2 series. For cross-relation generalization, by intervening on the attention heads discovered on World Capital, our
270
+
271
+ method can also well resolve knowledge conflicts on other relation types. This indicates that PH3 does not identify attention heads specific to a certain type of relation. Instead, it identifies universal memory and context heads. For cross-format generalization, PH3 can transfer well from triple-form context to document-form context. This indicates that our method does not merely remember the relative positions of elements in context, but is capable of understanding the external context. Compared to the Gradient, our method has demonstrated superior generalizability. We also analyze the impact of the number of pruning heads in Appendix G.
272
+
273
+ # 6 Conclusion
274
+
275
+ In this paper, we perform an exploration into the mechanism underlying knowledge conflicts and reveal that memory and context heads in later layers can cause knowledge conflicts when merging inconsistent information flows. Based on our insights, we propose a novel method called Pruning Head via PatH PatcHing (PH3), which can mitigate knowledge conflicts by pruning those conflicting attention heads. We prove that PH3 can flexibly control LMs to use internal memory or external context. We also demonstrate the cross-model, cross-relation, and cross-format generalization of our method.
276
+
277
+ # Limitations
278
+
279
+ For further study, we summarize some limitations of our work as follows:
280
+
281
+ - Similar to previous works on mechanistic interpretability that adopt tasks such as antonym generation (Todd et al., 2023), fact recall (Meng et al., 2022; Geva et al., 2023), arithmetic operation (Hanna et al., 2023; Stolfo et al., 2023), and text classification (Bansal et al., 2023; Wang et al., 2023b), our work also selects a relatively simple task to interpret the mechanism behind knowledge conflicts. Simple tasks enable us to better control variables and minimize external distractions. In the future, we plan to extend our analysis to more complex and realistic scenarios, such as where irrelevant information is present within the external context, or where the model needs to reason with both internal and external knowledge.
282
+
283
+ - Although our research has delved into the attention heads in LMs, there may be more basic elements involved in knowledge conflicts. Furthermore, the memory and context heads we have discovered may not only be responsible for extracting knowledge from internal memory or external context. These heads may also have other functions, such as helping the model capture global dependencies of input texts. By pruning these heads, the original capabilities of the model may be affected. Therefore, we will further explore mitigating knowledge conflicts through more subtle intervention methods.
284
+
285
+ In summary, the mechanism behind knowledge conflicts remains a largely unexplored area, and we hope our work can offer some useful insights for further research.
286
+
287
+ # Ethics Statement
288
+
289
+ To enhance the reproducibility of our research, we will make all source code and datasets publicly available upon the acceptance of this paper. Our work focuses on uncovering the mechanisms behind knowledge conflicts in LMs, thereby better controlling the model in retrieval augmentation and tool augmentation. Through effective intervention, our method can make the LM more controllable and trustworthy. On the one hand, it can prevent prompt injections from attacking the model, and on the other hand, it can correct the biased knowledge that the model learned during pre-training.
290
+
291
+ Nonetheless, the impact of head pruning on the model's original capabilities remains unexplored. These factors should be taken into careful consideration for future research.
292
+
293
+ # References
294
+
295
+ Vaibhav Adlakha, Parishad BehnamGhader, Xing Han Lu, Nicholas Meade, and Siva Reddy. 2023. Evaluating correctness and faithfulness of instruction-following models for question answering. arXiv preprint arXiv:2307.16877.
296
+ Hritik Bansal, Karthik Gopalakrishnan, Saket Dingliwal, Sravan Bodapati, Katrin Kirchhoff, and Dan Roth. 2023. Rethinking the role of scale for in-context learning: An interpretability-based case study at 66 billion scale. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11833-11856, Toronto, Canada. Association for Computational Linguistics.
297
+ Stella Biderman, Hailey Schoelkopf, Quentin Gregory Anthony, Herbie Bradley, Kyle O'Brien, Eric Hallahan, Mohammad Aflah Khan, Shivanshu Purohit, USVSN Sai Prashanth, Edward Raff, et al. 2023. Pythia: A suite for analyzing large language models across training and scaling. In International Conference on Machine Learning, pages 2397-2430. PMLR.
298
+ Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.
299
+ Nick Cammarata, Shan Carter, Gabriel Goh, Chris Olah, Michael Petrov, Ludwig Schubert, Chelsea Voss, Ben Egan, and Swee Kiat Lim. 2020. Thread: Circuits. Distill. https://distill.pub/2020/circuits.
300
+ Hung-Ting Chen, Michael Zhang, and Eunsol Choi. 2022. Rich knowledge sources bring complex knowledge conflicts: Recalibrating models to reflect conflicting evidence. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 2292-2307, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
301
+ Damai Dai, Li Dong, Yaru Hao, Zhifang Sui, Baobao Chang, and Furu Wei. 2022. Knowledge neurons in pretrained transformers. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 8493-8502, Dublin, Ireland. Association for Computational Linguistics.
302
+
303
304
+ Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. 2021. A mathematical framework for transformer circuits. Transformer Circuits Thread, 1.
305
+ Mor Geva, Jasmijn Bastings, Katja Filippova, and Amir Globerson. 2023. Dissecting recall of factual associations in auto-regressive language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12216-12235, Singapore. Association for Computational Linguistics.
306
+ Mor Geva, Avi Caciularu, Kevin Wang, and Yoav Goldberg. 2022. Transformer feed-forward layers build predictions by promoting concepts in the vocabulary space. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 30-45, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
307
+ Mor Geva, Roei Schuster, Jonathan Berant, and Omer Levy. 2021. Transformer feed-forward layers are key-value memories. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5484-5495, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
308
+ Nicholas Goldowsky-Dill, Chris MacLeod, Lucas Sato, and Aryaman Arora. 2023. Localizing model behavior with path patching. arXiv preprint arXiv:2304.05969.
309
+ Danny Halawi, Jean-Stanislas Denain, and Jacob Steinhardt. 2023. Overthinking the truth: Understanding how language models process false demonstrations. arXiv preprint arXiv:2307.09476.
310
+ Michael Hanna, Ollie Liu, and Alexandre Variengien. 2023. How does GPT-2 compute greater-than?: Interpreting mathematical abilities in a pre-trained language model. In Thirty-seventh Conference on Neural Information Processing Systems.
311
+ Yaru Hao, Li Dong, Furu Wei, and Ke Xu. 2021. Self-attention attribution: Interpreting information interactions inside transformer. Proceedings of the AAAI Conference on Artificial Intelligence, 35(14):12963-12971.
312
+ Roee Hendel, Mor Geva, and Amir Globerson. 2023. In-context learning creates task vectors. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 9318-9333, Singapore. Association for Computational Linguistics.
313
+ Zhuoran Jin, Pengfei Cao, Yubo Chen, Kang Liu, Xiaojian Jiang, Jiexin Xu, Qiumia Li, and Jun Zhao. 2024. Tug-of-war between knowledge: Exploring and resolving knowledge conflicts in retrieval-augmented language models. arXiv preprint arXiv:2402.14409.
314
+
315
316
+ Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natural Questions: A Benchmark for Question Answering Research. Transactions of the Association for Computational Linguistics, 7:453-466.
317
+ Daliang Li, Ankit Singh Rawat, Manzil Zaheer, Xin Wang, Michal Lukasik, Andreas Veit, Felix Yu, and Sanjiv Kumar. 2023a. Large language models with controllable working memory. In Findings of the Association for Computational Linguistics: ACL 2023, pages 1774-1793, Toronto, Canada. Association for Computational Linguistics.
318
+ Xiang Lisa Li, Ari Holtzman, Daniel Fried, Percy Liang, Jason Eisner, Tatsunori Hashimoto, Luke Zettlemoyer, and Mike Lewis. 2023b. Contrastive decoding: Open-ended text generation as optimization. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12286-12312, Toronto, Canada. Association for Computational Linguistics.
319
+ Shayne Longpre, Kartik Perisetla, Anthony Chen, Nikhil Ramesh, Chris DuBois, and Sameer Singh. 2021. Entity-based knowledge conflicts in question answering. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7052-7063, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
320
+ Kevin Meng, David Bau, Alex Andonian, and Yonatan Belinkov. 2022. Locating and editing factual associations in gpt. Advances in Neural Information Processing Systems, 35:17359-17372.
321
+ Paul Michel, Omer Levy, and Graham Neubig. 2019. Are sixteen heads really better than one? In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.
322
+ Ella Neeman, Roee Aharoni, Or Honovich, Leshem Choshen, Idan Szpektor, and Omri Abend. 2023. DisentQA: Disentangling parametric and contextual knowledge with counterfactual question answering. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10056-10070, Toronto, Canada. Association for Computational Linguistics.
323
+ Catherine Olsson, Nelson Elhage, Neel Nanda, Nicholas Joseph, Nova DasSarma, Tom Henighan, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, et al. 2022. In-context learning and induction heads. arXiv preprint arXiv:2209.11895.
324
+ OpenAI. 2023. Gpt-4 technical report.
325
+
326
+ Cheng Qian, Xinran Zhao, and Sherry Tongshuang Wu. 2023. "merge conflicts!" exploring the impacts of external distractors to parametric knowledge graphs. ArXiv, abs/2309.08594.
327
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
328
+ Mansi Sakarvadia, Aswathy Ajith, Arham Khan, Daniel Grzenda, Nathaniel Hudson, Andre Bauer, Kyle Chard, and Ian Foster. 2023a. Memory injections: Correcting multi-hop reasoning failures during inference in transformer-based language models. In Proceedings of the 6th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pages 342-356, Singapore. Association for Computational Linguistics.
329
+ Mansi Sakarvadia, Arham Khan, Aswathy Ajith, Daniel Grzenda, Nathaniel Hudson, Andre Bauer, Kyle Chard, and Ian Foster. 2023b. Attention lens: A tool for mechanistically interpreting the attention head information retrieval mechanism.
330
+ Tal Schuster, Adam Fisch, Tommi Jaakkola, and Regina Barzilay. 2021. Consistent accelerated inference via confident adaptive transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 4962-4979, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
331
+ Weijia Shi, Xiaochuang Han, Mike Lewis, Yulia Tsvetkov, Luke Zettlemoyer, and Scott Wen-tau Yih. 2023. Trusting your evidence: Hallucinate less with context-aware decoding. arXiv preprint arXiv:2305.14739.
332
+ Alessandro Stolfo, Yonatan Belinkov, and Mrinmaya Sachan. 2023. A mechanistic interpretation of arithmetic reasoning in language models using causal mediation analysis. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 7035-7052, Singapore. Association for Computational Linguistics.
333
+ Eric Todd, Millicent L. Li, Arnab Sen Sharma, Aaron Mueller, Byron C. Wallace, and David Bau. 2023. Function vectors in large language models.
334
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
335
+ Jesse Vig, Sebastian Gehrmann, Yonatan Belinkov, Sharon Qian, Daniel Nevo, Yaron Singer, and Stuart Shieber. 2020. Investigating gender bias in language models using causal mediation analysis. In Advances in Neural Information Processing Systems, volume 33, pages 12388-12401. Curran Associates, Inc.
336
+
337
+ Ben Wang and Aran Komatsuzaki. 2021. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax.
338
+ Kevin Ro Wang, Alexandre Variengien, Arthur Conmy, Buck Shlegeris, and Jacob Steinhardt. 2023a. Interpretability in the wild: a circuit for indirect object identification in GPT-2 small. In The Eleventh International Conference on Learning Representations.
339
+ Lean Wang, Lei Li, Damai Dai, Deli Chen, Hao Zhou, Fandong Meng, Jie Zhou, and Xu Sun. 2023b. Label words are anchors: An information flow perspective for understanding in-context learning. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 9840-9855, Singapore. Association for Computational Linguistics.
340
+ Yike Wang, Shangbin Feng, Heng Wang, Weijia Shi, Vidhisha Balachandran, Tianxing He, and Yulia Tsvetkov. 2023c. Resolving knowledge conflicts in large language models. arXiv preprint arXiv:2310.00935.
341
+ Jian Xie, Kai Zhang, Jiangjie Chen, Renze Lou, and Yu Su. 2023. Adaptive chameleon or stubborn sloth: Unraveling the behavior of large language models in knowledge conflicts. arXiv preprint arXiv:2305.13300.
342
+ Yi Yang, Hanyu Duan, Ahmed Abbasi, John P. Lalor, and Kar Yan Tam. 2023. Bias a-head? Analyzing bias in transformer-based language model attention heads.
343
+ Qinan Yu, Jack Merullo, and Ellie Pavlick. 2023. Characterizing mechanisms for factual recall in language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 9924-9959, Singapore. Association for Computational Linguistics.
344
+ Fred Zhang and Neel Nanda. 2024. Towards best practices of activation patching in language models: Metrics and methods.
345
+ Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, Todor Mihaylov, Myle Ott, Sam Shleifer, Kurt Shuster, Daniel Simig, Punit Singh Koura, Anjali Sridhar, Tianlu Wang, and Luke Zettlemoyer. 2022. OPT: Open pre-trained transformer language models.
346
+ Wenxuan Zhou, Sheng Zhang, Hoifung Poon, and Muhao Chen. 2023. Context-faithful prompting for large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 14544–14556, Singapore. Association for Computational Linguistics.
347
+
348
+ # A Related Work
349
+
350
+ # A.1 Investigating Knowledge Conflict
351
+
352
+ Previous research (Longpre et al., 2021; Chen et al., 2022; Yu et al., 2023; Xie et al., 2023; Wang et al., 2023c; Neeman et al., 2023; Jin et al., 2024) on knowledge conflicts primarily seeks to answer the question: do language models prefer internal memory or external context? Yu et al. (2023) find that language models are more inclined to rely on internal memory as the frequency of a fact in the pre-training corpus increases. Xie et al. (2023) demonstrate that large language models (LLMs) are highly receptive to external conflicting evidence. They also reveal that when evidence both supporting and contradicting their internal memory is present, LLMs show a strong confirmation bias and tend to cling to their parametric memory. These observations contribute to a better understanding of knowledge conflicts; however, the underlying mechanism remains unclear. We observe that knowledge conflicts arise when the late attention heads integrate different information flows from internal memory and external context.
353
+
354
+ # A.2 Resolving Knowledge Conflict
355
+
356
+ Existing work (Shi et al., 2023; Zhou et al., 2023; Li et al., 2023a; Yu et al., 2023; Qian et al., 2023) has conducted preliminary exploration into the mitigation of knowledge conflicts. Shi et al. (2023) propose a simple method that encourages the LM to attend to the external context via contrastive decoding (Li et al., 2023b). Yu et al. (2023) use head attribution to identify individual attention heads that promote either the memorized answer or the in-context answer, then scale the value vectors of these heads to increase the rate of in-context answers. Our work is inspired by their exploration of attention heads, and we conduct further analysis to better understand how knowledge conflicts are formed. Furthermore, while most existing methods (Shi et al., 2023; Yu et al., 2023) primarily focus on improving the model's faithfulness to the context, enabling the model to adhere to its internal memory remains a challenging task.
357
+
358
+ # A.3 Mechanistic Interpretability
359
+
360
+ Recently, there has been growing interest in the mechanistic interpretability (Cammarata et al., 2020; Elhage et al., 2021) of parametric knowledge in LMs, with efforts focusing on reverse-engineering the computational processes of model parameters. Dai et al. (2022) use a knowledge attribution method (Hao et al., 2021) to identify the knowledge neurons in FFNs. Meng et al. (2022) reveal that FFNs at a range of middle layers can recall facts, using the causal mediation analysis method (Vig et al., 2020). Geva et al. (2023) find that knowledge extraction is typically done via attention heads. There are also works investigating LMs in mathematical reasoning (Hanna et al., 2023; Stolfo et al., 2023) and in-context learning (Hendel et al., 2023; Olsson et al., 2022; Bansal et al., 2023), as well as studies (Yang et al., 2023; Sakarvadia et al., 2023a,b; Zhang and Nanda, 2024) focused on interpreting attention heads in LMs. Our work is highly inspired by this previous wisdom in mechanistic interpretability, focusing on interpreting and mitigating knowledge conflicts in LMs.
363
+
364
+ # B Implement Details
365
+
366
+ # B.1 Datasets
367
+
368
+ We construct the Official Language, Country, and Continent datasets by sampling knowledge triples from Wikidata. The Official Language dataset requires the LM to predict the official language of the given city or country:
369
+
370
+ The official language of $\{s\}$ is $\{a_{c}\}$. Q: What is the official language of $\{s\}$? A:
373
+
374
+ The Country dataset requires the LM to predict the country to which the given city belongs:
375
+
376
+ The city $\{s\}$ is located in $\{a_{c}\}$. Q: Which country is the city $\{s\}$ in? A:
379
+
380
+ The Continent dataset requires the LM to predict the continent on which the given country is located:
381
+
382
+ $\{s\}$ is in the continent of $\{a_{c}\}$. Q: Which continent is $\{s\}$ located in? A:
385
+
386
+ We also generate a more complex World Capital D dataset based on the World Capital dataset, using gpt-3.5-turbo to rewrite the external context from triplet form into document form.
387
+
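+ The following is a minimal sketch of how such conflicting QA prompts can be assembled from sampled knowledge triples; the `TEMPLATES` dictionary and `build_prompt` helper are illustrative names, not part of our released code.
+ 
+ ```python
+ # Illustrative only: fill each dataset's template with a subject {s}
+ # and a counterfactual answer {a_c} that conflicts with internal memory.
+ TEMPLATES = {
+     "official_language": "The official language of {s} is {a_c}. "
+                          "Q: What is the official language of {s}? A:",
+     "country": "The city {s} is located in {a_c}. "
+                "Q: Which country is the city {s} in? A:",
+     "continent": "{s} is in the continent of {a_c}. "
+                  "Q: Which continent is {s} located in? A:",
+ }
+ 
+ def build_prompt(relation: str, subject: str, conflict_answer: str) -> str:
+     return TEMPLATES[relation].format(s=subject, a_c=conflict_answer)
+ 
+ # e.g. build_prompt("country", "Paris", "Italy") ->
+ # "The city Paris is located in Italy. Q: Which country is the city Paris in? A:"
+ ```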
388
+ # B.2 Hyperparameter Settings
389
+
390
+ Our implementation is based on HuggingFace's Transformers<sup>1</sup>, PyTorch<sup>2</sup> and Baukit<sup>3</sup>. For the Prompt method, we use the following prompt to enhance the internal memory:
391
+
392
+ Please answer the question based on your internal memory, ignoring the given context.
395
+
396
+ ![](images/86ec390da517409e9b4aa43fbb3101cd21773f73b7522ea5cc3962fc44308961.jpg)
397
+ Figure 5: Effect of FFNs in GPT-J on internal memory.
398
+
399
+ ![](images/80f1912e97444efcacc758a36a5289ccb880178190742a88191fa338b6b208cf.jpg)
400
+ Figure 6: Effect of MHAs in GPT-J on internal memory.
401
+
402
+ and we use the following prompt to enhance the external context:
403
+
404
+ Please answer the question based on the given context, ignoring your internal memory.
405
+
406
+ For Gradient and PH3, we select the optimal pruning rate $k \in \{1, 3, 5, 7, 9, 15\}$ on a development set of 200 samples. To mitigate knowledge conflicts, setting the pruning rate $k$ of PH3 to 5 usually achieves excellent results; for enhancing open-domain QA capabilities, we usually set the pruning rate $k$ of PH3 to 3. Details about the models used in this paper are given in Table 4. All experiments are conducted on NVIDIA RTX A6000 GPUs.
407
+
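+ As an illustration of what pruning $k$ attention heads amounts to at inference time, the sketch below zero-ablates selected heads of GPT-2 XL via the `head_mask` argument of HuggingFace Transformers; the (layer, head) pairs are hypothetical placeholders, and this is a simplified stand-in rather than our exact Baukit-based implementation.
+ 
+ ```python
+ # Minimal sketch: nullify selected attention heads with head_mask.
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ 
+ model = AutoModelForCausalLM.from_pretrained("gpt2-xl")
+ tok = AutoTokenizer.from_pretrained("gpt2-xl")
+ 
+ n_layers, n_heads = model.config.n_layer, model.config.n_head  # 48, 25
+ head_mask = torch.ones(n_layers, n_heads)
+ for layer, head in [(40, 3), (45, 17)]:  # hypothetical pruned heads
+     head_mask[layer, head] = 0.0         # zero this head's contribution
+ 
+ inputs = tok("The capital of France is", return_tensors="pt")
+ logits = model(**inputs, head_mask=head_mask).logits
+ ```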
408
+ # C Additional Results for GPT-J
409
+
410
+ We provide here additional results for GPT-J. Figures 5 and 6 show the effect of FFNs and MHAs on internal memory, and Figures 7 and 8 show the effect of FFNs and MHAs on external context. Figures 9 and 10 illustrate the information flow in GPT-J with the window size $W = 9$. Figure 11 shows the information flow in GPT-J when providing both supporting context and conflicting context relative to internal memory. Figures 12 and 13 show the gradient-based importance scores of memory heads and context heads in GPT-J.
411
+
412
+ ![](images/e3db78594dfff3482f3e7f779b0f20cca6dd439b4fd171aa5d9529350b8d1faf.jpg)
413
+ Figure 7: Effect of FFNs in GPT-J on external context.
414
+
415
+ ![](images/5b82b540f33e1ca31853b26b34e902f47567e6d5c17747539442cefcd141f73b.jpg)
416
+ Figure 8: Effect of MHAs in GPT-J on external context.
417
+
418
+ # D Method Details
419
+
420
+ To calculate the importance score $S_{m}^{\ell,h}$ of the target head $h$, our path patching method consists of the following three steps:
421
+
422
+ 1. Run on the original input $x\in \mathcal{D}_m$ to record the original activations of all heads;
423
+ 2. Run on the corrupted input $\tilde{x}$ to record the corrupted activations of all heads, where $\tilde{x}$ is:
424
+
425
+ The capital of $\langle \mathrm{unk}\rangle$ is $\{a_{c}\}$. Q: What is the capital of $\langle \mathrm{unk}\rangle$? A:
426
+
427
+ where $\langle \mathrm{unk}\rangle$ is a special token;
428
+
429
+ 3. Run on the original input $x$, keeping all heads frozen to their activations on $x$, except for the target head $h$, whose activation is set to its value on the corrupted input $\tilde{x}$. Then measure the importance score as the change in output logits.
430
+
431
+ The importance score $S_{m}^{\ell,h}$ of head $h$ is computed as:
432
+
433
+ $$
434
+ S_{m}^{\ell,h}(\mathcal{D}_{m}) = \mathbb{E}_{x}\left[\left(\mathbb{P}_{x}(a_{m}) - \mathbb{P}_{x}(a_{c})\right) - \left(\mathbb{P}_{\tilde{x}}(a_{m}) - \mathbb{P}_{\tilde{x}}(a_{c})\right)\right]. \tag{13}
435
+ $$
436
+
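+ A small sketch of how Eq. (13) is evaluated in practice is given below, assuming the next-token probabilities of $a_m$ and $a_c$ have already been recorded for the original run (step 1) and for the patched run of step 3; the tensor names are hypothetical.
+ 
+ ```python
+ import torch
+ 
+ def importance_score(p_orig_am, p_orig_ac, p_patch_am, p_patch_ac):
+     """Mean change in the P(a_m) - P(a_c) margin caused by patching head h.
+     Each argument is a (batch,) tensor of probabilities over D_m."""
+     margin_orig = p_orig_am - p_orig_ac     # margin in the original run
+     margin_patch = p_patch_am - p_patch_ac  # margin after patching head h
+     return (margin_orig - margin_patch).mean()
+ ```
+ 
+ Under this convention, a large positive score indicates that corrupting head $h$ substantially weakens the model's preference for the memory answer $a_m$ over the conflicting answer $a_c$.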
437
+ # E Heatmaps of Attention Heads
438
+
439
+ We calculate the importance scores of memory heads and context heads via our path patching method,
440
+
441
+ ![](images/47a583a7507ef6d69f3d1293cfb7f493a60e7084eb8b5d3ae8a452575b9d9b96.jpg)
442
+ Figure 9: Relative change in the GPT-J's prediction probability based on internal memory.
443
+
444
+ ![](images/45af20d9a70c7e6dea82cc4702fe5217e22f0d98c4de21ce5443d8b891143310.jpg)
445
+ Figure 10: Relative change in the GPT-J's prediction probability based on external context.
446
+
447
+ then provide the heatmaps for GPT-2 XL (Figures 15 and 16), GPT-J (Figures 17 and 18), OPT-1.3B (Figures 19 and 20), OPT-2.7B (Figures 21 and 22), Pythia-6.9B (Figures 23 and 24), Pythia-12B (Figures 25 and 26), LLaMA2-7B (Figures 27 and 28) and LLaMA2-13B (Figures 29 and 30). The red squares indicate heads that have a significant positive impact, while the blue squares represent heads that have a negative effect.
448
+
449
+ # F Additional Experimental Results
450
+
451
+ We report experimental results in Tables 2 and 3.
452
+
453
+ # G Number of Pruning Heads
454
+
455
+ As shown in Figures 31 to 42, we analyze the impact of the number of pruned heads (sparsity ratio) on the Gradient and PH3 methods.
456
+
457
+ ![](images/6369a6cccf966c54186bf2b038216f246e7df599194e55ac532e9390e33a1a39.jpg)
458
+ Figure 11: Relative change in the GPT-J's prediction probability based on internal memory when providing both supporting context and conflicting context.
459
+
460
+ ![](images/2e8463dd90d534b95134bb9d8e281c06a4e4a322b969680465513fb0534a44bc.jpg)
461
+ Figure 12: Memory Heads of GPT-J.
462
+
463
+ ![](images/cfec2659e5c9251340d4fb3ec2c5fc65ea77f4864599ac1603928074caeb34f0.jpg)
464
+ Figure 13: Context Heads of GPT-J.
465
+
466
+ ![](images/af2b01d12f70a41cd7e5dca4407e70e9d9a41e24e8a51cd8dd03c76babb5b26d.jpg)
467
+ (1) Gradient-based method.
468
+
469
+ ![](images/cc7d56669ad1f6781a9b0a4a0de0045f10ce3dfe6a54a58c69d766f2d9566f27.jpg)
470
+ (a) Original run. (b) Corrupted run. (c) Run with replaced activations.
472
+ (2) Path patching method (Ours).
473
+
474
+ ![](images/15d173560d4f193c8a0a01407e86ba08db9198e014246aaf2eebc2c01f5aab6e.jpg)
475
+ Figure 14: Illustration of gradient-based method and our path patching method.
476
+
477
+ ![](images/e21be79c067f5aa03eac50805343c09afcad470702959c15c784ddc272bdf512.jpg)
478
+ Figure 15: Memory Heads of GPT-2 XL.
479
+
480
+ ![](images/648385a211e8274177022abee3dfc4bd0a44c1616c1eb58fe873c40d7e3f6d64.jpg)
481
+ Figure 17: Memory Heads of GPT-J.
482
+
483
+ ![](images/b7053adfdfa1e5aaf561e2348279dd9f47cbe3f3af96389ebadac73498208a91.jpg)
484
+ Figure 16: Context Heads of GPT-2 XL.
485
+
486
+ ![](images/c7b438beeb348a0a16d712efba6075e8d801750502192e6ec99d97673faab889.jpg)
487
+ Figure 18: Context Heads of GPT-J.
488
+
489
+ ![](images/29fc95a04bafa67d9f53517885d0711361ad95d60f528bbff962f467e78adb73.jpg)
490
+ Figure 19: Memory Heads of OPT-1.3B.
491
+
492
+ ![](images/6937e07f29ce474459a136deb49193e8f1b197413922e68ee997aceef1b4e58c.jpg)
493
+ Figure 22: Context Heads of OPT-2.7B.
494
+
495
+ ![](images/ce68eef5d69f377ef52dbc9c9c228debc4c4fb0e327ef653abb26d8f1de3f393.jpg)
496
+ Figure 20: Context Heads of OPT-1.3B.
497
+
498
+ ![](images/e17813bf96c3551b8f4073adc0e1a1c2c45cf9f3b1dd4dae21b5316b6c74834d.jpg)
499
+ Figure 23: Memory Heads of Pythia-6.9B.
500
+
501
+ ![](images/8a1cf8e56240ea8955b69a2ff5a3456a536ff6093641382adf70133d7766d377.jpg)
502
+ Figure 21: Memory Heads of OPT-2.7B.
503
+
504
+ ![](images/4312feaa3e53c02aa7baffef8ccfa922388bb8e3cb501b9d5d101b6edbd4500b.jpg)
505
+ Figure 24: Context Heads of Pythia-6.9B.
506
+
507
+ ![](images/07b5431374ff5fa9b312e379113a5546d2f38f4568be82e71cc5b57ad1075139.jpg)
508
+ Figure 25: Memory Heads of Pythia-12B.
509
+
510
+ ![](images/a3bef8e6a72c8d6e98a48f46bed6a4ee7f13aa0f9e2de0aff2a732acb64b4b55.jpg)
511
+ Figure 28: Context Heads of LLaMA2-7B.
512
+
513
+ ![](images/da1cdf583d8e19b5e35e9c371c858792bd7ea41214c212996720e44acf8e1234.jpg)
514
+ Figure 26: Context Heads of Pythia-12B.
515
+
516
+ ![](images/45793a57cd643b48e7d7dfa3f1bd7bc60f760daec9d9700e5880180f559676a0.jpg)
517
+ Figure 29: Memory Heads of LLaMA2-13B.
518
+
519
+ ![](images/1dea2c86d3f2e0ed2c4f2523e5a511c7a4002385b456aa816da1d02b46a7ae40.jpg)
520
+ Figure 27: Memory Heads of LLaMA2-7B.
521
+
522
+ ![](images/91803e62d5e2e1635a3b35d82468e8ab756c3e8a58a12bfea4c6069892a263c8.jpg)
523
+ Figure 30: Context Heads of LLaMA2-13B.
524
+
525
+ ![](images/d328f1c1af6ddd8a42560345299a610add754e732ca13a1503baede9d11e9bee.jpg)
526
+ Figure 31: Impact of GPT-2 XL's sparsity ratio on improving internal memory usage rate.
527
+
528
+ ![](images/787296c7e51b177dd139509f1a9c5f39e0574876e132165f446b85323ab66019.jpg)
529
+ Figure 34: Impact of GPT-J's sparsity ratio on improving external context usage rate.
530
+
531
+ ![](images/6c845b2dd72d1975e141d9764bb20466c42db1ddff02b15a91c340b959cf3762.jpg)
532
+ Figure 32: Impact of GPT-2 XL's sparsity ratio on improving external context usage rate.
533
+
534
+ ![](images/59135ec89298897ceec36ccaf1cc5e6a0036106e7dc0ef8b0eae73e5b199ed52.jpg)
535
+ Figure 35: Impact of OPT-2.7B's sparsity ratio on improving internal memory usage rate.
536
+
537
+ ![](images/7742a2b4c32ad6899f735a87e25b583148b7119a53a8ce23ba47e5395818d859.jpg)
538
+ Figure 33: Impact of GPT-J's sparsity ratio on improving internal memory usage rate.
539
+
540
+ ![](images/62029cb7d74e1f53baaccb1b5ba31b8fd1e74d9087f87aedc8b6ff7952a1529d.jpg)
541
+ Figure 36: Impact of OPT-2.7B's sparsity ratio on improving external context usage rate.
542
+
543
+ ![](images/e7217c230b2b9b092ebe0b0f6ff3953d9e0cccb89181686ab1ce01eca17058e1.jpg)
544
+ Figure 37: Impact of Pythia-6.9B's sparsity ratio on improving internal memory usage rate.
545
+
546
+ ![](images/420744f4dff37330ad1ac01260649c0986db7624a2498b7de04c344277440880.jpg)
547
+ Figure 40: Impact of LLaMA2-7B's sparsity ratio on improving external context usage rate.
548
+
549
+ ![](images/4fee32a74420692ad8b69819e6ba43f050c5d85befcf2e3f2b1a5da4aaff5b48.jpg)
550
+ Figure 38: Impact of Pythia-6.9B's sparsity ratio on improving external context usage rate.
551
+
552
+ ![](images/d3152cb98461778eceee2b485dd997cd57254a332630315a113d6fb29144fa13.jpg)
553
+ Figure 41: Impact of LLaMA2-13B's sparsity ratio on improving internal memory usage rate.
554
+
555
+ ![](images/1f176b23ae1bc4c932b0fda378c65879339a186b3eaedc027ab4f53b3151c2c5.jpg)
556
+ Figure 39: Impact of LLaMA2-7B's sparsity ratio on improving internal memory usage rate.
557
+
558
+ ![](images/7c023b83e80f4c2fd3325f88ca1ed387e1b5d440c13db4079cb50817ec1a8b28.jpg)
559
+ Figure 42: Impact of LLaMA2-13B's sparsity ratio on improving external context usage rate.
560
+
561
+ <table><tr><td rowspan="2">Model</td><td rowspan="2" colspan="2">Method</td><td colspan="2">World Capital</td><td colspan="2">World Capital D</td><td colspan="2">Official Language</td><td colspan="2">Country</td><td colspan="2">Continent</td></tr><tr><td>RM</td><td>RC</td><td>RM</td><td>RC</td><td>RM</td><td>RC</td><td>RM</td><td>RC</td><td>RM</td><td>RC</td></tr><tr><td rowspan="9">OPT-1.3B</td><td colspan="2">Base</td><td>40.5</td><td>59.5</td><td>36.3</td><td>63.7</td><td>20.3</td><td>79.7</td><td>26.8</td><td>73.2</td><td>19.2</td><td>80.8</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>19.7</td><td>78.4</td><td>37.5</td><td>57.2</td><td>7.9</td><td>91.4</td><td>16.9</td><td>82.6</td><td>7.9</td><td>90.1</td></tr><tr><td>Gradient</td><td>82.7</td><td>12.2</td><td>56.4</td><td>21.2</td><td>37.1</td><td>50.4</td><td>38.0</td><td>61.1</td><td>20.5</td><td>55.9</td></tr><tr><td>PH3 (Ours)</td><td>95.0</td><td>0.2</td><td>87.2</td><td>1.9</td><td>70.3</td><td>16.6</td><td>47.7</td><td>49.1</td><td>43.4</td><td>51.7</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>17.0</td><td>81.4</td><td>38.5</td><td>57.7</td><td>9.3</td><td>89.5</td><td>15.9</td><td>83.8</td><td>6.3</td><td>93.3</td></tr><tr><td>CAD</td><td>8.1</td><td>86.5</td><td>31.6</td><td>60.0</td><td>3.2</td><td>89.6</td><td>0.1</td><td>99.5</td><td>5.1</td><td>89.1</td></tr><tr><td>Gradient</td><td>22.9</td><td>73.7</td><td>35.7</td><td>63.8</td><td>12.4</td><td>82.5</td><td>16.3</td><td>82.9</td><td>17.6</td><td>80.2</td></tr><tr><td>PH3 (Ours)</td><td>0.2</td><td>97.0</td><td>7.9</td><td>69.6</td><td>12.8</td><td>84.1</td><td>4.0</td><td>85.4</td><td>1.6</td><td>44.1</td></tr><tr><td>+ Prompt</td><td>0.4</td><td>99.2</td><td>10.8</td><td>68.8</td><td>9.3</td><td>88.3</td><td>2.4</td><td>92.5</td><td>2.5</td><td>92.1</td></tr><tr><td rowspan="9">OPT-2.7B</td><td colspan="2">Base</td><td>40.2</td><td>59.8</td><td>46.6</td><td>53.4</td><td>8.8</td><td>91.2</td><td>26.5</td><td>73.5</td><td>4.3</td><td>95.7</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>17.7</td><td>80.3</td><td>24.2</td><td>71.6</td><td>3.4</td><td>96.3</td><td>12.7</td><td>87.2</td><td>1.6</td><td>98.1</td></tr><tr><td>Gradient</td><td>75.4</td><td>19.3</td><td>10.3</td><td>79.7</td><td>13.3</td><td>57.7</td><td>42.9</td><td>56.3</td><td>5.3</td><td>94.1</td></tr><tr><td>PH3 (Ours)</td><td>93.4</td><td>0.5</td><td>2.8</td><td>87.6</td><td>75.6</td><td>1.5</td><td>56.3</td><td>38.9</td><td>29.3</td><td>55.5</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>4.7</td><td>94.9</td><td>11.1</td><td>86.9</td><td>3.2</td><td>96.3</td><td>13.2</td><td>86.5</td><td>0.7</td><td>99.0</td></tr><tr><td>CAD</td><td>10.8</td><td>72.2</td><td>28.8</td><td>46.3</td><td>2.7</td><td>87.5</td><td>9.0</td><td>89.1</td><td>0.5</td><td>99.0</td></tr><tr><td>Gradient</td><td>36.1</td><td>62.9</td><td>43.3</td><td>53.5</td><td>7.3</td><td>91.6</td><td>23.7</td><td>76.3</td><td>4.1</td><td>95.9</td></tr><tr><td>PH3 (Ours)</td><td>1.3</td><td>97.8</td><td>3.4</td><td>81.6</td><td>4.1</td><td>94.9</td><td>9.0</td><td>90.9</td><td>0.9</td><td>98.6</td></tr><tr><td>+ Prompt</td><td>0.8</td><td>98.3</td><td>1.3</td><td>94.6</td><td>1.5</td><td>98.2</td><td>6.9</td><td>93.1</td><td>0.0</td><td>99.5</td></tr><tr><td rowspan="9">Pythia-6.9B</td><td colspan="2">Base</td><td>53.3</td><td>46.6</td><td>74.8</td><td>25.2</td><td>49.7</td><td>50.3</td><td>41.3</td><td>58.7</td><td>39.1</td><td>60.9</td></tr><tr><td rowspan="3">↑ 
Memory</td><td>Prompt</td><td>44.7</td><td>51.2</td><td>41.8</td><td>37.8</td><td>16.5</td><td>81.4</td><td>12.8</td><td>87.1</td><td>36.2</td><td>61.6</td></tr><tr><td>Gradient</td><td>56.5</td><td>35.8</td><td>72.8</td><td>22.4</td><td>56.7</td><td>36.3</td><td>37.9</td><td>62.1</td><td>40.1</td><td>58.9</td></tr><tr><td>PH3 (Ours)</td><td>90.2</td><td>6.7</td><td>88.4</td><td>10.2</td><td>71.5</td><td>11.1</td><td>41.8</td><td>57.3</td><td>66.0</td><td>31.0</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>32.7</td><td>63.7</td><td>32.0</td><td>44.8</td><td>8.9</td><td>90.5</td><td>10.4</td><td>89.5</td><td>31.7</td><td>75.6</td></tr><tr><td>CAD</td><td>14.3</td><td>55.1</td><td>22.0</td><td>27.6</td><td>3.3</td><td>78.1</td><td>8.5</td><td>91.2</td><td>12.3</td><td>82.2</td></tr><tr><td>Gradient</td><td>41.4</td><td>53.7</td><td>61.8</td><td>35.6</td><td>48.2</td><td>51.3</td><td>34.3</td><td>65.6</td><td>41.7</td><td>53.0</td></tr><tr><td>PH3 (Ours)</td><td>6.7</td><td>81.7</td><td>34.4</td><td>30.0</td><td>16.4</td><td>70.0</td><td>3.4</td><td>96.3</td><td>3.3</td><td>94.5</td></tr><tr><td>+ Prompt</td><td>0.6</td><td>98.6</td><td>24.4</td><td>60.4</td><td>3.3</td><td>95.6</td><td>0.3</td><td>99.5</td><td>0.7</td><td>98.8</td></tr><tr><td rowspan="9">Pythia-12B</td><td colspan="2">Base</td><td>59.7</td><td>40.3</td><td>64.6</td><td>35.4</td><td>34.3</td><td>65.7</td><td>35.0</td><td>65.0</td><td>43.0</td><td>57.0</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>5.8</td><td>94.1</td><td>43.7</td><td>53.3</td><td>11.1</td><td>85.8</td><td>2.4</td><td>97.5</td><td>10.1</td><td>88.5</td></tr><tr><td>Gradient</td><td>62.9</td><td>27.7</td><td>63.1</td><td>30.2</td><td>39.3</td><td>37.5</td><td>46.2</td><td>53.3</td><td>42.1</td><td>56.9</td></tr><tr><td>PH3 (Ours)</td><td>95.0</td><td>0.6</td><td>82.4</td><td>2.2</td><td>69.9</td><td>6.9</td><td>57.1</td><td>35.9</td><td>70.1</td><td>9.6</td></tr><tr><td rowspan="5">↑ Context</td><td>Prompt</td><td>6.2</td><td>93.6</td><td>34.1</td><td>62.0</td><td>21.1</td><td>77.6</td><td>1.7</td><td>98.3</td><td>6.3</td><td>92.5</td></tr><tr><td>CAD</td><td>3.1</td><td>65.7</td><td>13.6</td><td>42.9</td><td>2.0</td><td>89.4</td><td>3.7</td><td>94.7</td><td>11.3</td><td>80.0</td></tr><tr><td>Gradient</td><td>59.3</td><td>19.6</td><td>33.3</td><td>25.0</td><td>21.3</td><td>54.6</td><td>28.5</td><td>71.3</td><td>40.2</td><td>52.1</td></tr><tr><td>PH3 (Ours)</td><td>18.6</td><td>76.1</td><td>56.9</td><td>33.3</td><td>10.9</td><td>80.4</td><td>16.9</td><td>76.7</td><td>26.9</td><td>67.9</td></tr><tr><td>+ Prompt</td><td>1.9</td><td>97.6</td><td>17.7</td><td>75.8</td><td>3.8</td><td>95.5</td><td>2.2</td><td>97.7</td><td>2.4</td><td>97.0</td></tr><tr><td rowspan="9">LLaMA2-13B</td><td colspan="2">Base</td><td>60.6</td><td>39.4</td><td>74.6</td><td>25.0</td><td>1.6</td><td>98.4</td><td>26.2</td><td>73.7</td><td>5.6</td><td>93.3</td></tr><tr><td rowspan="3">↑ Memory</td><td>Prompt</td><td>0.4</td><td>99.6</td><td>78.5</td><td>21.0</td><td>0.5</td><td>99.5</td><td>22.3</td><td>77.6</td><td>10.7</td><td>89.3</td></tr><tr><td>Gradient</td><td>77.7</td><td>10.0</td><td>89.9</td><td>9.5</td><td>26.1</td><td>68.3</td><td>48.1</td><td>51.4</td><td>30.7</td><td>49.0</td></tr><tr><td>PH3 (Ours)</td><td>86.3</td><td>12.5</td><td>91.5</td><td>8.1</td><td>71.2</td><td>11.6</td><td>47.7</td><td>51.6</td><td>11.9</td><td>45.4</td></tr><tr><td rowspan="5">↑ 
Context</td><td>Prompt</td><td>0.0</td><td>100.0</td><td>70.9</td><td>28.7</td><td>0.0</td><td>100.0</td><td>7.1</td><td>92.7</td><td>3.6</td><td>96.4</td></tr><tr><td>CAD</td><td>6.2</td><td>91.0</td><td>1.1</td><td>98.8</td><td>0.0</td><td>100.0</td><td>0.5</td><td>99.5</td><td>0.0</td><td>99.6</td></tr><tr><td>Gradient</td><td>46.3</td><td>33.9</td><td>74.1</td><td>25.4</td><td>16.5</td><td>83.3</td><td>20.2</td><td>79.5</td><td>3.6</td><td>96.4</td></tr><tr><td>PH3 (Ours)</td><td>6.2</td><td>92.1</td><td>24.1</td><td>75.9</td><td>1.3</td><td>98.7</td><td>5.6</td><td>94.4</td><td>0.4</td><td>99.6</td></tr><tr><td>+ Prompt</td><td>0.0</td><td>100.0</td><td>31.0</td><td>68.0</td><td>0.0</td><td>100.0</td><td>0.0</td><td>100.0</td><td>0.0</td><td>100.0</td></tr></table>
562
+
563
+ Table 2: Experimental results of OPT-1.3B, OPT-2.7B, Pythia-6.9B, Pythia-12B and LLaMA2-13B on five datasets. Bold denotes the best results.
564
+
565
+ <table><tr><td>Method</td><td>GPT-2 XL</td><td>GPT-J</td><td>OPT-2.7B</td></tr><tr><td>Base</td><td>45.6</td><td>54.8</td><td>51.4</td></tr><tr><td>Prompt</td><td>47.6</td><td>57.4</td><td>54.1</td></tr><tr><td>CAD</td><td>44.5</td><td>55.0</td><td>50.2</td></tr><tr><td>Gradient</td><td>45.3</td><td>55.0</td><td>50.8</td></tr><tr><td>PH3 (k=1)</td><td>47.1</td><td>57.5</td><td>53.5</td></tr><tr><td>PH3 (k=3)</td><td>52.2</td><td>58.6</td><td>55.4</td></tr><tr><td>+ Prompt</td><td>54.0</td><td>59.6</td><td>56.7</td></tr><tr><td>PH3 (k=5)</td><td>49.4</td><td>56.3</td><td>54.0</td></tr></table>
566
+
567
+ Table 3: Experimental results (Recall) of GPT-2 XL, GPT-J and OPT-2.7B on the NQ dataset. Bold denotes the best results.
568
+
569
+ <table><tr><td>Model</td><td>#Layer L</td><td>#Head M</td></tr><tr><td>GPT-2 XL</td><td>48</td><td>25</td></tr><tr><td>GPT-J</td><td>28</td><td>16</td></tr><tr><td>OPT-1.3B</td><td>24</td><td>32</td></tr><tr><td>OPT-2.7B</td><td>32</td><td>32</td></tr><tr><td>Pythia-6.9B</td><td>32</td><td>32</td></tr><tr><td>Pythia-12B</td><td>36</td><td>40</td></tr><tr><td>LLaMA2-7B</td><td>32</td><td>32</td></tr><tr><td>LLaMA2-13B</td><td>40</td><td>40</td></tr></table>
570
+
571
+ Table 4: Model details.
2402.18xxx/2402.18154/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93f653b01299ced82ce3c08de31840dcdfb5eb378f2da111d38996da16c3e8ec
3
+ size 1815704
2402.18xxx/2402.18154/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18158/14b81da5-aa54-4e77-9d91-bf6641da0904_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f0c9bf49044bf0e64d37800bb0e92cbc7b62897f3f075fb21b138d9b94470a1
3
+ size 2324391
2402.18xxx/2402.18158/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18158/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0371084c1d811d6036c90a9c15cbb5798163d75461bb1f0cce48cb8af7b5babd
3
+ size 3850136
2402.18xxx/2402.18158/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18191/22c0c70d-15d1-48c3-a330-7b88b8e58e83_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38f76cc5a159e8300a8770688b1f891c8b5fb5c544c9a4c1acaf3b9137bb6f6e
3
+ size 2786938
2402.18xxx/2402.18191/full.md ADDED
@@ -0,0 +1,453 @@
 
 
 
 
 
1
+ # Clustering and Ranking: Diversity-preserved Instruction Selection through Expert-aligned Quality Estimation
2
+
3
+ Yuan Ge $^{1*}$ , Yilun Liu $^{2*}$ , Chi Hu $^{1}$ , Weibin Meng $^{2}$ , Shimin Tao $^{2}$ , Xiaofeng Zhao $^{2}$ , Hongxia Ma $^{2}$ , Li Zhang $^{2}$ , Boxing Chen $^{3}$ , Hao Yang $^{2}$ , Bei Li $^{1}$ , Tong Xiao $^{1,4}$ , Jingbo Zhu $^{1,4}$
4
+
5
+ $^{1}$ Northeastern University, Shenyang, China
6
+
7
+ $^{2}$ Huawei, Beijing, China
8
+
9
+ <sup>3</sup> Huawei Canada, Toronto, Canada
10
+
11
+ <sup>4</sup>NiuTrans Research, Shenyang, China
12
+
13
+ # Abstract
14
+
15
+ With contributions from the open-source community, a vast amount of instruction tuning (IT) data has emerged. Given the significant resource allocation required for training and evaluating models, it is advantageous to have an efficient method for selecting high-quality IT data. However, existing methods for instruction data selection have limitations such as relying on fragile external APIs, being affected by biases in GPT models, or reducing the diversity of the selected instruction dataset. In this paper, we propose an industrial-friendly, expert-aligned and diversity-preserved instruction data selection method: Clustering and Ranking (CaR). CaR employs a two-step process: first, it ranks instruction pairs using a high-accuracy $(84.25\%)$ scoring model aligned with expert preferences; second, it preserves dataset diversity through clustering. In our experiment, CaR efficiently selected a mere $1.96\%$ of Alpaca's IT data, yet the resulting AlpaCaR model surpassed Alpaca's performance by an average of $32.1\%$ in GPT-4 evaluations. Moreover, we find that data selection is a consistent paradigm whether the pre-trained model is more capable or the model parameters are scaled up. Our approach employs compact models with 550M parameters and incurs just $11.2\%$ of the financial outlay of current methods, enhancing its industrial deployability.
16
+
17
+ # 1 Introduction
18
+
19
+ Language Models (LMs) acquire the capability to follow instructions through Instruction Tuning (IT) (Radford et al., 2019; Brown et al., 2020; Zhang et al., 2023), which aligns Large Language Models (LLMs) with critical human standards such as security, privacy, and legal compliance. Self-instruct proposes a novel methodology that utilizes LMs to construct IT datasets (Wang et al., 2022),
20
+
21
+ ![](images/0af3f667e92cf478a6b4fcd6de8101c5852c724a7d9198032a3eb3c954fb9019.jpg)
22
+ Winning Score (compared to reference response)
23
+ Figure 1: Comparison of the proposed AlpaCaR model with established baseline models over four test sets. Our AlpaCaR achieves the best model performance with the smallest amount of instruction tuning data.
24
+
25
+ greatly improving the efficiency of instruction generation. Alpaca leveraged a similar strategy (Taori et al., 2023), utilizing text-davinci-003 to construct the Alpaca_52k dataset; subsequent IT on the LLaMA-7B model (Touvron et al., 2023) led to the creation of Alpaca.
26
+
27
+ Despite these advancements, the quality of instructions remains paramount over their quantity. Zhou et al. (2023) carefully curated 1,000 instructions, ensuring data quality and diversity through human curation, resulting in the LIMA model significantly outperforming Alpaca. Nevertheless, creating high-quality instruction sets through manual annotation is both time-consuming and labor-intensive (Chiang et al., 2023). A promising approach to mitigate this challenge involves filtering a small subset of high-quality and diverse instructions from the vast amounts of existing instruction data.
28
+
29
+ Alpagasus (Chen et al., 2023) introduced a
30
+
31
+ <table><tr><td>Test set</td><td>IQS</td><td>CometInstruct</td><td>GPT-4</td><td>GPT-3.5</td></tr><tr><td>IQE test set</td><td>84.25%</td><td>72.44%</td><td>63.19%</td><td>57.48%</td></tr><tr><td>Vicuna_80</td><td>78.12%</td><td>45.00%</td><td>65.00%</td><td>56.25%</td></tr></table>
32
+
33
+ Table 1: Accuracy of the IQS, CometInstruct and GPT models on test sets, reflecting the alignment of each model with human preferences in the task of Instruction Pair Quality Estimation. The first data row presents results for instruction pairs sourced from the IQE test set, while the second shows accuracy on instruction pairs from Vicuna_80, demonstrating the models' generalization to other distributions; see more details in Appendix C.1. The IQS and CometInstruct models were fine-tuned as described in Appendix C.2, while the GPT models used the prompts referenced in Appendix B.2.
34
+
35
+ straightforward yet effective method that utilizes GPT-3.5-Turbo to filter roughly 9k instructions, surpassing Alpaca's performance. However, this approach overlooks data diversity, and GPT's evaluations rated $17.3\%$ of the instruction pairs generated by text-davinci-003 above 4.5 and $74.9\%$ above 4.0, demonstrating GPT's self-enhancement bias (Zheng et al., 2023) and rendering it unsuitable for assessing instructions generated by models within the same series. Therefore, more authentic human preferences should be used to filter instruction sets. Moreover, relying on fragile and expensive external GPT APIs limits Alpagasus in industrial deployment, especially in low-computation-resource scenarios.
36
+
37
+ In this work, we propose an effective and efficient method for selecting instruction pairs: Clustering and Ranking (CaR). CaR consists of two steps. The first is ranking through quality estimation on instruction pairs, where an expert-aligned scoring model (with only 550M parameters) achieves $84.25\%$ agreement with expert preferences. Then, a clustering step ensures the overall diversity of the dataset, minimizing potential capability gaps. Our contributions are summarized as follows:
38
+
39
+ - We introduce Instruction Pair Quality Estimation (IQE), a new stage before the IT process that aims to use the assessment results of instruction datasets as an aid for the actual fine-tuning of language models and evaluation on benchmarks, reducing the time and computational expense of model performance validation in the IT process by over $90\%$.
40
+ - We propose a novel quality evaluation paradigm for IT datasets that is independent of external APIs and aligns well with human experts' preferences. As shown in Table 1, our small Instruction pair Quality Scoring (IQS) model, compared to GPT-4, achieves a $21.05\%$ improvement in aligning with human preferences for data quality.
43
+
44
+ - We propose CaR, an instruction selection method that aligns with expert insights and preserves diversity, showcasing significant enhancements in model performance and training efficiency. As shown in Fig. 1, CaR uses a small model to filter high-quality instruction data, achieving an average performance exceeding Alpaca by about $13.3\%$ to $32.8\%$ on the Alpaca_52k dataset using only a $1.96\%$ subset of instructions. This implies a reduction of $98\%$ in training time and resources.
45
+ - In Section 5, experiments show that the data selection paradigm remains effective even with more adequate pre-training (LLaMA 1 to LLaMA 3) or with model parameters scaled up (7B to 30B). However, data selection on higher-quality data, such as Alpaca-GPT4 (Peng et al., 2023), remains challenging.
46
+
47
+ In addition, we released our code and models to facilitate future research and industrial endeavors<sup>1</sup>.
48
+
49
+ # 2 Method
50
+
51
+ # 2.1 Motivation
52
+
53
+ Our work is motivated by the challenges of data quality in instruction tuning and the limitations of existing approaches.
54
+
55
+ From Quality Estimation to Instruction Pair Quality Estimation. Quality estimation is a crucial task in machine translation (MT), enabling the assessment of MT models' effectiveness and the selection of high-quality translations for specific purposes, such as manual post-editing. Similarly, LLMs' IT process faces the challenge of rapidly shifting from rare to abundant instruction pairs with inconsistent quality. Ensuring the quality of IT datasets presents a significant challenge, necessitating adjustments to the pre-trained model, executing inference on test datasets, and undergoing evaluation by LLM or human annotators. These processes are not only time-intensive but also demand considerable computational resources. To address this,
56
+
57
+ we propose a paradigm shift from evaluating model performance to assessing IT datasets via IQE. Our goal is to perform a coarse screening of a large number of instructions using IQE, followed by refining and selecting the optimal LLM with minimal datasets to reduce the overall computational cost associated with instruction filtering and verification.
58
+
59
+ GPT as a Judge Exhibits Systematic Bias. Researchers often use GPT preferences as a proxy for human preferences in scenarios requiring human feedback, due to time and cost considerations (Zhou et al., 2023; Rafailov et al., 2023; Dubois et al., 2023; Lee et al., 2023). However, GPT-4 has been shown to exhibit systematic biases in its evaluations, including positional bias, verbosity bias, and self-enhancement bias (Zheng et al., 2024a; Wang et al., 2023a). While researchers generally view Alpaca_52k as needing improvement (Alpaca-DataCleaned$^{2}$; Liu et al., 2023b), GPT's evaluations rated 9k of its instruction pairs above 4.5 and 39k above 4.0. Introducing more realistic human preferences for instruction filtering could further enhance model performance.
60
+
61
+ Instruction Diversity Inspires LLMs' Multi-task Capability. Recent studies have highlighted the importance of data diversity in improving the performance of LLMs (Zhou et al., 2023; Chen et al., 2023). Dong et al. (2023) found that combining training data from various tasks boosts LLMs' performance in low-resource scenarios. Inspired by these findings, we posit that integrating instructions from different tasks enhances LLMs' capabilities in low-resource settings. Consequently, ensuring the diversity of the IT dataset is paramount, particularly when dealing with large-scale models and limited high-quality data for each task.
62
+
63
+ # 2.2 Clustering and Ranking Method
64
+
65
+ Considering the aforementioned motivations, we propose a straightforward yet effective data selection framework, Clustering and Ranking, which integrates the dimensions of quality and diversity. Inspired by Zhou et al. (2023)'s work, we first select a subset that retains a large number of high-quality instructions, then supplement a small number of high-quality instructions from each cluster to enhance data diversity while preserving instruction quality. As illustrated in Fig. 2, the framework begins by evaluating the entire dataset using the IQS model, assigning a quality score to each instruction pair. Subsequently, the cluster model is employed to partition all candidate instruction pairs into $k$ clusters. Finally, all instruction pairs are sorted based on their scores, and the top $n_1$ pairs are selected; within each cluster, the top $n_2$ pairs are chosen based on their scores. The resulting high-quality sub-dataset with preserved diversity is curated by deduplicating the $n_1 + k \times n_2$ pairs of instructions and is intended for the training of AlpaCaR.
68
+
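+ A minimal sketch of this selection loop is shown below; `iqs_score` stands in for the trained IQS scorer of Section 2.3, and the sentence-transformers checkpoint name is an assumption for illustration, not necessarily the one we used.
+ 
+ ```python
+ import numpy as np
+ from sklearn.cluster import KMeans
+ from sklearn.decomposition import PCA
+ from sentence_transformers import SentenceTransformer
+ 
+ def car_select(pairs, iqs_score, n1=1000, n2=1):
+     """pairs: list of instruction-pair strings; returns the selected subset."""
+     scores = np.array([iqs_score(p) for p in pairs])
+     emb = SentenceTransformer("all-MiniLM-L6-v2").encode(pairs)  # 384-dim
+     emb = PCA(n_components=0.95).fit_transform(emb)  # keep 95% variance
+     k = int(np.sqrt(len(pairs) / 2))                 # k = sqrt(n / 2)
+     labels = KMeans(n_clusters=k, n_init=10).fit_predict(emb)
+ 
+     chosen = set(np.argsort(-scores)[:n1].tolist())  # top-n1 overall
+     for c in range(k):                               # plus top-n2 per cluster
+         members = np.where(labels == c)[0]
+         best = members[np.argsort(-scores[members])[:n2]]
+         chosen.update(best.tolist())                 # set() deduplicates
+     return [pairs[i] for i in sorted(chosen)]
+ ```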
69
+ Sections 2.3 and 2.4 provide a comprehensive discussion of the ranking and clustering methodologies implemented in CaR.
70
+
71
+ # 2.3 Single Instruction Pair Quality Estimation
72
+
73
+ To explore the IQE task, we adapt the Comet framework (Rei et al., 2020) and develop a suitable framework for leveraging expert preferences. Our training data is derived from the expert-revised dataset (Liu et al., 2023b), consisting of 3,751 instruction pairs from Alpaca_52k that were refined by linguistic experts to enhance fluency, accuracy, and semantic coherence between questions and responses. We categorize the unedited instructions and responses from text-davinci-003 as GPT Preference, and the expert-revised instructions as Expert Preference. To enable the model to discern features across these categories, we curated 2,541 markedly distinct instruction pairs from the expert-revised dataset, ensuring an edit distance above a small threshold. These instruction pairs are then randomly allocated into training, validation, and test sets following an 8:1:1 distribution.
74
+
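+ The filtering-and-splitting step above can be sketched as follows; the threshold value and the use of `difflib` as a normalized-difference stand-in for edit distance are assumptions for illustration.
+ 
+ ```python
+ import random
+ from difflib import SequenceMatcher
+ 
+ THRESHOLD = 0.05  # hypothetical; the paper only states a small threshold
+ 
+ def is_markedly_revised(original: str, revised: str) -> bool:
+     """Keep pairs whose normalized difference exceeds the threshold."""
+     return 1.0 - SequenceMatcher(None, original, revised).ratio() > THRESHOLD
+ 
+ def split_8_1_1(items, seed=0):
+     """Shuffle once, then cut into 80% train / 10% validation / 10% test."""
+     items = list(items)
+     random.Random(seed).shuffle(items)
+     n = len(items)
+     return (items[: int(0.8 * n)],
+             items[int(0.8 * n): int(0.9 * n)],
+             items[int(0.9 * n):])
+ ```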
75
+ Initially, we experimented with the translation ranking model architecture from the Comet framework to better leverage the paired annotations in the expert-revised dataset. In Fig. 10 (left), Comet<sup>Instruct</sup> optimizes the model using the instruction and input as anchors, minimizing the semantic distance to human-preferred responses while maximizing the distance to GPT-generated outputs. This approach achieves $72.44\%$ accuracy on the test set but fails to fully leverage the improvements experts made to the Input field. To address this, as illustrated in Fig. 10 (right), we retained the pre-trained XLM-RoBERTa-large encoder of Comet<sup>Instruct</sup> and directly concatenated the instruction pair components to train the IQS model. As shown in Table 1, our IQS model outperforms GPT-3.5 (version: GPT-3.5-Turbo) and
76
+
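+ Concretely, the concatenation-based scorer can be sketched as below; the linear regression head and separator handling are illustrative assumptions rather than the exact released architecture.
+ 
+ ```python
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+ 
+ tok = AutoTokenizer.from_pretrained("xlm-roberta-large")
+ encoder = AutoModel.from_pretrained("xlm-roberta-large")
+ head = torch.nn.Linear(encoder.config.hidden_size, 1)  # scalar quality score
+ 
+ def iqs_score(instruction: str, inp: str, response: str) -> float:
+     # Concatenate the instruction-pair components into one sequence.
+     text = f"{instruction} {tok.sep_token} {inp} {tok.sep_token} {response}"
+     enc = tok(text, return_tensors="pt", truncation=True, max_length=512)
+     with torch.no_grad():
+         cls = encoder(**enc).last_hidden_state[:, 0]   # sentence embedding
+     return head(cls).item()
+ ```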
77
+ ![](images/4a7fcb8bfe75e649a90558821ad94858782cb9bc2b575678212425135704b414.jpg)
78
+ Figure 2: An overview of Cluster and Ranking (CaR) method. Unlike directly training Alpaca with the entire Alpaca_52k dataset, CaR first uses the IQS model to score all instructions (brown arrow). Then it selects the top $n_1$ instructions ranked by quality. Next, a clustering model (violet arrow) groups all instructions into k clusters, selecting $n_2$ from each. These are concatenated and deduplicated to form a diverse, high-quality sub-dataset for training AlpaCaR.
79
+
80
+ GPT-4 (version: GPT-4-1106-preview). Further analysis reveals that GPT-4 favors the original instructions in $62.2\%$ of incorrect cases, showing that even advanced GPT models often prefer GPT-aligned instructions. Additionally, in the remaining $37.8\%$ of incorrect cases, GPT-4 fails to recognize the nuanced semantic changes made by experts, revealing its difficulty with expert edits that involve minimal adjustments. Despite GPT-4's strong alignment with human preferences in most general tasks, its subpar performance on the expert-revised dataset highlights a subtle gap between expert preferences and GPT preferences.
81
+
82
+ # 2.4 Diversity
83
+
84
+ Within the instruction filtering framework, it is imperative to select a small subset of data from a vast array of instructions, resulting in a limited number of instructions per task. In such low-resource scenarios, Dong et al. (2023) have demonstrated that blending training data from various tasks enhances LLMs' proficiency across different abilities. Intuitively, by assigning a task label to each instruction pair, we can preserve instruction pairs associated with a broader range of tasks, thereby facilitating cross-task instruction synergy and enhancing model performance. To determine task labels for instruction pairs, we evaluated manual labeling, classification models, and clustering models, selecting clustering for our study. Manual labeling, though more accurate, is labor-intensive and less adaptable to various datasets. We hypothesize that instruction pairs within the same task are semantically close, allowing their distribution to be learned via classification models. Nonetheless, such models may struggle with flexibility when faced with out-of-domain data.
87
+
88
+ To enhance the method's versatility, we opted for an unsupervised clustering-based approach to preserve data diversity. A clustering algorithm can identify semantically close instruction pairs and form clusters for different tasks. Moreover, this choice allows for efficient adaptation to different datasets without retraining from scratch by forming new clusters when encountering out-of-domain instruction pairs.
89
+
90
+ Regarding the clustering methodology, we employ the $k$ -Means algorithm. Initially, a sentence-transformers model is used to map sentences to a 384-dimensional dense vector space. Subsequently, semantic features are PCA-reduced to retain $95\%$ of dimensions. Finally, by setting the number of clusters as $k = \sqrt{n / 2}$ , all 52k instruction pairs are clustered into 161 clusters. The diversity of the instruction sub-dataset is maintained by adjusting the quantity of instruction pairs within each cluster.
91
+
92
+ # 3 Experimental Setup
93
+
94
+ To compare AlpaCaR with other models, we obtain a single response for each test set sample using a fixed prompt (Taori et al., 2023). Judge LLMs then compare responses generated by the LLMs
95
+
96
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Num</td><td rowspan="2">Size</td><td colspan="3">PandaLM</td><td colspan="3">Vicuna</td><td colspan="3">CoachLM</td><td colspan="3">Self-instruct</td></tr><tr><td>WS↑</td><td>WR↑</td><td>QS↑</td><td>WS↑</td><td>WR↑</td><td>QS↑</td><td>WS↑</td><td>WR↑</td><td>QS↑</td><td>WS↑</td><td>WR↑</td><td>QS↑</td></tr><tr><td>Alpaca-PandaLM</td><td>52k</td><td>7B</td><td>1.224</td><td>49.4%</td><td>72.9%</td><td>0.288</td><td>8.8%</td><td>20.0%</td><td>0.867</td><td>28.7%</td><td>58.0%</td><td>1.075</td><td>42.9%</td><td>64.7%</td></tr><tr><td>Alpaca-cleaned</td><td>52k</td><td>7B</td><td>1.276</td><td>53.5%</td><td>74.1%</td><td>0.300</td><td>8.8%</td><td>21.3%</td><td>0.953</td><td>35.3%</td><td>60.0%</td><td>1.083</td><td>42.5%</td><td>65.9%</td></tr><tr><td>Vicuna</td><td>70k</td><td>7B</td><td>1.276</td><td>53.5%</td><td>74.1%</td><td>0.688</td><td>17.5%</td><td>51.3%</td><td>0.787</td><td>23.3%</td><td>55.3%</td><td>0.877</td><td>25.8%</td><td>61.9%</td></tr><tr><td>Alpaca</td><td>52k</td><td>7B</td><td>1.341</td><td>54.1%</td><td>80.0%</td><td>0.363</td><td>11.3%</td><td>25.0%</td><td>0.913</td><td>32.7%</td><td>58.7%</td><td>1.139</td><td>42.9%</td><td>71.0%</td></tr><tr><td>Alpagasus</td><td>9k</td><td>7B</td><td>1.324</td><td>54.1%</td><td>78.2%</td><td>0.463</td><td>13.8%</td><td>32.5%</td><td>0.807</td><td>25.3%</td><td>55.3%</td><td>1.123</td><td>44.4%</td><td>67.9%</td></tr><tr><td>AlpaCaR</td><td>1k</td><td>7B</td><td>1.594</td><td>70.6%</td><td>88.8%</td><td>0.813</td><td>27.5%</td><td>53.8%</td><td>1.020</td><td>37.3%</td><td>64.7%</td><td>1.448</td><td>61.9%</td><td>82.9%</td></tr><tr><td>Alpaca</td><td>52k</td><td>13B</td><td>1.365</td><td>56.5%</td><td>80.0%</td><td>0.363</td><td>8.8%</td><td>27.5%</td><td>0.940</td><td>30.7%</td><td>63.3%</td><td>1.155</td><td>45.2%</td><td>70.2%</td></tr><tr><td>Alpagasus</td><td>9k</td><td>13B</td><td>1.347</td><td>54.7%</td><td>80.0%</td><td>0.338</td><td>6.3%</td><td>27.5%</td><td>0.880</td><td>28.0%</td><td>60.0%</td><td>1.230</td><td>48.4%</td><td>74.6%</td></tr><tr><td>AlpaCaR</td><td>1k</td><td>13B</td><td>1.535</td><td>65.9%</td><td>87.6%</td><td>1.025</td><td>37.5%</td><td>65.0%</td><td>1.153</td><td>44.0%</td><td>71.3%</td><td>1.357</td><td>56.3%</td><td>79.4%</td></tr><tr><td>Alpaca</td><td>52k</td><td>30B</td><td>1.276</td><td>50.0%</td><td>77.6%</td><td>0.425</td><td>11.3%</td><td>31.3%</td><td>0.900</td><td>28.0%</td><td>62.0%</td><td>1.155</td><td>43.7%</td><td>71.8%</td></tr><tr><td>Alpagasus</td><td>9k</td><td>30B</td><td>1.382</td><td>57.1%</td><td>81.2%</td><td>0.438</td><td>8.8%</td><td>35.0%</td><td>0.920</td><td>30.0%</td><td>62.0%</td><td>1.214</td><td>46.8%</td><td>74.6%</td></tr><tr><td>AlpaCaR</td><td>1k</td><td>30B</td><td>1.553</td><td>67.1%</td><td>88.2%</td><td>0.950</td><td>28.8%</td><td>66.3%</td><td>1.120</td><td>43.3%</td><td>68.7%</td><td>1.377</td><td>57.1%</td><td>80.6%</td></tr></table>
97
+
98
+ Table 2: Comparative analysis of AlpaCaR and existing methods in the primary experiment. Winning rates are determined relative to the reference responses of the test sets, providing a quantitative measure of performance.
99
+
100
+ against each other or against human reference responses, identifying their preferred responses. PandaLM, GPT-4, and human experts are used as judges, yielding consistent evaluation conclusions.
101
+
102
+ # 3.1 Test Datasets
103
+
104
+ To avoid confusion arising from the similarity in naming between models and datasets, we use the format "ModelName_DatasetSize" to represent datasets. Following previous methodologies, we assess four datasets: Self-instruct_252 (Li et al., 2023b), Vicuna_80 (Chiang et al., 2023), PandaLM_170 (Wang et al., 2023b), and CoachLM_150 (Liu et al., 2023b). This approach covers a broader range of instructions, minimizing evaluation bias.
105
+
106
+ # 3.2 Generations
107
+
108
+ For each test instruction, a single response is generated from each baseline model using LLaMA-Factory's default settings (Zheng et al., 2024b): temperature=0.95, top_p=0.7, top_k=50, no beam search, and a maximum token length of 512.
109
+
110
+ # 3.3 Evaluate Metrics
111
+
112
+ For each sample, the judge model receives a single instruction and two candidate responses. It labels the winning response, or a tie if neither response stands out significantly. To address the potential bias of LLM judges preferring specific positions, we evaluated each pair twice with the response order swapped and define the final judgment based on:
113
+
114
+ - win: win twice, or win once and tie once
115
+ - lose: lose twice, or lose once and tie once
116
+ - tie: tie twice, or win once and lose once
117
+
118
+ We compute three types of winning rates: (1) WS, a winning score formulated as $\mathrm{WS} = 1 + \frac{\#win - \#lose}{\#all}$; (2) WR, which considers only win cases and is given by $\mathrm{WR} = \frac{\#win}{\#all}$, where $\#all$ is the number of test set samples; (3) QS, a quality score that measures the ratio of responses reaching the reference level, formulated as $\mathrm{QS} = \frac{\#win + \#tie}{\#all}$.
119
+
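+ The three metrics can be computed directly from per-sample final judgments, as in the short sketch below.
+ 
+ ```python
+ def winning_metrics(judgments):
+     """judgments: list of "win" / "lose" / "tie" final labels."""
+     n = len(judgments)
+     win, lose, tie = (judgments.count(x) for x in ("win", "lose", "tie"))
+     ws = 1 + (win - lose) / n   # winning score
+     wr = win / n                # winning rate
+     qs = (win + tie) / n        # quality score
+     return ws, wr, qs
+ 
+ # winning_metrics(["win", "tie", "lose", "win"]) -> (1.25, 0.5, 0.75)
+ ```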
120
+ Evaluation Approach: (1) GPT-4 Turbo, currently the most powerful LLM, widely used to replace manual response quality assessment, with prompts designed by Chiang et al. (2023); however, this method faces limitations due to API dependency and inherent biases. (2) PandaLM, an open-source evaluation model that can be deployed locally, providing efficient LLM assessments (Wang et al., 2023b); trained on 300k samples using GPT-3.5, it effectively mitigates biases and achieves $88.3\%$ of GPT-4's evaluation capability. (3) Human: three experts with an average of 12.57 years of experience independently conducted comparisons based on the criteria in Appendix E. After comprehensive consideration, we use the evaluation results of PandaLM to measure the model's instruction-following ability in most experiments, while some key principal experiments utilize GPT-4 and human evaluators for assessment. The prompt for GPT-4's evaluation is designed by Chiang et al. (2023), as detailed in Appendix B.1.
121
+
122
+ # 4 Results and Analysis
123
+
124
+ In this section, we compared AlpaCaR with baseline models, including Alpaca, Alpaca-PandaLM, Alpaca-cleaned, Alpagasus, and Vicuna. We replicated all baseline models at a 7B scale and demonstrated the superiority of AlpaCaR at 13B and 30B scales.
125
+
126
+ ![](images/fea7b80242244649acc61382fb684c5358ce4d61b7f624a78ce0dfb9e02e05c4.jpg)
127
+ Figure 3: Consistency between IQS scores and the performance of LLMs.
128
+
129
+ ![](images/d883d83eb3cdbcb99bd02d373656420f57cad8f6eaefa77d92ad58e3f810d6f9.jpg)
130
+
131
+ # 4.1 Comparison with Baselines
132
+
133
+ We conduct a comparative analysis of two established baseline LLMs, Alpaca and Vicuna, which were fine-tuned using 52,000 text instructions generated by text-davinci-003 and 70,000 ChatGPT dialogues, respectively. Furthermore, we explore three models that advance upon Alpaca: Alpaca-PandaLM and Alpaca-cleaned, which employ instruction enhancement methods, and Alpagasus, which incorporates an instruction filtering method. All models were trained with identical hyperparameter settings. As delineated in Table 2, AlpaCaR, at the 7B scale, outperforms not only the foundational models Alpaca and Vicuna but also Alpaca-PandaLM, Alpaca-cleaned, and Alpagasus. Overall, AlpaCaR achieves significant performance improvements over Alpaca across the 7B, 13B, and 30B scales, validating the efficacy of the CaR method. The notable performance gains of AlpaCaR, accomplished with reduced data usage compared to Alpagasus, underscore the importance of leveraging high-quality human preferences and data diversity in enhancing model performance.
134
+
135
+ # 4.2 Reliability of IQE Results
136
+
137
+ To verify whether the IQE results genuinely reflect the performance of LLMs after IT, we examined the correlation between scores given by the IQS model and the performance of fine-tuned LLMs on test sets. Given that Alpagasus obtained 9k instructions rated above 4.5 using GPT-3.5-Turbo, we similarly selected the top 9k instructions ranked by the IQS model and by the Comet model. We then calculated the average IQS score of the three IT sub-datasets, fine-tuned LLaMA-7B on each, and tested performance by averaging the models' winning scores against the reference responses on the four test sets. As illustrated in Fig. 3, the average IQS score and the fine-tuned model's performance are generally consistent, indicating that IQE results can approximately reflect
138
+
139
+ ![](images/d6e0377bd6cc98e11a46752e76970cd5d807e9cead0ec63bf7e4d907567f4a31.jpg)
140
+ Figure 4: Model performances with varying $n_1$ .
141
+
142
+ ![](images/a2f9ba12ee8f4af98e88c15b663e8300357b6aa2e38819d2198210aba9c334a9.jpg)
143
+ Figure 5: Performances with varying $n_2$ .
144
+
145
+ the performance of LLMs after fine-tuning.
146
+
147
+ # 4.3 Ablation Study
148
+
149
+ Quality Dimension. To illustrate the significance of data quality, we used the IQS model's scores to rank the 52,000 instructions. Subsequently, we extracted subsets of the top 1,000, 2,000, and so on up to 42,000 instructions to train LLaMA-7B. In Fig. 4, the horizontal axis represents the size of the instruction dataset, where a higher count means more instructions of relatively lower quality are included, while the vertical axis shows the winning score relative to Alpaca. The results indicate that models trained with selected data generally surpass the one trained with the entire dataset. As more instructions of relatively lower quality are included, the performance of the LLM generally declines. Remarkably, the model approaches its optimal performance with a mere 1,000 high-quality IT samples. Therefore, in the CaR method, we select $n_1 = 1000$ instructions to ensure the chosen IT sub-dataset is of high quality.
150
+
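+ The ranking step of this ablation reduces to sorting by IQS score and truncating; a minimal sketch (a hypothetical helper, not the authors' code) is shown below:
+
+ ```python
+ # Hypothetical helper (not the authors' code): keep the n highest-IQS pairs.
+ def top_n_by_score(pairs, scores, n):
+     """pairs: instruction pairs; scores: parallel list of IQS scores."""
+     order = sorted(range(len(pairs)), key=lambda i: scores[i], reverse=True)
+     return [pairs[i] for i in order[:n]]
+
+ data = ["p0", "p1", "p2", "p3"]
+ print(top_n_by_score(data, [0.2, 0.9, 0.5, 0.7], n=2))  # ['p1', 'p3']
+ ```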
151
+ Selection of $n_2$ : Trade-off between Quantity and Quality. We compared different numbers of samples selected from each cluster after $k$ -means clustering.
152
+
153
+ <table><tr><td rowspan="2">Method</td><td colspan="3">Vicuna</td><td colspan="3">Self-instruct</td></tr><tr><td>WS†</td><td>WR†</td><td>QS†</td><td>WS†</td><td>WR†</td><td>QS†</td></tr><tr><td>40 × 4</td><td>0.625</td><td>20.0%</td><td>31.3%</td><td>1.226</td><td>48.4%</td><td>61.3%</td></tr><tr><td>80 × 2</td><td>0.600</td><td>18.8%</td><td>30.0%</td><td>1.290</td><td>52.4%</td><td>64.5%</td></tr><tr><td>160 × 1</td><td>0.688</td><td>23.8%</td><td>34.4%</td><td>1.365</td><td>59.5%</td><td>68.3%</td></tr></table>
154
+
155
+ Table 3: Ablation on diversity: models with more diverse instruction sets perform better ($160\times 1$ means selecting the single highest IQS-scored sample from each of 160 clusters).
156
+
157
+ ![](images/9ccd2cabc473c847e4daaeab872a7918ffd6c62cf6f0483fddaf48a29938f2d9.jpg)
158
+ Figure 6: Comparison of AlpaCaR with baselines, including Alpaca and a randomly selected 1k instruction subset.
159
+
160
+ Fig. 5 demonstrates that, compared to using only the $1\mathrm{k}$ high-quality samples selected by the IQS model, the CaR method enhances performance when a small number of samples (up to 5) is selected from each cluster. Selecting too many samples negatively impacts the overall quality of the IT sub-dataset and the performance of the LLMs. Moreover, the CaR method achieves nearly optimal performance by selecting $n_2 = 1$ sample from each cluster, thus enhancing the diversity of the IT sub-dataset.
161
+
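+ The per-cluster step can be sketched as follows (a minimal illustration assuming scikit-learn, precomputed instruction embeddings, and IQS scores; not the authors' released implementation):
+
+ ```python
+ # Sketch: pick the n2 highest-scored members of each of k k-means clusters.
+ import numpy as np
+ from sklearn.cluster import KMeans
+
+ def per_cluster_top(embeddings, scores, k, n2=1):
+     labels = KMeans(n_clusters=k, random_state=0, n_init=10).fit_predict(embeddings)
+     picked = []
+     for c in range(k):
+         members = np.where(labels == c)[0]
+         # indices of the n2 best-scored members of cluster c
+         best = members[np.argsort(scores[members])[::-1][:n2]]
+         picked.extend(best.tolist())
+     return picked
+
+ rng = np.random.default_rng(0)
+ emb = rng.normal(size=(500, 32))   # stand-in instruction embeddings
+ iqs = rng.random(500)              # stand-in IQS scores
+ print(len(per_cluster_top(emb, iqs, k=16, n2=1)))  # 16
+ ```
+
+ With $k = 161$ and $n_2 = 1$, this step contributes at most 161 pairs, which CaR then merges with the global top- $n_1$ list (see Appendix H).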
162
+ Importance of Diversity. An ideal IT dataset should encompass a rich variety of data, but determining the optimal number of instructions per cluster required for the model to handle each task effectively remains a challenge. We designed experiments to demonstrate the importance of diversity and to explore values of $n_2$ , i.e., the trade-off between the number and quality of samples per cluster.
163
+
164
+ Designing strict ablation experiments in this context is challenging due to the difficulty of ensuring consistent instruction-set quality while maintaining the same number of instructions. To explore this, we established three experimental groups with increasing diversity (baseline: reference responses). In Table 3, the winning rates on the Self-Instruct and Vicuna test sets show that models with more diverse instruction sets perform better.
165
+
166
+ ![](images/e492839a17903e606fe3be13d1c43e8d4a71f651114a849910a81cfbe4a0b1b0.jpg)
167
+ Figure 7: GPT-4 result on Vicuna_80 dataset: AlpaCaR vs. Alpaca.
168
+
169
+ # 4.4 Comparison with Random Selection & GPT-4 Results
170
+
171
+ Fig. 6 presents the results of ablation experiments, revealing that randomly selecting 1,017 instruction pairs from the $52\mathrm{k}$ dataset leads to a decrease in model performance compared to Alpaca. In contrast, the instruction pairs selected by the CaR method yield significant improvements at the 7B $(29.8\%)$ , 13B $(32.7\%)$ , and 30B $(33.1\%)$ scales.
172
+
173
+ Furthermore, to address cost considerations, we employed GPT-4's evaluation framework on only four datasets to compare AlpaCaR against Alpaca. As depicted in Fig. 7 and elaborated upon in Appendix D, GPT-4 exhibited similar evaluative outcomes: AlpaCaR outperformed the baseline in the majority of instances, thereby substantiating the efficacy of the CaR method. Employing CaR, which selects only $1.96\%$ of the dataset, has proven to yield superior results across a variety of parameter scales.
174
+
175
+ # 4.5 Human Evaluation
176
+
177
+ We have formulated detailed evaluation criteria, covering seven aspects: fluency, relevance, correctness, consistency, satisfaction, informativeness and security, which are further categorized into 27 primary and 58 secondary classifications. Additional details are provided in Appendix E.
178
+
179
+ We compared AlpaCaR 30B against Alpaca 30B on the Vicuna_80 test set. The human evaluation results demonstrate that AlpaCaR performed at least as well as Alpaca across all categories and was preferred by the language experts in the vast majority of cases. The specific results are shown in Table 4.
180
+
181
+ Table 7 in Appendix F displays a case study from the math category. We found that under strict evaluation criteria, the experts believed that neither model provided the correct final answer, resulting in a tie. However, a more detailed analysis reveals that AlpaCaR utilized CoT to explore the correct reasoning steps, although errors occurred after certain steps. In contrast, Alpaca simply provided a confusingly incorrect answer.
182
+
183
+ <table><tr><td>Category</td><td>win</td><td>lose</td><td>tie</td><td>WS†</td></tr><tr><td>Writing</td><td>8</td><td>1</td><td>1</td><td>1.700</td></tr><tr><td>Roleplay</td><td>5</td><td>0</td><td>5</td><td>1.500</td></tr><tr><td>Common-sense</td><td>9</td><td>0</td><td>1</td><td>1.900</td></tr><tr><td>Fermi</td><td>7</td><td>2</td><td>1</td><td>1.500</td></tr><tr><td>Counterfactual</td><td>7</td><td>0</td><td>3</td><td>1.700</td></tr><tr><td>Coding</td><td>3</td><td>3</td><td>1</td><td>1.000</td></tr><tr><td>Math</td><td>0</td><td>0</td><td>3</td><td>1.000</td></tr><tr><td>Generic</td><td>6</td><td>0</td><td>4</td><td>1.600</td></tr><tr><td>Knowledge</td><td>7</td><td>2</td><td>1</td><td>1.500</td></tr><tr><td>Total</td><td>52</td><td>8</td><td>20</td><td>1.550</td></tr></table>
184
+
185
+ Table 4: Human evaluation results on Vicuna_80 dataset: AlpaCaR_30B vs. Alpaca_30B.
186
+
187
+ <table><tr><td rowspan="2">Method</td><td colspan="3">Vicuna</td><td colspan="3">Self-instruct</td></tr><tr><td>WS†</td><td>WR†</td><td>QS†</td><td>WS†</td><td>WR†</td><td>QS†</td></tr><tr><td>Alpaca</td><td>0.338</td><td>10.00%</td><td>16.88%</td><td>1.206</td><td>45.63%</td><td>60.32%</td></tr><tr><td>mixed-181k</td><td>0.875</td><td>28.80%</td><td>43.75%</td><td>1.349</td><td>52.38%</td><td>67.46%</td></tr><tr><td>CaR_50k</td><td>1.113</td><td>33.75%</td><td>55.62%</td><td>1.500</td><td>63.89%</td><td>75.00%</td></tr></table>
188
+
189
+ We hypothesize that the IQS model has learned experts' preferences for the detailed reasoning processes presented in the training data. Consequently, during subset selection, the IQS model favors instruction pairs that showcase meticulous reasoning, so the fine-tuned AlpaCaR exhibits more comprehensive thought processes in the form of CoT reasoning.
190
+
191
+ # 4.6 Larger Instruction Tuning Datasets
192
+
193
+ To further explore the performance of CaR on larger and more complex datasets, we conducted additional experiments on bigger instruction datasets. Following recent work (Du et al., 2023; Liu et al., 2023a), we combined five instruction tuning datasets, namely Alpaca, Dolly_v2 (Conover et al., 2023), Alpaca-evol-instruct (Xu et al., 2023), HC3 (Guo et al., 2023), and LIMA (Zhou et al., 2023), to obtain a large mixed dataset containing 181,253 instructions. We then used CaR to filter this mixed dataset, obtaining CaR_50k with 50k instructions.
194
+
195
+ Table 5 shows that, using LLaMA 2 7B as the base pre-trained model, the model fine-tuned on the ${50}\mathrm{k}$ instructions selected by CaR outperforms Alpaca at a comparable number of instructions. In addition, the model fine-tuned with CaR_50k outperforms the one fine-tuned on the full mixed-181k instruction tuning dataset.
196
+
197
+ This illustrates that the bottleneck of Alpaca is not that pre-trained LLaMA cannot learn more knowledge from more instructions, but rather that
198
+
199
+ Table 5: CaR is a stable and effective framework even on larger datasets.
200
+
201
+ <table><tr><td>Method</td><td>Selection Cost</td><td>Training Cost</td><td>Total Cost</td></tr><tr><td>Alpaca</td><td>$0</td><td>$733.35</td><td>$733.35</td></tr><tr><td>Alpagasus</td><td>$12.66</td><td>$104.18</td><td>$116.84</td></tr><tr><td>AlpaCaR</td><td>$0.02</td><td>$13.07</td><td>$13.09</td></tr></table>
202
+
203
+ Table 6: Cost comparison at the 30B scale.
204
+
205
+ the limited quality of the instruction dataset restricts the model's performance. It also demonstrates that CaR is a stable and effective framework even on larger datasets. The CaR framework can filter 50k high-quality instructions from 181k instruction pairs, yielding stronger model performance with lower training overhead.
206
+
207
+ # 4.7 Cost Comparison
208
+
209
+ Here, we compare the computational costs of AlpaCaR, Alpaca, and Alpagasus, focusing on instruction evaluation and full-parameter fine-tuning at the 30B scale, as detailed in Table 6. For instruction evaluation using an API-based method, we refer to the official pricing $^{3}$ , while for model training and inference, we consider GPU rental costs $^{4}$ . In summary, training AlpaCaR significantly saves both time and cost compared to Alpaca or Alpagasus.
210
+
211
+ # 5 Is the Benefit Derived from Data Selection Universally Applicable?
212
+
213
+ Filtering a high-quality instruction sub-dataset for supervised fine-tuning of LLaMA 1 significantly reduces computational cost and effectively improves LLM performance. More crucially, it is essential to ascertain whether data selection constitutes a consistent paradigm for performance enhancement, particularly as pre-trained models become increasingly powerful and model parameters scale up. In this section, we use the average WS on the Vicuna_80 and Self-instruct_252 test sets to explore the generality of data selection.
214
+
215
+ A consistent paradigm when pre-training is more adequate? Base pre-trained LLMs acquire knowledge through pre-training. LLaMA 1, LLaMA 2, and LLaMA 3 were pre-trained on 1T, 2.4T, and 15T tokens, respectively. When pre-trained models exhibit strong capabilities, can they discern the quality of fine-tuning instructions on their own, rendering instruction selection redundant? To investigate this, we employed the LLaMA 1 7B, LLaMA
216
+
217
+ ![](images/f1d05435aa7c3f9bfd2023a7892f5bb40b6c30e476304249ea168417258176e6.jpg)
218
+ Figure 8: Impact of data selection as pre-trained models become more powerful.
219
+
220
+ ![](images/5371668bc50b478ef02bd41ad19b05828f0fe60e53ec03ca57926f795b04d120.jpg)
221
+ Figure 9: Impact of data selection as model parameters or instruction quality increase.
222
+
223
+ 2 7B, and LLaMA 3 8B pre-trained models, comparing fine-tuning on the full dataset against subsets filtered by GPT-3.5 Turbo or CaR. Fig. 8 shows the results on the Alpaca_52k and Dolly_15k IT datasets. The findings suggest that even as base pre-trained LLMs become more powerful, models fine-tuned on filtered data surpass those trained on the full instructions. LLaMA 3 8B is even more susceptible to low-quality instructions, which impede its ability to follow instructions in downstream tasks.
224
+
225
+ A consistent paradigm when model size scales up? Many new capabilities and phenomena emerge as model parameters scale up. Thus, another question is whether instruction-tuning data selection remains important as the parameter count increases. Due to limited computational resources, we evaluated models fine-tuned on full versus selected instructions at the 7B-30B scale. As shown in Fig. 9 (left), along the horizontal direction, model performance showed no significant improvement even as model size increased. However, along the vertical direction, the model performs better using instructions selected by GPT-3.5 or CaR at all scales.
226
+
227
+ A consistent paradigm when instruction quality improves? Alpaca-GPT4 (Peng et al., 2023) contains instructions generated by GPT-4 using Alpaca prompts, whose quality is significantly improved
228
+
229
+ compared to Alpaca. Distinguishing high-quality instructions remains a challenge when instruction quality generally improves. As depicted in Fig. 9 (right), models trained on CaR-selected instructions are inferior to those trained on the full instructions. We argue that the IQS model cannot reliably discriminate instruction quality within such a high-quality data distribution, so the selection degenerates toward random filtering and causes performance degradation similar to Fig. 6. A similar phenomenon occurs when using LLMs to select instructions: Qwen1.5-110B-chat and Qwen-max gave perfect scores to more than 1,800 of the 2,000 evaluated instructions in the Alpaca-GPT4 dataset, indicating that instruction quality in this situation approaches the boundary of the LLMs' evaluation capabilities. Thus, data selection at higher data quality remains challenging, and gradient-based (Xia et al., 2024) or in-context learning-based (Li et al., 2023c) methods may demonstrate greater potential.
230
+
231
+ # 6 Conclusion
232
+
233
+ In this paper, we focus on exploring and resolving the issue of instruction selection during the supervised fine-tuning stage. We introduce the CaR method and examine two perspectives that warrant consideration: (1) Evaluating instruction quality using more authentic human preferences: models trained with data annotated by linguistic experts show higher agreement rates, and the selected instructions lead to better-performing models. (2) Instruction diversity inspires stronger LLM capability: under our selection framework, preserving a small number of instructions for different tasks through clustering improves model performance. Experimental results show that fine-tuning LLaMA (ranging from 7B to 30B parameters) with a $1.96\%$ subset of instructions selected by CaR outperforms models trained on full datasets or on data selected by GPT. Moreover, data selection using GPT-family models or CaR remains a consistent paradigm as pre-trained models grow more capable and model parameters scale up, whereas selection at higher data quality remains challenging. Additionally, our approach can be deployed locally without relying on APIs, thereby enabling more efficient instruction selection in low-computation-resource environments.
234
+
235
+ # 7 Limitation
236
+
237
+ Despite the outstanding performance of CaR across multiple test sets, its experiments were confined to filtering only a few datasets. The diverse formats of different open-source instruction sets pose challenges for the academic community interested in instruction filtering tasks. In the future, we plan to validate the effectiveness of CaR on more datasets such as WizardLM_evol_instruct_70k (Xu et al., 2023). Moreover, while CaR is primarily used for single-turn dialogue instruction filtering, exploring its application to multi-turn dialogue instruction filtering presents an attractive direction for future research.
238
+
239
+ # 8 Potential Risks & Ethical Considerations
240
+
241
+ Based on ethical considerations, we identify the following potential risks of our research:
242
+
243
+ 1. Quality of instruction data: While the proposed method aims to select high-quality instruction data, there is still a risk that the selected subset may not fully represent the diversity and complexity of the entire dataset. This could potentially lead to biased or incomplete training of models and cause adverse social impact.
244
+ 2. Bias and fairness: As with any AI research, there is a need to ensure fairness and mitigate biases. The selection process and scoring model used in CaR should be carefully monitored to prevent any unintentional biases, such as favoring certain types of instructions or excluding underrepresented groups.
245
+ 3. Industrial deployment and responsible use: As the method is designed for industrial scenarios, it is important to consider the responsible use of the developed models. Ensuring that the models are not used for unethical purposes or harmful applications is crucial. Additionally, monitoring and addressing any unintended consequences or biases that may emerge during deployment should be a priority.
246
+
247
+ # 9 Acknowledgement
248
+
249
+ This work was supported in part by the National Science Foundation of China (No.62276056), the Natural Science Foundation of Liaoning Province
250
+
251
+ of China (2022-KF-16-01), the Fundamental Research Funds for the Central Universities (Nos. N2216016 and N2316002), the Yunnan Fundamental Research Projects (No. 202401BC070021), and the Program of Introducing Talents of Discipline to Universities, Plan 111 (No.B16009).
252
+
253
+ # References
254
+
255
+ Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.
256
+ Lichang Chen, Shiyang Li, Jun Yan, Hai Wang, Kalpa Gunaratna, Vikas Yadav, Zheng Tang, Vijay Srinivasan, Tianyi Zhou, Heng Huang, et al. 2023. Alpagasus: Training a better alpaca with fewer data. arXiv preprint arXiv:2307.08701.
257
+ Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. 2023. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$* chatgpt quality.
258
+ Wei-Lin Chiang, Lianmin Zheng, Ying Sheng, Anastasios Nikolas Angelopoulos, Tianle Li, Dacheng Li, Hao Zhang, Banghua Zhu, Michael Jordan, Joseph E Gonzalez, et al. 2024. Chatbot arena: An open platform for evaluating llms by human preference. arXiv preprint arXiv:2403.04132.
259
+ Xu Chu, Ihab F Ilyas, Sanjay Krishnan, and Jiannan Wang. 2016. Data cleaning: Overview and emerging challenges. In Proceedings of the 2016 international conference on management of data, pages 2201-2206.
260
+ Mike Conover, Matt Hayes, Ankit Mathur, Xiangrui Meng, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, et al. 2023. Free dolly: Introducing the world's first truly open instruction-tuned llm.
261
+ Guanting Dong, Hongyi Yuan, Keming Lu, Cheng-peng Li, Mingfeng Xue, Dayiheng Liu, Wei Wang, Zheng Yuan, Chang Zhou, and Jingren Zhou. 2023. How abilities in large language models are affected by supervised fine-tuning data composition. arXiv preprint arXiv:2310.05492.
262
+ Qianlong Du, Chengqing Zong, and Jiajun Zhang. 2023. Mods: Model-oriented data selection for instruction tuning. arXiv preprint arXiv:2311.15653.
263
+ Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B Hashimoto. 2023. Alpacafarm: A simulation framework for methods
264
+
265
+ that learn from human feedback. arXiv preprint arXiv:2305.14387.
266
+ Biyang Guo, Xin Zhang, Ziyuan Wang, Minqi Jiang, Jinran Nie, Yuxuan Ding, Jianwei Yue, and Yupeng Wu. 2023. How close is chatgpt to human experts? comparison corpus, evaluation, and detection. arXiv preprint arXiv:2301.07597.
267
+ Mustafa Hajij, Ghada Zamzmi, Karthikeyan Natesan Ramamurthy, and Aldo Guzman Saenz. 2021. Data-centric ai requires rethinking data notion. arXiv preprint arXiv:2110.02491.
268
+ Harrison Lee, Samrat Phatale, Hassan Mansoor, Kellie Lu, Thomas Mesnard, Colton Bishop, Victor Carbune, and Abhinav Rastogi. 2023. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv preprint arXiv:2309.00267.
269
+ Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, Hai Zhao, and Pengfei Liu. 2023a. Generative judge for evaluating alignment. arXiv preprint arXiv:2310.05470.
270
+ Xian Li, Ping Yu, Chunting Zhou, Timo Schick, Luke Zettlemoyer, Omer Levy, Jason Weston, and Mike Lewis. 2023b. Self-alignment with instruction back-translation. arXiv preprint arXiv:2308.06259.
271
+ Yunshui Li, Binyuan Hui, Xiaobo Xia, Jiaxi Yang, Min Yang, Lei Zhang, Shuzheng Si, Junhao Liu, Tongliang Liu, Fei Huang, et al. 2023c. One shot learning as instruction data prospector for large language models. arXiv preprint arXiv:2312.10302.
272
+ Wei Liu, Weihao Zeng, Keqing He, Yong Jiang, and Junxian He. 2023a. What makes good data for alignment? a comprehensive study of automatic data selection in instruction tuning. arXiv preprint arXiv:2312.15685.
273
+ Xiaoyong Liu and W Bruce Croft. 2004. Cluster-based retrieval using language models. In Proceedings of the 27th annual international ACM SIGIR conference on Research and development in information retrieval, pages 186-193.
274
+ Yilun Liu, Shimin Tao, Xiaofeng Zhao, Ming Zhu, Wenbing Ma, Junhao Zhu, Chang Su, Yutai Hou, Miao Zhang, Min Zhang, et al. 2023b. Automatic instruction optimization for open-source llm instruction tuning. arXiv preprint arXiv:2311.13246.
275
+ Mohammad Motamedi, Nikolay Sakharnykh, and Tim Kaldewey. 2021. A data-centric approach for training deep neural networks with less data. arXiv preprint arXiv:2110.03613.
276
+ Yongyu Mu, Abudurexiti Reheman, Zhiquan Cao, Yuchun Fan, Bei Li, Yinqiao Li, Tong Xiao, Chunliang Zhang, and Jingbo Zhu. 2023. Augmenting large language model translators via translation memories. In Findings of the Association for Computational Linguistics: ACL 2023, pages 10287-10299, Toronto, Canada. Association for Computational Linguistics.
277
+
278
+ Lawrence Page, Sergey Brin, Rajeev Motwani, Terry Winograd, et al. 1999. The pagerank citation ranking: Bringing order to the web.
279
+ Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, and Jianfeng Gao. 2023. Instruction tuning with gpt-4. arXiv preprint arXiv:2304.03277.
280
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
281
+ Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D Manning, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. arXiv preprint arXiv:2305.18290.
282
+ Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. COMET: A neural framework for MT evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2685-2702, Online. Association for Computational Linguistics.
283
+ Yizhou Sun, Jiawei Han, Peixiang Zhao, Zhijun Yin, Hong Cheng, and Tianyi Wu. 2009. Rankclus: integrating clustering with ranking for heterogeneous information network analysis. In Proceedings of the 12th international conference on extending database technology: advances in database technology, pages 565-576.
284
+ Hongyin Tang, Xingwu Sun, Beihong Jin, Jingang Wang, Fuzheng Zhang, and Wei Wu. 2021. Improving document representations by generating pseudo query embeddings for dense retrieval. arXiv preprint arXiv:2105.03599.
285
+ Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca.
286
+ Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
287
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, 30.
288
+ Peiyi Wang, Lei Li, Liang Chen, Dawei Zhu, Binghuai Lin, Yunbo Cao, Qi Liu, Tianyu Liu, and Zhifang Sui. 2023a. Large language models are not fair evaluators. arXiv preprint arXiv:2305.17926.
289
+
290
+ Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, et al. 2023b. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. arXiv preprint arXiv:2306.05087.
291
+ Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A Smith, Daniel Khashabi, and Hannaneh Hajishirzi. 2022. Self-instruct: Aligning language models with self-generated instructions. arXiv preprint arXiv:2212.10560.
292
+ Mengzhou Xia, Sadhika Malladi, Suchin Gururangan, Sanjeev Arora, and Danqi Chen. 2024. Less: Selecting influential data for targeted instruction tuning. arXiv preprint arXiv:2402.04333.
293
+ Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. 2023. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244.
294
+ Zhiqiang Yuan, Junwei Liu, Qiancheng Zi, Mingwei Liu, Xin Peng, and Yiling Lou. 2023. Evaluating instruction-tuned large language models on code comprehension and generation. arXiv preprint arXiv:2308.01240.
295
+ Daochen Zha, Zaid Pervaiz Bhat, Kwei-Herng Lai, Fan Yang, Zhimeng Jiang, Shaochen Zhong, and Xia Hu. 2023. Data-centric artificial intelligence: A survey. arXiv preprint arXiv:2303.10158.
296
+ Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, et al. 2023. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792.
297
+ Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. 2023. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv:2306.05685.
298
+ Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. 2024a. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36.
299
+ Yaowei Zheng, Richong Zhang, Junhao Zhang, YeYanhan YeYanhan, and Zheyan Luo. 2024b. LlamaFactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pages 400-410, Bangkok, Thailand. Association for Computational Linguistics.
300
+ Chunting Zhou, Pengfei Liu, Puxin Xu, Srini Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, et al. 2023. Lima: Less is more for alignment. arXiv preprint arXiv:2305.11206.
301
+
302
+ # A Related work
303
+
304
+ Quality Estimation and Comet framework. Quality estimation is a pivotal task in machine translation, involving scoring or ranking translation results to select higher-quality data. Comet (Rei et al., 2020) leverages input and reference translations to accurately assess translation quality, employing two architectures: the Estimator model and the Translation Ranking model. The Estimator model directly predicts quality scores for each evaluation instance, while the Translation Ranking model learns parameters from paired evaluation data to predict reasonable quality scores.
305
+
306
+ Algorithm - Data Lifecycle. In the modern era of deep learning, high-quality data has become the cornerstone for training robust and effective models. Over the past decade, there has been a growing emphasis on the collection and curation of superior data (Chu et al., 2016; Motamedi et al., 2021). The emergence of data-centric AI has underscored the belief that data quality is as crucial as algorithmic advancements within the AI/ML lifecycle (Hajij et al., 2021; Zha et al., 2023). This paradigm shift has been particularly evident since the introduction of the Transformer architecture (Vaswani et al., 2017), which has revolutionized the field of language modeling. Rather than focusing on disruptive innovations in model structure, researchers have concentrated on leveraging the effectiveness of the Transformer architecture by stacking transformer blocks to create more potent models. Additionally, significant improvements in model performance have been achieved through the construction of task-specific datasets and the enhancement of data quality (Zhou et al., 2023; Chen et al., 2023; Li et al., 2023c).
307
+
308
+ Further perspectives on clustering and ranking. Many domains have employed methods similar to clustering and ranking. In information retrieval, Google extensively utilizes the PageRank algorithm (Page et al., 1999) to calculate the importance of hyperlinks between webpages. Liu et al. developed a cluster-based retrieval model by constructing language models for clusters (Liu and Croft, 2004), combining documents within the same cluster and searching/ranking clusters based on query-generation likelihood. Tang et al. enhanced the Bi-encoder's performance in dense information retrieval tasks by using clustering algorithms to generate "pseudo-query embeddings" (Tang et al., 2021).
309
+
310
+ Selecting suitable data for LLM inference is crucial in the RAG field, as discussed by Yuan et al. (2023) and Mu et al. (2023), who explore methods for finding appropriate demonstrations to improve LLM performance. In the network domain, Sun et al. introduced the RankClus framework (Sun et al., 2009), which integrates clustering and ranking methods to strengthen heterogeneous information network analysis.
311
+
312
+ Evaluation of LLMs. Evaluating the open-domain instruction-following capabilities of LLMs presents a significant challenge. Currently, the prevailing approach involves employing human evaluators or GPT-4 to compare the inference responses of different models. Consequently, recent studies, including PandaLM (Wang et al., 2023b), Vicuna (Chiang et al., 2023), CoachLM (Liu et al., 2023b), and Self-Instruct (Wang et al., 2022), have curated and provided their own instruction sets to evaluate instruction-finetuned LLMs. Additionally, leaderboards such as MT-Bench (Zheng et al., 2024a), Alpaca-Eval (Dubois et al., 2023), and Chatbot Arena (Chiang et al., 2024) have been established to measure the instruction-following abilities of these models. Efforts such as PandaLM (Wang et al., 2023b) and Auto-J (Li et al., 2023a) focus on training LLMs to provide more impartial and accurate evaluations. By leveraging these latest advancements, we aim to evaluate our model's performance using human-generated instruction sets, ensuring a comprehensive and rigorous assessment of its capabilities in following open-ended instructions.
313
+
314
+ # B Evaluation Prompts
315
+
316
+ # B.1 IQE Prompt
317
+
318
+ [The Start of Assistant A's Instruction and Answer]
319
+ {Instruction pair 1}
320
+
321
+ [The End of Assistant A's Instruction and Answer]
322
+ [The Start of Assistant B's Instruction and Answer]
323
+ {Instruction pair 2}
324
+
325
+ [The End of Assistant B's Instruction and Answer]
+ [System]
326
+
327
+ We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. Please rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance. Please first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.
328
+
329
+ # B.2 Response Comparison Prompt
330
+
331
+ [Question]
332
+
333
+ {Instruction}
334
+
335
+ [The Start of Assistant 1's Answer]
336
+
337
+ {Response 1}
338
+
339
+ [The End of Assistant 1's Answer]
340
+
341
+ [The Start of Assistant 2's Answer]
342
+
343
+ {Response 2}
344
+
345
+ [The End of Assistant 2's Answer]
346
+
347
+ [System]
348
+
349
+ Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any positional biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: "[A]" if assistant A is better, "[B]" if assistant B is better, and "[C]" for a tie.
350
+
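+ Operationally, this template can be filled in by a small helper; the sketch below (a hypothetical helper, with the system text truncated for brevity) shows how a comparison query for a judge model might be assembled:
+
+ ```python
+ # Hypothetical helper assembling the response-comparison prompt above.
+ SYSTEM = ("Please act as an impartial judge and evaluate the quality of the "
+           "responses provided by two AI assistants ...")  # truncated here
+
+ def build_comparison_prompt(instruction: str, resp1: str, resp2: str) -> str:
+     return (
+         f"[Question]\n{instruction}\n\n"
+         f"[The Start of Assistant 1's Answer]\n{resp1}\n"
+         f"[The End of Assistant 1's Answer]\n\n"
+         f"[The Start of Assistant 2's Answer]\n{resp2}\n"
+         f"[The End of Assistant 2's Answer]\n\n"
+         f"[System]\n{SYSTEM}"
+     )
+
+ print(build_comparison_prompt("Name a prime.", "2", "4")[:40])
+ ```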
351
+ # C Specifics about Instruction Quality Estimation
352
+
353
+ # C.1 Evaluation Metric of IQE
354
+
355
+ The second row of Table 1 presents results for instruction pairs sourced from the IQE test set, which are instructions revised by language experts. The third row shows accuracy on instruction pairs from Vicuna_80, demonstrating the models' generalization to other distributions. The instructions are provided by the dataset, while language experts evaluate the quality of two responses generated by different models, establishing the ground-truth labels. In the calculation of accuracy, if the absolute difference between the scores assigned by IQS or Comet<sup>instruct</sup> to the two responses is less than 0.01, the outcome is considered a "Tie".
356
+
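+ The tie rule above amounts to a simple threshold comparison; a minimal sketch (assumed, not the authors' code):
+
+ ```python
+ # Hypothetical tie rule: score differences below 0.01 count as a "Tie".
+ def judge(score_a: float, score_b: float, eps: float = 0.01) -> str:
+     if abs(score_a - score_b) < eps:
+         return "Tie"
+     return "A" if score_a > score_b else "B"
+
+ print(judge(0.710, 0.705))  # "Tie"  (difference 0.005 < 0.01)
+ print(judge(0.9, 0.2))      # "A"
+ ```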
357
+ # C.2 Model Architecture of IQS and Comet<sup>instruct</sup>
358
+
359
+ In the IQE task, the IQS model and the Comet model correspond to the Estimator architecture and the Translation Ranking architecture in the Comet framework, respectively. As shown in Fig. 10, the Comet<sup>instruct</sup> model concatenates instructions with inputs to form anchors. It then feeds pairs of better and worse responses into the model. Finally, the model is trained using a triplet margin loss function to distinguish between the superior
360
+
361
+ ![](images/12752d3283c8f1cf239eb6375fed8fec3d0513e7abf8e51bf94f6fbb9b0f3174.jpg)
362
+ Figure 10: Detailed architecture of the Comet<sup>instruct</sup> model (left) and the instruction-pair quality scoring model (right).
363
+
364
+ ![](images/9575e6b90243f0907b212a1bd9d65c5f8f094b29c656ff8b59e7ff85e13bd4b1.jpg)
365
+
366
+ ![](images/e68efebe6bafdcca64edbe30f7feb74d16c2cdbe29aa2a7a308d5e2c19d61d06.jpg)
367
+ Figure 11: GPT-4 result on CoachLM_150 dataset: AlpaCaR vs. Alpaca.
368
+
369
+ ![](images/a6f6715635776c82092f89d7a7a332f1f8988d7a1d9bd99ed0653c4b252d949f.jpg)
370
+ Figure 12: GPT-4 result on Self-instruct_252 dataset: AlpaCaR vs. Alpaca.
371
+
372
+ ![](images/b390f9261673e71f871c8b720be4a38c342303d2d3642bdf59d1154fc62c8748.jpg)
373
+ Figure 13: GPT-4 result on Pandalm_170 dataset: AlpaCaR vs. Alpaca.
374
+
375
+ and inferior responses. The IQS model concatenates the instruction pair and is trained with Mean Squared Error as the loss function.
376
+
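+ The two objectives described above can be sketched as follows (random tensors stand in for encoder outputs; this illustrates the loss functions only, not the authors' training code):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ anchor = torch.randn(4, 768)   # encoded instruction (+ input), stand-in
+ better = torch.randn(4, 768)   # encoded preferred response, stand-in
+ worse = torch.randn(4, 768)    # encoded inferior response, stand-in
+
+ # Comet_instruct: triplet margin loss pulls the better response toward the
+ # anchor embedding and pushes the worse one away.
+ ranking_loss = F.triplet_margin_loss(anchor, better, worse, margin=1.0)
+
+ # IQS: regress a scalar quality score per instruction pair with MSE.
+ pred_scores = torch.randn(4)   # scoring-head output (stand-in)
+ gold_scores = torch.rand(4)    # expert quality labels in [0, 1]
+ regression_loss = F.mse_loss(pred_scores, gold_scores)
+ print(ranking_loss.item(), regression_loss.item())
+ ```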
377
+ # D More Results about GPT-4 Evaluations
378
+
379
+ Additional GPT-4 evaluation results are illustrated in Figs. 11, 12, and 13 (baseline: reference responses).
380
+
381
+ # E Specifics about Human Evaluation Criteria
382
+
383
+ # Fluency
384
+
385
+ - Redundancy: verbose repetition ($\geq 2$)
386
+ - Redundancy: extraneous content clutter
387
+ - Missing: incomplete response
388
+ - Error: syntax & semantics issues
389
+ - Errors: word & symbol errors
390
+
391
+ # Relevance
392
+
393
+ - Irrelevant
394
+ - Incorrect understanding of object / personality / relationship
395
+ - Poor compliance
396
+
397
+ # Correctness
398
+
399
+ - Incorrect answer to the question
400
+ - Incorrect derivation process
401
+
402
+ # Consistency
403
+
404
+ - Intrasentence consistency
405
+ - Contextual consistency
406
+ - Content Logical Consistency
407
+ - Query response consistency
408
+
409
+ # Satisfaction
410
+
411
+ - Unclear views, no precise conclusions given (advice / recommendations)
412
+ - Insufficiently organized and comprehensive content
413
+ - Poor extensibility, almost a carbon copy of the query information, lack of embellishments
414
+ - Lack of emotional feedback
415
+ - Insufficiently careful replies
416
+ - Biases in characterization and speaker tone
417
+ - Overly subjective and unsubstantiated
418
+ - Some of the recommendations are ineffective, irrelevant to the issue or lack specific advice and assistance
419
+ - A genuine problem, but one of subjective experience that is hard to describe
420
+
421
+ # Informativeness
422
+
423
+ - Missing interpretive information
424
+ - Lack of richness
425
+
426
+ # Security
427
+
428
+ - Minor or moderate security violations
429
+ - Serious security violations
430
+
431
+ <table><tr><td>Instruction</td><td>Input</td><td>AlpaCaR_30B</td><td>Alpaca_30B</td><td>Human evaluation</td></tr><tr><td>Given that f(x) = 5x^3 - 2x + 3, find the value of f(2).</td><td>\</td><td>The value of f(2) = 5(2)^3 - 2(2) + 3 = 5(8) - 4 + 3 = 44.</td><td>The value of f(2) is 3.</td><td>Tie</td></tr><tr><td>Solve for x in the equation 3x + 10 = 5(x - 2).</td><td>\</td><td>The equation 3x + 10 = 5(x - 2) can be rearranged to solve for x. First, subtract 5(x - 2) from both sides to get 3x - 5x + 10 = 5(x - 2) - 5x + 10. Then, x = 5.</td><td>x = 7</td><td>Tie</td></tr><tr><td>If the endpoints of a line segment are (2, -2) and (10, 4), what is the length of the segment?</td><td>\</td><td>The length of the line segment can be found using the formula for the distance between two points: d = √(x2 - x1)2 + (y2 - y1)2 In this case, x2 = 10, x1 = 2, y2 = 4, and y1 = -2. Plugging these numbers into the formula, we get d = √(10 - 2)2 + (4 - (-2))2 Similarly, we can calculate the length of the line segment using the Pythagorean-Pythagorean-Circles Theorem: d = √(x2 + y2) In this case, x2 = 10, y2 = 4, so d = √(10 + 4) = 5. Therefore, the length of the line segment is 5.</td><td>The length of the line segment is 8.</td><td>Tie</td></tr></table>
432
+
433
+ # F Case study
434
+
435
+ The case study is shown in Table 7.
436
+
437
+ # G Profile of Involved Language Experts
438
+
439
+ To ensure a comprehensive and rigorous human evaluation of LLM abilities, we established a collaboration with the language service center of a prominent international corporation. We recruited a team of highly educated, multilingual language experts with diverse skills in translation, localization, writing, and testing, who dedicated their full-time efforts to this task. Specifically, three experts with an average of 12.57 years of experience were responsible for conducting the human evaluation of AlpaCaR and other LLMs.
440
+
441
+ # H Discussion of CaR framework
442
+
443
+ Selecting top-n ranked samples for each cluster is indeed an intuitive and interesting idea that integrates the two steps of clustering and ranking. We have also experimented with this setting in our early research. However, a challenge arises when the predefined number of clusters $k = \sqrt{N_{\text{instructions}}/2} \approx 161$ is used. When top-n is small, the resulting dataset size is insufficient for the model to achieve good instruction-following capacity. Conversely, when top-n is large, it introduces more low-quality instruction pairs, which negatively impacts the performance of LLMs. An
444
+
445
+ Table 7: Case study of LLM responses in the Vicuna_80 math category.
446
+
447
+ <table><tr><td rowspan="2">Top-n</td><td colspan="3">Vicuna</td><td colspan="3">Self-instruct</td></tr><tr><td>WS†</td><td>WR†</td><td>QS†</td><td>WS†</td><td>WR†</td><td>QS†</td></tr><tr><td>10</td><td>1.188</td><td>55.00%</td><td>90.00%</td><td>1.230</td><td>45.63%</td><td>77.38%</td></tr><tr><td>20</td><td>1.375</td><td>51.25%</td><td>83.75%</td><td>1.167</td><td>42.86%</td><td>73.81%</td></tr><tr><td>30</td><td>1.300</td><td>57.50%</td><td>85.00%</td><td>1.111</td><td>38.49%</td><td>72.62%</td></tr><tr><td>CaR(ours)</td><td>1.475</td><td>58.75%</td><td>88.75%</td><td>1.310</td><td>51.98%</td><td>78.97%</td></tr></table>
448
+
449
+ Table 8: Discussion of the CaR framework: $k \times$ top-$n$ vs. $n_{1} + k \times n_{2}$.
450
+
451
+ early version of our experimental results (baseline: Alpaca 52k) is shown in Table 8.
452
+
453
+ The experimental results indicate that this combinatorial approach performs less effectively than treating the two components separately. Our idea is to additionally and separately extract the top $n_1$ instructions using only the ranking step, ensuring that most high-quality instructions are included (as indicated in Section 2.2), while using a smaller top $n_2$ to prevent the inclusion of a large number of low-quality instruction pairs. Experimenting with different values of $k$ might alleviate this problem, but we aim for a more automated process and avoid additional hyperparameter tuning.
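+
+ For reference, a minimal sketch (our reading of the setup, with stand-in index sets) of how the predefined $k$ and the final subset size arise:
+
+ ```python
+ # Sketch: cluster count k from dataset size; final subset as the union of
+ # the global top-n1 indices and one top pick per cluster (stand-ins below).
+ import math
+ import random
+
+ N = 52_002                      # approximate Alpaca instruction count
+ k = round(math.sqrt(N / 2))     # predefined number of clusters -> 161
+
+ random.seed(0)
+ top_n1 = set(range(1000))                      # stand-in: global top-1000 indices
+ per_cluster = set(random.sample(range(N), k))  # stand-in: best index per cluster
+ subset = top_n1 | per_cluster                  # duplicates merged (~1,017 in the paper)
+ print(k, len(subset))
+ ```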
2402.18xxx/2402.18191/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14ab3fbeb8e5c8284df9dc7196c227449e44036c9664b0d500a79c1bc31d4dba
3
+ size 657209
2402.18xxx/2402.18191/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_content_list.json ADDED
@@ -0,0 +1,1490 @@
1
+ [
2
+ {
3
+ "type": "image",
4
+ "img_path": "images/4451117416aa492c1466349e95c984fa34250e98b6cb144a556d4ed2e0078e2e.jpg",
5
+ "image_caption": [],
6
+ "image_footnote": [],
7
+ "bbox": [
8
+ 173,
9
+ 102,
10
+ 212,
11
+ 130
12
+ ],
13
+ "page_idx": 0
14
+ },
15
+ {
16
+ "type": "text",
17
+ "text": "LEMUR: LOG PARSING WITH ENTROPY SAMPLING",
18
+ "text_level": 1,
19
+ "bbox": [
20
+ 215,
21
+ 103,
22
+ 823,
23
+ 125
24
+ ],
25
+ "page_idx": 0
26
+ },
27
+ {
28
+ "type": "text",
29
+ "text": "AND CHAIN-OF-THOUGHT MERGING",
30
+ "text_level": 1,
31
+ "bbox": [
32
+ 173,
33
+ 130,
34
+ 622,
35
+ 148
36
+ ],
37
+ "page_idx": 0
38
+ },
39
+ {
40
+ "type": "text",
41
+ "text": "Wei Zhang $^{1}$ , Xiangyuan Guan $^{1}$ , Lu Yunhong $^{2}$ , Jie Zhang $^{3}$ , Shuangyong Song $^{3}$ , Xianfu Cheng $^{1}$ , Zhenhe Wu $^{1}$ , Zhoujun Li $^{1}$",
42
+ "bbox": [
43
+ 179,
44
+ 172,
45
+ 622,
46
+ 202
47
+ ],
48
+ "page_idx": 0
49
+ },
50
+ {
51
+ "type": "list",
52
+ "sub_type": "text",
53
+ "list_items": [
54
+ "<sup>1</sup>State Key Lab of Software Development Environment, Beihang University",
55
+ "$^{2}$ Yantai University",
56
+ "3China Telecom"
57
+ ],
58
+ "bbox": [
59
+ 183,
60
+ 203,
61
+ 679,
62
+ 246
63
+ ],
64
+ "page_idx": 0
65
+ },
66
+ {
67
+ "type": "text",
68
+ "text": "zwpride@buaa.edu.cn",
69
+ "bbox": [
70
+ 183,
71
+ 247,
72
+ 370,
73
+ 260
74
+ ],
75
+ "page_idx": 0
76
+ },
77
+ {
78
+ "type": "text",
79
+ "text": "ABSTRACT",
80
+ "text_level": 1,
81
+ "bbox": [
82
+ 450,
83
+ 297,
84
+ 545,
85
+ 311
86
+ ],
87
+ "page_idx": 0
88
+ },
89
+ {
90
+ "type": "text",
91
+ "text": "Logs produced by extensive software systems are integral to monitoring system behaviors. Advanced log analysis facilitates the detection, alerting, and diagnosis of system faults. Log parsing, which entails transforming raw log messages into structured templates, constitutes a critical phase in the automation of log analytics. Existing log parsers fail to identify the correct templates due to reliance on human-made rules. Besides, these methods focus on statistical features while ignoring semantic information in log messages. To address these challenges, we introduce a cutting-edge Log parsing framework with Entropy sampling and chain-of-thought Merging (LEMUR). Specifically, to discard the tedious manual rules, we propose a novel sampling method inspired by information entropy, which efficiently clusters typical logs. Furthermore, to enhance the merging of log templates, we design a chain-of-thought method for large language models (LLMs). LLMs exhibit exceptional semantic comprehension and deftly distinguish between parameters and invariant tokens. We have conducted experiments on large-scale public datasets. Extensive evaluation demonstrates that LEMUR achieves state-of-the-art performance and impressive efficiency. The Code is available at https://github.com/zwpride/lemur.",
92
+ "bbox": [
93
+ 228,
94
+ 329,
95
+ 767,
96
+ 566
97
+ ],
98
+ "page_idx": 0
99
+ },
100
+ {
101
+ "type": "text",
102
+ "text": "1 INTRODUCTION",
103
+ "text_level": 1,
104
+ "bbox": [
105
+ 173,
106
+ 593,
107
+ 336,
108
+ 607
109
+ ],
110
+ "page_idx": 0
111
+ },
112
+ {
113
+ "type": "text",
114
+ "text": "Logs serve as a critical information for system monitoring, offering key insights into system behavior, a fact well-documented in existing literature (He et al., 2021). Their advantage over other types of data lies in their rich informational content and relative ease of interpretation. Through log analysis, several important downstream tasks can be effectively addressed, which include anomaly detection (Du et al., 2017; Nandi et al., 2016), fault diagnosis (He et al., 2018; Zou et al., 2016), and root cause analysis (Tak et al., 2016). Log parsing, a crucial initial step in log analysis (He et al., 2016), separates log messages into two parts: 1) Log Templates. The constant, unchanging parts in logging statements, and 2) Log Variables. The dynamic, changeable details in different executions. In Figure 1, the logging statement logger.info('Wait {wait_time} seconds for {process} to be killed.') can yield various messages like Wait 26 seconds for Thread-20 to be killed. Wait < * > seconds for < * > to be killed is log template, and the changing data like 26 and Thread-20 are log variables.",
115
+ "bbox": [
116
+ 169,
117
+ 625,
118
+ 826,
119
+ 792
120
+ ],
121
+ "page_idx": 0
122
+ },
123
+ {
124
+ "type": "text",
125
+ "text": "In the field of system maintenance, where source code accessibility is often restricted, different parsers (He et al., 2017; Fu et al., 2022; Le & Zhang, 2023; Yu et al., 2023; Jiang et al., 2023) have been developed to differentiate between templates and variables within log messages autonomously. Existing log parsers can be categorized into two groups: syntax-based and semantic-based. Syntax-based log parsers (He et al., 2017; Du & Li, 2016; Makanju et al., 2009; Vaarandi, 2003; Vaarandi & Pihelgas, 2015) utilize specific features or heuristics (e.g., word frequency) to extract the constant parts of log messages as templates. Conversely, semantic-based parsers Liu et al. (2022); Le & Zhang (2023) leverages advanced deep learning frameworks to assimilate semantics and system-specific patterns from annotated log data, thereby facilitating the parsing of new logs. Besides,",
126
+ "bbox": [
127
+ 169,
128
+ 797,
129
+ 826,
130
+ 925
131
+ ],
132
+ "page_idx": 0
133
+ },
134
+ {
135
+ "type": "header",
136
+ "text": "Published as a conference paper at ICLR 2025",
137
+ "bbox": [
138
+ 171,
139
+ 32,
140
+ 478,
141
+ 47
142
+ ],
143
+ "page_idx": 0
144
+ },
145
+ {
146
+ "type": "aside_text",
147
+ "text": "arXiv:2402.18205v5 [cs.SE] 26 Mar 2025",
148
+ "bbox": [
149
+ 22,
150
+ 265,
151
+ 57,
152
+ 708
153
+ ],
154
+ "page_idx": 0
155
+ },
156
+ {
157
+ "type": "page_number",
158
+ "text": "1",
159
+ "bbox": [
160
+ 493,
161
+ 948,
162
+ 504,
163
+ 959
164
+ ],
165
+ "page_idx": 0
166
+ },
167
+ {
168
+ "type": "image",
169
+ "img_path": "images/8cd80afc96c5786b64477820702e60770a087884ac85f5b00867ca737619dd85.jpg",
170
+ "image_caption": [
171
+ "Figure 1: An example of log parsing. Logging Statement cannot be accessed in most cases. Log Message is parsed into a static Template part containing fixed keywords and a Variable part that will vary between different log messages with the same template."
172
+ ],
173
+ "image_footnote": [],
174
+ "bbox": [
175
+ 289,
176
+ 108,
177
+ 709,
178
+ 335
179
+ ],
180
+ "page_idx": 1
181
+ },
182
+ {
183
+ "type": "text",
184
+ "text": "recent works Jiang et al. (2023) leverage large language models (LLMs) (OpenAI, 2023) for log parsing, which also utilizes the powerful semantic understanding of LLMs.",
185
+ "bbox": [
186
+ 169,
187
+ 431,
188
+ 823,
189
+ 462
190
+ ],
191
+ "page_idx": 1
192
+ },
193
+ {
194
+ "type": "text",
195
+ "text": "However, syntax-based methodologies are heavily dependent on meticulously crafted rules. Their performance significantly diminishes with the exponential increase in the volume and intricacy of log data. Furthermore, these techniques often overlook semantic variances among logs. For instance, logs such as success to open file/var/log/system and success to close file/var/log/system display syntactic resemblance, potentially leading to their aggregation by data-driven approaches, which could adversely impact the efficacy of anomaly detection tasks. Semantic-based methods, reliant on pre-labeled logs for training, falter in understanding semantics when applied to unfamiliar domains. LLM-based parsers (Liu et al., 2023; Le & Zhang, 2023; Jiang et al., 2023) have lower availability due to high inference time (such as GPUs) and network latency (Jiao et al., 2023). Besides, LLMs generate unstable results because of the hallucination problem.",
196
+ "bbox": [
197
+ 169,
198
+ 467,
199
+ 823,
200
+ 621
201
+ ],
202
+ "page_idx": 1
203
+ },
204
+ {
205
+ "type": "text",
206
+ "text": "To tackle these challenges, we propose LEMUR, a cutting-edge Log parsing framework with Entropy sampling and Chain-of-Thought Merging (LEMUR). LEMUR brings together the strengths of the syntax-based and semantic-based methods, which consist of three key components: Information Entropy Clustering, Template Generation, and Chain-of-Thought Merging. Specifically, inspired by information theory Gray (2011), we recognize that different logs encapsulate varying quantities of information, while logs of a similar nature contain comparable amounts. Consequently, we have developed a novel sampling method based on information entropy principles that efficiently clusters characteristic logs by dividing large-scale data into multiple clusters and utilizing efficient sampling and clustering algorithms within those clusters, thereby ensuring that LEMUR remains robust and high-performance in large-scale log scenarios. Then in template generation, we determine the variables and the template in the log based on the information entropy of the token at the same location. In the end, motivated by Chain-of-Thought Wei et al. (2022c), we design a three-hop Chain-of-Thought (infer structure, infer semantics, and infer solution) for merging templates.",
207
+ "bbox": [
208
+ 169,
209
+ 627,
210
+ 826,
211
+ 809
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "We have conducted a comprehensive evaluation on public large-scale log datasets of LogHub He et al. (2020) with seven state-of-the-art log parsing approaches. The results show that LEMUR achieves the highest performance than other baselines for the F1 score of grouping and template accuracy. Generally, the main contributions of this work are listed as follows:",
218
+ "bbox": [
219
+ 169,
220
+ 814,
221
+ 823,
222
+ 871
223
+ ],
224
+ "page_idx": 1
225
+ },
226
+ {
227
+ "type": "text",
228
+ "text": "- To the best of our knowledge, we propose LEMUR, the first unsupervised framework to combine information entropy and large language models for online log parsing.",
229
+ "bbox": [
230
+ 215,
231
+ 895,
232
+ 823,
233
+ 925
234
+ ],
235
+ "page_idx": 1
236
+ },
237
+ {
238
+ "type": "header",
239
+ "text": "Published as a conference paper at ICLR 2025",
240
+ "bbox": [
241
+ 173,
242
+ 32,
243
+ 478,
244
+ 47
245
+ ],
246
+ "page_idx": 1
247
+ },
248
+ {
249
+ "type": "page_number",
250
+ "text": "2",
251
+ "bbox": [
252
+ 493,
253
+ 948,
254
+ 503,
255
+ 959
256
+ ],
257
+ "page_idx": 1
258
+ },
259
+ {
260
+ "type": "image",
261
+ "img_path": "images/acfac27e635353173ebe3873804af37e3615cd9d4a0fa21000f1ecb9ca0dc78f.jpg",
262
+ "image_caption": [
263
+ "Figure 2: Overview of our log parsing framework."
264
+ ],
265
+ "image_footnote": [],
266
+ "bbox": [
267
+ 171,
268
+ 97,
269
+ 826,
270
+ 280
271
+ ],
272
+ "page_idx": 2
273
+ },
274
+ {
275
+ "type": "list",
276
+ "sub_type": "text",
277
+ "list_items": [
278
+ "- We introduce a novel sampling method based on information entropy for efficient log clustering. Besides, by utilizing LLMs, we can accurately merge templates based on semantics rather than syntax-based methods.",
279
+ "- Extensive experiments are conducted on public benchmarks to demonstrate the effectiveness of our LEMUR. The results show that LEMUR outperforms other state-of-the-art methods."
280
+ ],
281
+ "bbox": [
282
+ 215,
283
+ 316,
284
+ 826,
285
+ 405
286
+ ],
287
+ "page_idx": 2
288
+ },
289
+ {
290
+ "type": "text",
291
+ "text": "2 METHODOLOGY",
292
+ "text_level": 1,
293
+ "bbox": [
294
+ 171,
295
+ 430,
296
+ 344,
297
+ 446
298
+ ],
299
+ "page_idx": 2
300
+ },
301
+ {
302
+ "type": "text",
303
+ "text": "In this section, we overview LEMUR, a framework designed to enhance log message categorization. Figure 2 shows LEMUR, which consists of three main components: Information Entropy Clustering, Template Generation, and Chain-of-Thought Merging.",
304
+ "bbox": [
305
+ 169,
306
+ 462,
307
+ 823,
308
+ 506
309
+ ],
310
+ "page_idx": 2
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "2.1 INFORMATION ENTROPY CLUSTERING",
315
+ "text_level": 1,
316
+ "bbox": [
317
+ 171,
318
+ 523,
319
+ 483,
320
+ 537
321
+ ],
322
+ "page_idx": 2
323
+ },
324
+ {
325
+ "type": "text",
326
+ "text": "In this section, logs are categorized into buckets based on their length. Some representative logs are selected as the cluster centers within each bucket, and other logs are clustered around these centers.",
327
+ "bbox": [
328
+ 169,
329
+ 550,
330
+ 826,
331
+ 580
332
+ ],
333
+ "page_idx": 2
334
+ },
335
+ {
336
+ "type": "text",
337
+ "text": "Bucket Generation Logs are assigned to buckets $B_{j}$ by a mapping function $f$ based on length: $f:L\\to B$ , with $L$ representing logs and $B$ representing buckets. For each $\\log L_i\\in L$ , we compute its length $\\mathit{len}(L_i)$ after tokenization. The log is then assigned to bucket $B_{j}\\in B$ , where $j$ matches the index of the log's length in the set of unique lengths $l$ . This can be written as $f(L_{i}) = B_{j}$ where $l_{j} = \\mathit{len}(L_{i})$ . This method ensures logs of the same length are grouped, reducing variability and aiding subsequent analysis.",
338
+ "bbox": [
339
+ 169,
340
+ 595,
341
+ 823,
342
+ 679
343
+ ],
344
+ "page_idx": 2
345
+ },
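As a concrete illustration of the bucket mapping $f: L \to B$ described above, the following minimal Python sketch groups logs by token length. It assumes plain whitespace tokenization, and the function name `generate_buckets` is ours, not the paper's.

```python
from collections import defaultdict

def generate_buckets(logs):
    """Sketch of f: L -> B, keying each bucket by the log's token length."""
    buckets = defaultdict(list)
    for log in logs:
        tokens = log.split()  # assumed tokenizer: plain whitespace split
        buckets[len(tokens)].append(tokens)
    return buckets

# Logs with the same token count land in the same bucket.
buckets = generate_buckets([
    "session opened for user root",   # 5 tokens -> bucket 5
    "session opened for user admin",  # 5 tokens -> bucket 5
    "connection reset by peer",       # 4 tokens -> bucket 4
])
```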
346
+ {
347
+ "type": "text",
348
+ "text": "Entropy-based Sampling To identify potential log templates from buckets of logs with identical token lengths, we propose a clustering method inspired by information theory Gray (2011), specifically employing Shannon entropy. Unlike conventional clustering which uses random seeds and iterative refinement, our approach selects logs based on their information content, which is ideal for log analysis due to the variability in log messages.",
349
+ "bbox": [
350
+ 169,
351
+ 696,
352
+ 823,
353
+ 767
354
+ ],
355
+ "page_idx": 2
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "Each $\\log x\\in B_j$ is evaluated by its entropy $E(x) = -\\sum_{i}p(x_{i})\\log p(x_{i})$ , where $x_{i}$ is the token and $p(x_{i})$ its probability, gauging its information content. Logs are then ranked by descending entropy into layers $Lay_1,\\ldots ,Lay_n$ to prioritize those with rich information.",
360
+ "bbox": [
361
+ 169,
362
+ 773,
363
+ 823,
364
+ 816
365
+ ],
366
+ "page_idx": 2
367
+ },
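To make the entropy score concrete, the sketch below computes $E(x)$ for one tokenized log, estimating $p(x_i)$ from token frequencies within that log; the estimation choice is our assumption, since the text does not fix it.

```python
import math
from collections import Counter

def log_entropy(tokens):
    """E(x) = -sum_i p(x_i) * log p(x_i) over the tokens of one log."""
    counts = Counter(tokens)
    total = len(tokens)
    return -sum((c / total) * math.log(c / total) for c in counts.values())

# A log whose tokens are all distinct carries more information than a repetitive one.
assert log_entropy(["open", "file", "a"]) > log_entropy(["retry", "retry", "retry"])
```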
368
+ {
369
+ "type": "text",
370
+ "text": "We select clustering centers from these layers, starting with the highest entropy logs and picking those with either new first tokens or entropy above a threshold. This process repeats until we've chosen $k$ centers or the remaining logs no longer offer new information. Our stopping criterion ensures we gather diverse and informative logs while avoiding redundancy.",
371
+ "bbox": [
372
+ 169,
373
+ 821,
374
+ 823,
375
+ 878
376
+ ],
377
+ "page_idx": 2
378
+ },
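A possible rendering of this selection loop is sketched below, reusing `log_entropy` from the previous sketch: logs are visited in descending entropy order and accepted when they introduce an unseen first token or clear an entropy threshold. The threshold name `tau` and the exact acceptance test are our assumptions about details the text leaves open.

```python
def select_centers(token_lists, k, tau):
    """Pick up to k cluster centers from entropy-ranked logs (a sketch)."""
    ranked = sorted(token_lists, key=log_entropy, reverse=True)
    centers, seen_first = [], set()
    for tokens in ranked:
        if len(centers) >= k:
            break  # stopping criterion: enough centers gathered
        # Accept logs that start differently or are information-rich enough.
        if tokens[0] not in seen_first or log_entropy(tokens) > tau:
            centers.append(tokens)
            seen_first.add(tokens[0])
    return centers
```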
379
+ {
380
+ "type": "text",
381
+ "text": "Refinement with Jaccard Similarity After obtaining the initial set of $k$ samples, we further refine the selection by merging similar log centers based on their Jaccard similarity. This merging",
382
+ "bbox": [
383
+ 169,
384
+ 895,
385
+ 823,
386
+ 925
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "header",
392
+ "text": "Published as a conference paper at ICLR 2025",
393
+ "bbox": [
394
+ 171,
395
+ 32,
396
+ 478,
397
+ 47
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "page_number",
403
+ "text": "3",
404
+ "bbox": [
405
+ 493,
406
+ 948,
407
+ 504,
408
+ 959
409
+ ],
410
+ "page_idx": 2
411
+ },
412
+ {
413
+ "type": "image",
414
+ "img_path": "images/d3891e64de171cca3656f9851e4f16cfb69c115abcfa0e64ea2ebd961f734eb9.jpg",
415
+ "image_caption": [
416
+ "Figure 3: Three-S-hop Chain-of-Thought Merging Method"
417
+ ],
418
+ "image_footnote": [],
419
+ "bbox": [
420
+ 205,
421
+ 99,
422
+ 795,
423
+ 460
424
+ ],
425
+ "page_idx": 3
426
+ },
427
+ {
428
+ "type": "text",
429
+ "text": "process helps eliminate redundancy while preserving the diversity of log samples. We use the Jaccard similarity to measure the similarity between two logs. The Jaccard similarity between two logs $L_{1}$ and $L_{2}$ is calculated as similarity = $\\frac{|L_1 \\cap L_2|}{|L_1 \\cup L_2|}$ . Let $J_{T}$ be the threshold for Jaccard similarity; if the similarity between two centers $(L_{1}, L_{2})$ exceeds $J_{T}$ , we remove $L_{2}$ . This refinement ensures a diverse and representative set of $k'$ log samples and optimizes the balance between coverage and conciseness in the log samples.",
430
+ "bbox": [
431
+ 169,
432
+ 486,
433
+ 823,
434
+ 575
435
+ ],
436
+ "page_idx": 3
437
+ },
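A minimal rendering of this refinement, treating each log as a token set and keeping a center only if it is not too similar to one already kept; the greedy keep-first order is our assumption.

```python
def jaccard(a, b):
    """Jaccard similarity |L1 ∩ L2| / |L1 ∪ L2| over token sets."""
    sa, sb = set(a), set(b)
    return len(sa & sb) / len(sa | sb)

def refine_centers(centers, j_t):
    """Greedy sketch: drop any center whose similarity to a kept one exceeds J_T."""
    kept = []
    for center in centers:
        if all(jaccard(center, other) <= j_t for other in kept):
            kept.append(center)
    return kept  # the k' refined centers
```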
438
+ {
439
+ "type": "text",
440
+ "text": "Token-level Clustering In the clustering process for logs within a bucket $B_{j} \\in B$ , we distinguish between the set of representative logs, $S$ , which serve as cluster centers, and the set of remaining logs, $O$ , which are to be clustered around these centers.",
441
+ "bbox": [
442
+ 169,
443
+ 589,
444
+ 823,
445
+ 633
446
+ ],
447
+ "page_idx": 3
448
+ },
449
+ {
450
+ "type": "text",
451
+ "text": "Let $S = \\{s_1, s_2, \\ldots, s_k\\}$ represent the cluster centers and $O = \\{o_1, o_2, \\ldots, o_n\\}$ denote the logs to be clustered. The objective is to assign each log $o_i \\in O$ to a cluster center $s_j \\in S$ such that the edit distance between them, $d(o_i, s_j)$ , is minimized. The edit distance measures the similarity between two logs in terms of the changes required to convert one into the other at the token level. The edit distance $d(o_i, s_j)$ between a log $o_i$ and a cluster center $s_j$ is defined as:",
452
+ "bbox": [
453
+ 169,
454
+ 638,
455
+ 823,
456
+ 710
457
+ ],
458
+ "page_idx": 3
459
+ },
460
+ {
461
+ "type": "equation",
462
+ "text": "\n$$\nd \\left(o _ {i}, s _ {j}\\right) = \\min _ {s _ {j} \\in S} \\sum_ {t = 1} ^ {T} \\delta \\left(o _ {i, t}, s _ {j, t}\\right)\n$$\n",
463
+ "text_format": "latex",
464
+ "bbox": [
465
+ 388,
466
+ 715,
467
+ 606,
468
+ 756
469
+ ],
470
+ "page_idx": 3
471
+ },
472
+ {
473
+ "type": "text",
474
+ "text": "where $\\delta(o_{i,t}, s_{j,t})$ is the token-level edit distance between the $t$ -th token of $o_i$ and $s_j$ , and $T$ represents the total number of tokens.",
475
+ "bbox": [
476
+ 169,
477
+ 761,
478
+ 823,
479
+ 789
480
+ ],
481
+ "page_idx": 3
482
+ },
483
+ {
484
+ "type": "text",
485
+ "text": "By minimizing $d(o_i, s_j)$ for each log $o_i$ , we cluster logs around their nearest representative center in $S$ , ensuring that logs within a cluster are as similar as possible according to the defined metric.",
486
+ "bbox": [
487
+ 169,
488
+ 796,
489
+ 823,
490
+ 825
491
+ ],
492
+ "page_idx": 3
493
+ },
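Since logs within a bucket share the same token length, the summed token-level distance aligns positions one-to-one, and assignment is a nearest-center search. The sketch below reflects that reading, treating $\delta$ as a 0/1 mismatch indicator, which is our simplification rather than the paper's exact code.

```python
def token_distance(a, b):
    """d(o_i, s_j) as sum over t of delta(o_{i,t}, s_{j,t}) with a 0/1 delta."""
    return sum(x != y for x, y in zip(a, b))

def assign_to_centers(others, centers):
    """Attach each remaining log o_i to the center s_j minimizing d(o_i, s_j)."""
    clusters = {j: [] for j in range(len(centers))}
    for tokens in others:
        j = min(range(len(centers)),
                key=lambda idx: token_distance(tokens, centers[idx]))
        clusters[j].append(tokens)
    return clusters
```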
494
+ {
495
+ "type": "text",
496
+ "text": "2.2 TEMPLATE GENERATION",
497
+ "text_level": 1,
498
+ "bbox": [
499
+ 171,
500
+ 840,
501
+ 387,
502
+ 854
503
+ ],
504
+ "page_idx": 3
505
+ },
506
+ {
507
+ "type": "text",
508
+ "text": "Template generation aims to differentiate variables from fixed components in logs. It starts with finding the Longest Common Subsequence (LCS), which indicates shared elements across logs. Then, each log is compared with the LCS to pinpoint unique tokens and their locations; for example, comparing ABC with the LCS B reveals distinct tokens at positions A-0 and C-2.",
509
+ "bbox": [
510
+ 169,
511
+ 867,
512
+ 823,
513
+ 925
514
+ ],
515
+ "page_idx": 3
516
+ },
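For the LCS step, a compact sketch can lean on Python's difflib to recover the shared subsequence of two tokenized logs and flag the positions that fall outside it; using difflib rather than a hand-rolled dynamic program is our simplification.

```python
from difflib import SequenceMatcher

def non_lcs_positions(tokens_a, tokens_b):
    """Return (position, token) pairs in tokens_a that are not part of the LCS."""
    matcher = SequenceMatcher(a=tokens_a, b=tokens_b)
    in_lcs = set()
    for block in matcher.get_matching_blocks():
        in_lcs.update(range(block.a, block.a + block.size))
    return [(i, tok) for i, tok in enumerate(tokens_a) if i not in in_lcs]

# Mirrors the paper's example: comparing ABC with the LCS token B
# leaves A at position 0 and C at position 2.
assert non_lcs_positions(["A", "B", "C"], ["B"]) == [(0, "A"), (2, "C")]
```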
517
+ {
518
+ "type": "header",
519
+ "text": "Published as a conference paper at ICLR 2025",
520
+ "bbox": [
521
+ 171,
522
+ 32,
523
+ 478,
524
+ 47
525
+ ],
526
+ "page_idx": 3
527
+ },
528
+ {
529
+ "type": "page_number",
530
+ "text": "4",
531
+ "bbox": [
532
+ 493,
533
+ 948,
534
+ 504,
535
+ 959
536
+ ],
537
+ "page_idx": 3
538
+ },
539
+ {
540
+ "type": "table",
541
+ "img_path": "images/9581a771c5dac7fba8340a5ce378ede90a1b9a944067a17dd4ca177083b57b0d.jpg",
542
+ "table_caption": [],
543
+ "table_footnote": [],
544
+ "table_body": "<table><tr><td></td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxyfer</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td></tr><tr><td>Template</td><td>14</td><td>114</td><td>36</td><td>50</td><td>120</td><td>46</td><td>149</td><td>50</td><td>118</td><td>166</td><td>75</td><td>6</td><td>8</td><td>27</td><td>43</td><td>341</td></tr><tr><td>Average Length</td><td>7.44</td><td>8.19</td><td>8.76</td><td>6.30</td><td>6.32</td><td>3.48</td><td>8.51</td><td>7.93</td><td>8.30</td><td>5.40</td><td>2.80</td><td>6.28</td><td>9.35</td><td>8.56</td><td>9.01</td><td>9.17</td></tr><tr><td>Various Lengths Proportion</td><td>0.25%</td><td>0.00%</td><td>0.00%</td><td>0.55%</td><td>0.00%</td><td>0.40%</td><td>2.25%</td><td>0.00%</td><td>0.00%</td><td>3.85%</td><td>0.00%</td><td>0.00%</td><td>47.35%</td><td>0.00%</td><td>0.00%</td><td>0.35%</td></tr><tr><td>Messages</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td></tr></table>",
545
+ "bbox": [
546
+ 174,
547
+ 102,
548
+ 823,
549
+ 143
550
+ ],
551
+ "page_idx": 4
552
+ },
553
+ {
554
+ "type": "table",
555
+ "img_path": "images/80cb61bb080e88985ef3e65c181e0b4ef863a88b8203f033179de69e362dc43d.jpg",
556
+ "table_caption": [
557
+ "Table 1: Summary of LogHub datasets."
558
+ ],
559
+ "table_footnote": [],
560
+ "table_body": "<table><tr><td></td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td></tr><tr><td>D</td><td>:</td><td>=, :, (), -</td><td>:</td><td>=, :, =, , ( )</td><td>=, -, 9</td><td>9</td><td>:=, =, [, 11</td><td>:=, :, 8</td><td>:=, [, 9</td><td>:=, 12</td><td>:=, 12</td><td>:=, 12</td><td>:=, 12</td><td>:=, 4</td><td>20</td><td>12</td></tr><tr><td>k</td><td>2</td><td>8</td><td>6</td><td>8</td><td>9</td><td>9</td><td>11</td><td>8</td><td>25</td><td>9</td><td>12</td><td>12</td><td>12</td><td>4</td><td>20</td><td>12</td></tr><tr><td>J_T</td><td>0.7</td><td>0.7</td><td>0.6</td><td>0.9</td><td>0.6</td><td>0.6</td><td>0.4</td><td>0.6</td><td>0.33</td><td>0.7</td><td>0.7</td><td>0.7</td><td>0.5</td><td>0.7</td><td>0.7</td><td>0.7</td></tr><tr><td>θ</td><td>2.0</td><td>1.7</td><td>2.1</td><td>2.2</td><td>5.5</td><td>1.2</td><td>4.1</td><td>1.1</td><td>0.09</td><td>3.5</td><td>0</td><td>0</td><td>0.1</td><td>0.2</td><td>2.3</td><td>4.7</td></tr></table>",
561
+ "bbox": [
562
+ 174,
563
+ 188,
564
+ 823,
565
+ 234
566
+ ],
567
+ "page_idx": 4
568
+ },
569
+ {
570
+ "type": "text",
571
+ "text": "Table 2: Transposed hyperparameters setting of Loghub datasets. $D$ is the tokens for word-splitting, $k$ denotes the number of the $S$ in each bucket, $J_{T}$ is the Jaccard similarity threshold for within-bucket merging, and $\\theta$ is the entropy threshold for token identification.",
572
+ "bbox": [
573
+ 169,
574
+ 251,
575
+ 823,
576
+ 292
577
+ ],
578
+ "page_idx": 4
579
+ },
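For reference, the per-dataset settings in Table 2 can be carried as a small configuration map; the sketch below shows only the HDFS row and is our own packaging, not the authors' configuration format.

```python
# One row of Table 2, packaged as a config dict (a sketch, not the paper's code).
HPARAMS = {
    "HDFS": {
        "D": [":"],    # delimiter tokens used for word-splitting
        "k": 2,        # number of sampled centers S per bucket
        "J_T": 0.7,    # Jaccard similarity threshold for within-bucket merging
        "theta": 2.0,  # entropy threshold for token identification
    },
}
```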
580
+ {
581
+ "type": "text",
582
+ "text": "For each unique position, we calculate information entropy inline using $H(X) = -\\sum P(x_i)\\log_2P(x_i)$ , where $P(x_{i})$ is the probability of the token at that position, and compile a list of these tokens.",
583
+ "bbox": [
584
+ 169,
585
+ 320,
586
+ 823,
587
+ 362
588
+ ],
589
+ "page_idx": 4
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "To classify a position as a variable, we apply the inline decision: a position is variable if $H > \\theta$ , otherwise fixed, where $H$ represents information entropy and $\\theta$ is a set threshold depending on data characteristics and iterative experiments. When $H$ surpasses $\\theta$ , we substitute all tokens at that position with the variable marker $< * >$ .",
594
+ "bbox": [
595
+ 169,
596
+ 369,
597
+ 823,
598
+ 425
599
+ ],
600
+ "page_idx": 4
601
+ },
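Concretely, the position-wise entropy test can be sketched as below: for each column of aligned tokens we estimate $P(x_i)$ from token frequencies, compute $H$, and emit `<*>` whenever $H > \theta$. The helper names are ours; the log base follows the formula in the text.

```python
import math
from collections import Counter

def column_entropy(column):
    """H(X) = -sum_i P(x_i) * log2 P(x_i) for the tokens at one position."""
    counts = Counter(column)
    total = len(column)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

def build_template(cluster, theta):
    """Replace high-entropy positions with <*>; keep low-entropy ones fixed."""
    template = []
    for column in zip(*cluster):  # logs in a cluster share a token length
        if column_entropy(column) > theta:
            template.append("<*>")
        else:
            template.append(column[0])
    return template
```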
602
+ {
603
+ "type": "text",
604
+ "text": "Additionally, we mark digit-dominated tokens as variables $< * >$ and use NLP to identify fixed-value tokens (e.g., hash values), enhancing variable identification accuracy. This method effectively identifies variables and fixed parts in logs, adapts to different types of logs, and accurately identifies changing parts while preserving the log structure.",
605
+ "bbox": [
606
+ 169,
607
+ 431,
608
+ 825,
609
+ 489
610
+ ],
611
+ "page_idx": 4
612
+ },
613
+ {
614
+ "type": "text",
615
+ "text": "2.3 THREE-S-HOP CHAIN-OF-THOUGHT MERGING",
616
+ "text_level": 1,
617
+ "bbox": [
618
+ 171,
619
+ 505,
620
+ 540,
621
+ 518
622
+ ],
623
+ "page_idx": 4
624
+ },
625
+ {
626
+ "type": "text",
627
+ "text": "In advanced log message analysis, after categorizing messages into distinct clusters and extracting unique log templates, we encounter a significant challenge: reconciling log templates that are semantically similar but differ in length. Despite structural variations, these templates often represent identical or closely related events. This challenge primarily arises from the limitations of length-based classification methods, which frequently fail to recognize that templates with equivalent semantics but different expressions may correspond to the same event type.",
628
+ "bbox": [
629
+ 169,
630
+ 531,
631
+ 823,
632
+ 614
633
+ ],
634
+ "page_idx": 4
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "Illustrated in Figure 3, We propose Three-S-hop Chain-of-Thought Merging using Large Language Models (LLMs) for semantic parsing. This includes data processing and three dialogue rounds: Structure QA (examines structure and length differences), Semantic QA (probes meanings for semantic equivalences), and Solution QA (decides on merging based on prior analyses). This approach merges templates of different lengths but similar meanings through structural and semantic analysis, enhancing accurate identification.",
639
+ "bbox": [
640
+ 169,
641
+ 621,
642
+ 823,
643
+ 705
644
+ ],
645
+ "page_idx": 4
646
+ },
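The three dialogue rounds can be sketched as a prompt pipeline. The wording below is illustrative only (the paper does not publish its prompts), and `ask_llm` stands in for whichever chat-completion API is used.

```python
def merge_via_cot(t1, t2, ask_llm):
    """Three-S-hop merging sketch: structure -> semantics -> solution."""
    history = []
    for question in (
        f"Structure QA: compare the structure and length of '{t1}' and '{t2}'.",
        "Semantic QA: do the two templates describe the same event? Explain.",
        "Solution QA: answer MERGE or KEEP, based on the analysis above.",
    ):
        history.append(ask_llm(history, question))  # each hop sees prior answers
    return "MERGE" in history[-1].upper()
```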
647
+ {
648
+ "type": "text",
649
+ "text": "3 EXPERIMENT",
650
+ "text_level": 1,
651
+ "bbox": [
652
+ 171,
653
+ 724,
654
+ 318,
655
+ 739
656
+ ],
657
+ "page_idx": 4
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "3.1 IMPLEMENT DETAILS",
662
+ "text_level": 1,
663
+ "bbox": [
664
+ 171,
665
+ 757,
666
+ 366,
667
+ 770
668
+ ],
669
+ "page_idx": 4
670
+ },
671
+ {
672
+ "type": "text",
673
+ "text": "Datasets Experiments are conducted on the most widely-used benchmark datasets published in LogHub (He et al., 2020). More details are available in Table 1.",
674
+ "bbox": [
675
+ 169,
676
+ 782,
677
+ 823,
678
+ 811
679
+ ],
680
+ "page_idx": 4
681
+ },
682
+ {
683
+ "type": "text",
684
+ "text": "Implementation and Configuration We implement LEMUR based on Python 3.10, Apple M3 chip with 8-core CPU, 10-core GPU 16GB RAM, and macOS Sonoma(14.2.1). In experiments, $D$ is the token set for word-splitting, $k$ denotes the number of the $S$ in each bucket, the Jaccard similarity threshold for within-bucket merging $J_{T}$ and the entropy threshold for token identification $\\theta$ are shown in Table 2. We use GPT-4 as the base to implement the three-S-hop chain of thought merging, and LEMUR is suitable for other LLMs. LEMUR uses these models to conduct inference just by locally loading the weights of these models or utilizing the APIs provided by these LLMs.",
685
+ "bbox": [
686
+ 169,
687
+ 825,
688
+ 825,
689
+ 925
690
+ ],
691
+ "page_idx": 4
692
+ },
693
+ {
694
+ "type": "header",
695
+ "text": "Published as a conference paper at ICLR 2025",
696
+ "bbox": [
697
+ 173,
698
+ 32,
699
+ 478,
700
+ 47
701
+ ],
702
+ "page_idx": 4
703
+ },
704
+ {
705
+ "type": "page_number",
706
+ "text": "5",
707
+ "bbox": [
708
+ 493,
709
+ 948,
710
+ 504,
711
+ 959
712
+ ],
713
+ "page_idx": 4
714
+ },
715
+ {
716
+ "type": "table",
717
+ "img_path": "images/a767b151e8cb396d355f26a67203c86fd838efcb0f1736512667595e11f392c2.jpg",
718
+ "table_caption": [],
719
+ "table_footnote": [],
720
+ "table_body": "<table><tr><td rowspan=\"2\">Dataset</td><td colspan=\"2\">Drain</td><td colspan=\"2\">Spell</td><td colspan=\"2\">IPLOM</td><td colspan=\"2\">ULP</td><td colspan=\"2\">Brain</td><td colspan=\"2\">LogPPT</td><td colspan=\"2\">LLMParser</td><td colspan=\"2\">LEMUR</td></tr><tr><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td></tr><tr><td>HDFS</td><td>0.999</td><td>0.998</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.998</td><td>0.999</td><td>0.998</td><td>0.957</td><td>0.845</td><td>0.965</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Hadoop</td><td>0.999</td><td>0.948</td><td>0.920</td><td>0.777</td><td>0.996</td><td>0.954</td><td>0.999</td><td>0.950</td><td>0.999</td><td>0.949</td><td>0.999</td><td>0.977</td><td>0.958</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Spark</td><td>0.992</td><td>0.920</td><td>0.991</td><td>0.905</td><td>0.992</td><td>0.920</td><td>0.995</td><td>0.922</td><td>0.999</td><td>0.998</td><td>0.997</td><td>0.848</td><td>0.906</td><td>0.995</td><td>1</td><td>1</td></tr><tr><td>Zookeeper</td><td>0.999</td><td>0.967</td><td>0.999</td><td>0.964</td><td>0.999</td><td>0.993</td><td>0.999</td><td>0.988</td><td>0.999</td><td>0.985</td><td>1</td><td>1</td><td>0.967</td><td>1</td><td>1</td><td>1</td></tr><tr><td>BGL</td><td>0.999</td><td>0.963</td><td>0.957</td><td>0.786</td><td>0.999</td><td>0.939</td><td>0.999</td><td>0.930</td><td>0.999</td><td>0.986</td><td>0.968</td><td>0.455</td><td>0.864</td><td>0.892</td><td>1</td><td>1</td></tr><tr><td>HPC</td><td>0.991</td><td>0.887</td><td>0.986</td><td>0.654</td><td>0.978</td><td>0.829</td><td>0.994</td><td>0.951</td><td>0.998</td><td>0.945</td><td>0.999</td><td>0.941</td><td>0.910</td><td>0.872</td><td>1</td><td>1</td></tr><tr><td>Thunderbird</td><td>0.999</td><td>0.955</td><td>0.994</td><td>0.844</td><td>0.999</td><td>0.663</td><td>0.999</td><td>0.675</td><td>0.999</td><td>0.971</td><td>0.714</td><td>0.262</td><td>0.799</td><td>0.813</td><td>0.999</td><td>0.982</td></tr><tr><td>Windows</td><td>0.999</td><td>0.997</td><td>0.999</td><td>0.989</td><td>0.995</td><td>0.567</td><td>0.989</td><td>0.410</td><td>0.999</td><td>0.997</td><td>0.992</td><td>0.717</td><td>0.984</td><td>0.783</td><td>1</td><td>1</td></tr><tr><td>Linux</td><td>0.992</td><td>0.690</td><td>0.937</td><td>0.605</td><td>0.964</td><td>0.671</td><td>0.476</td><td>0.363</td><td>0.999</td><td>0.996</td><td>0.713</td><td>0.177</td><td>0.921</td><td>0.961</td><td>0.999</td><td>0.988</td></tr><tr><td>Android</td><td>0.996</td><td>0.911</td><td>0.992</td><td>0.919</td><td>0.949</td><td>0.712</td><td>0.971</td><td>0.838</td><td>0.997</td><td>0.961</td><td>0.989</td><td>0.862</td><td>0.990</td><td>0.873</td><td>0.999</td><td>0.995</td></tr><tr><td>HealthApp</td><td>0.918</td><td>0.780</td><td>0.887</td><td>0.639</td><td>0.958</td><td>0.822</td><td>0.993</td><td>0.901</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.978</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Apache</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.582</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Proxifier</td><td>0.785</td><td>0.526</td><td>0.832</td><td>0.526</td><td>0.786</td><td>0.516</td><td>0.940</td><td>0.024</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>OpenSSH</td><td>0.999</td><td>0.787</t
d><td>0.918</td><td>0.554</td><td>0.998</td><td>0.540</td><td>0.940</td><td>0.434</td><td>1</td><td>1</td><td>0.983</td><td>0.436</td><td>0.836</td><td>0.697</td><td>1</td><td>1</td></tr><tr><td>OpenStack</td><td>0.993</td><td>0.733</td><td>0.994</td><td>0.764</td><td>0.909</td><td>0.331</td><td>0.834</td><td>0.491</td><td>1</td><td>1</td><td>0.997</td><td>0.492</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Mac</td><td>0.975</td><td>0.786</td><td>0.963</td><td>0.756</td><td>0.957</td><td>0.670</td><td>0.981</td><td>0.814</td><td>0.996</td><td>0.942</td><td>0.720</td><td>0.761</td><td>0.830</td><td>0.871</td><td>0.984</td><td>0.977</td></tr><tr><td>Average</td><td>0.977</td><td>0.865</td><td>0.961</td><td>0.793</td><td>0.968</td><td>0.756</td><td>0.932</td><td>0.733</td><td>0.999</td><td>0.983</td><td>0.939</td><td>0.710</td><td>0.931</td><td>0.913</td><td>0.999</td><td>0.996</td></tr></table>",
721
+ "bbox": [
722
+ 176,
723
+ 101,
724
+ 823,
725
+ 287
726
+ ],
727
+ "page_idx": 5
728
+ },
729
+ {
730
+ "type": "text",
731
+ "text": "Table 3: FGA and GA on LogHub Dataset.",
732
+ "bbox": [
733
+ 354,
734
+ 303,
735
+ 640,
736
+ 319
737
+ ],
738
+ "page_idx": 5
739
+ },
740
+ {
741
+ "type": "image",
742
+ "img_path": "images/9411af60d30473f59ee18eb41a845afe3989654ada0f27c58789836a93502841.jpg",
743
+ "image_caption": [
744
+ "(a) Boxplot of group accuracy on 16 benchmark datasets."
745
+ ],
746
+ "image_footnote": [],
747
+ "bbox": [
748
+ 179,
749
+ 337,
750
+ 464,
751
+ 498
752
+ ],
753
+ "page_idx": 5
754
+ },
755
+ {
756
+ "type": "image",
757
+ "img_path": "images/a44310b976f7f00551abc1835f2e0789f2af9052792f0f00e107120b247a775f.jpg",
758
+ "image_caption": [
759
+ "(b) Boxplot of F1 score of group accuracy on 16 benchmark datasets.",
760
+ "Figure 4: Comparison of group accuracy and F1 score on 16 benchmark datasets."
761
+ ],
762
+ "image_footnote": [],
763
+ "bbox": [
764
+ 535,
765
+ 338,
766
+ 820,
767
+ 498
768
+ ],
769
+ "page_idx": 5
770
+ },
771
+ {
772
+ "type": "text",
773
+ "text": "3.2 BASELINES AND METRICS",
774
+ "text_level": 1,
775
+ "bbox": [
776
+ 171,
777
+ 575,
778
+ 398,
779
+ 590
780
+ ],
781
+ "page_idx": 5
782
+ },
783
+ {
784
+ "type": "text",
785
+ "text": "Baselines As for baselines, we choose Drain (He et al., 2017), Spell (Du & Li, 2016), IPLOM (Makanju et al., 2009), ULP (Sedki et al., 2022), Brain (Yu et al., 2023), LogPPT (Le & Zhang, 2023) and LLMParser (Jiang et al., 2023) as our baselines.",
786
+ "bbox": [
787
+ 169,
788
+ 601,
789
+ 823,
790
+ 643
791
+ ],
792
+ "page_idx": 5
793
+ },
794
+ {
795
+ "type": "text",
796
+ "text": "F1 score of Grouping Accuracy (FGA) FGA is a template-level metric that focuses on the ratio of correctly grouped templates. Specifically, let $N_{g}$ be the actual correct number of templates in the ground truth, and $N_{p}$ be the number of templates that are generated by a log parser. If $N_{c}$ is the number of templates that are correctly parsed by the log parser, then we can compute the Precision of Grouping Accuracy (PGA) as $\\frac{N_{c}}{N_{p}}$ and the Recall of Grouping Accuracy (RGA) as $\\frac{N_{c}}{N_{g}}$ . The FGA is equal to their harmonic mean, $ie \\frac{2 \\times GPA \\times RGA}{PGA + RGA}$ .",
797
+ "bbox": [
798
+ 169,
799
+ 659,
800
+ 823,
801
+ 753
802
+ ],
803
+ "page_idx": 5
804
+ },
805
+ {
806
+ "type": "text",
807
+ "text": "Grouping Accuracy (GA) GA is computed as the ratio of correctly grouped log messages to the total count of log messages. A log message is considered to be correctly grouped if and only if its template aligns with the same set of log messages as that of the ground truth.",
808
+ "bbox": [
809
+ 169,
810
+ 766,
811
+ 823,
812
+ 809
813
+ ],
814
+ "page_idx": 5
815
+ },
816
+ {
817
+ "type": "text",
818
+ "text": "Execution time. We measure the execution time in seconds and compare LEMUR with other parsers in terms of efficiency.",
819
+ "bbox": [
820
+ 169,
821
+ 824,
822
+ 823,
823
+ 853
824
+ ],
825
+ "page_idx": 5
826
+ },
827
+ {
828
+ "type": "text",
829
+ "text": "3.3 MAIN RESULTS",
830
+ "text_level": 1,
831
+ "bbox": [
832
+ 171,
833
+ 869,
834
+ 323,
835
+ 883
836
+ ],
837
+ "page_idx": 5
838
+ },
839
+ {
840
+ "type": "text",
841
+ "text": "In Table 3, LEMUR, an unsupervised LLM-related model, exhibits remarkable effectiveness and performance in the field of log parsing. When compared to Brain, the current state-of-the-art in",
842
+ "bbox": [
843
+ 169,
844
+ 895,
845
+ 823,
846
+ 925
847
+ ],
848
+ "page_idx": 5
849
+ },
850
+ {
851
+ "type": "header",
852
+ "text": "Published as a conference paper at ICLR 2025",
853
+ "bbox": [
854
+ 173,
855
+ 32,
856
+ 478,
857
+ 47
858
+ ],
859
+ "page_idx": 5
860
+ },
861
+ {
862
+ "type": "page_number",
863
+ "text": "6",
864
+ "bbox": [
865
+ 493,
866
+ 948,
867
+ 504,
868
+ 959
869
+ ],
870
+ "page_idx": 5
871
+ },
872
+ {
873
+ "type": "image",
874
+ "img_path": "images/924dd71228c4109625895c980ead7e2b172d072e6d9a36ad147fe47e99655eab.jpg",
875
+ "image_caption": [
876
+ "Figure 5: Execution time for various datasets."
877
+ ],
878
+ "image_footnote": [],
879
+ "bbox": [
880
+ 181,
881
+ 104,
882
+ 818,
883
+ 262
884
+ ],
885
+ "page_idx": 6
886
+ },
887
+ {
888
+ "type": "image",
889
+ "img_path": "images/6e51146631df5cc7c7169b67ca139f112507fd46af239c073b3cdda91a2e9322.jpg",
890
+ "image_caption": [],
891
+ "image_footnote": [],
892
+ "bbox": [
893
+ 184,
894
+ 306,
895
+ 336,
896
+ 449
897
+ ],
898
+ "page_idx": 6
899
+ },
900
+ {
901
+ "type": "image",
902
+ "img_path": "images/4c1703944fb31e113d2414ece7877f2fafc8b43c2b78d810c40a6c7179e244d7.jpg",
903
+ "image_caption": [],
904
+ "image_footnote": [],
905
+ "bbox": [
906
+ 341,
907
+ 306,
908
+ 495,
909
+ 449
910
+ ],
911
+ "page_idx": 6
912
+ },
913
+ {
914
+ "type": "image",
915
+ "img_path": "images/617f124c3634a86c68d542a5a6a2a2fd53001aa403713466327d6007cf2a88d4.jpg",
916
+ "image_caption": [],
917
+ "image_footnote": [],
918
+ "bbox": [
919
+ 501,
920
+ 305,
921
+ 653,
922
+ 449
923
+ ],
924
+ "page_idx": 6
925
+ },
926
+ {
927
+ "type": "image",
928
+ "img_path": "images/3e6b4257a6a3bd448d9f2537f32705b345eeea083a32d5821ca7b04562fc4ef4.jpg",
929
+ "image_caption": [],
930
+ "image_footnote": [],
931
+ "bbox": [
932
+ 660,
933
+ 306,
934
+ 812,
935
+ 449
936
+ ],
937
+ "page_idx": 6
938
+ },
939
+ {
940
+ "type": "table",
941
+ "img_path": "images/fdb67906cb9a9d10ac98171e86146ad07de762c0ece5ca4c84a748c6a9062879.jpg",
942
+ "table_caption": [
943
+ "Figure 6: Execution time for various dataset sizes."
944
+ ],
945
+ "table_footnote": [],
946
+ "table_body": "<table><tr><td>Method</td><td>Avg. FGA</td><td>Avg. GA</td></tr><tr><td>Random sampling</td><td>0.843</td><td>0.718</td></tr><tr><td>First-token sampling</td><td>0.913</td><td>0.804</td></tr><tr><td>Entropy sampling</td><td>0.968</td><td>0.851</td></tr><tr><td>Entropy + First-token sampling (Ours)</td><td>0.999</td><td>0.996</td></tr></table>",
947
+ "bbox": [
948
+ 338,
949
+ 486,
950
+ 660,
951
+ 550
952
+ ],
953
+ "page_idx": 6
954
+ },
955
+ {
956
+ "type": "table",
957
+ "img_path": "images/a30c7853ff5372da90b632c1eac4d58a76070c4492ab09b91fdc5044561ab5f4.jpg",
958
+ "table_caption": [
959
+ "Table 4: Performance on different sampling methods."
960
+ ],
961
+ "table_footnote": [],
962
+ "table_body": "<table><tr><td>Method</td><td>Metric</td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td><td>Average</td></tr><tr><td>LEMUR w/o CoT</td><td>FGA</td><td>0.999</td><td>0.999</td><td>1</td><td>0.999</td><td>0.999</td><td>0.999</td><td>0.999</td><td>1</td><td>0.999</td><td>0.992</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.982</td><td>0.998</td></tr><tr><td>LEMUR w/o CoT</td><td>GA</td><td>0.998</td><td>0.977</td><td>1</td><td>0.995</td><td>0.989</td><td>0.996</td><td>0.959</td><td>1</td><td>0.986</td><td>0.956</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.956</td><td>0.988</td></tr><tr><td>LEMUR</td><td>FGA</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>1</td><td>0.999</td><td>0.999</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.984</td><td>0.999</td></tr><tr><td>LEMUR</td><td>GA</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.982</td><td>1</td><td>0.988</td><td>0.995</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.977</td><td>0.996</td></tr></table>",
963
+ "bbox": [
964
+ 176,
965
+ 583,
966
+ 823,
967
+ 623
968
+ ],
969
+ "page_idx": 6
970
+ },
971
+ {
972
+ "type": "table",
973
+ "img_path": "images/c7f3ec1d3e4a4e5ecbefcc1e56f5f1e130626e010165b256960a70d1f54a2a38.jpg",
974
+ "table_caption": [
975
+ "Table 5: FGA and GA on LogHub between LEMUR with and without CoT."
976
+ ],
977
+ "table_footnote": [],
978
+ "table_body": "<table><tr><td>Configuration</td><td>Avg.FGA</td><td>Avg.GA</td></tr><tr><td>Default (Our Method)</td><td>0.999</td><td>0.996</td></tr><tr><td>KMeans Clustering (Sinaga &amp; Yang, 2020)</td><td>0.685</td><td>0.698</td></tr><tr><td>DBSCAN Clustering (Khan et al., 2014)</td><td>0.731</td><td>0.744</td></tr><tr><td>Our Method w/o Bucket Grouping</td><td>0.355</td><td>0.394</td></tr></table>",
979
+ "bbox": [
980
+ 338,
981
+ 656,
982
+ 660,
983
+ 717
984
+ ],
985
+ "page_idx": 6
986
+ },
987
+ {
988
+ "type": "text",
989
+ "text": "Table 6: Additional Ablation Studies on LEMUR",
990
+ "bbox": [
991
+ 336,
992
+ 733,
993
+ 658,
994
+ 747
995
+ ],
996
+ "page_idx": 6
997
+ },
998
+ {
999
+ "type": "text",
1000
+ "text": "unsupervised models, LEMUR demonstrates superior or comparable results across various datasets, underscoring its efficacy in unsupervised learning without the need for annotated data. Moreover, even when juxtaposed with supervised models like LogPPT and LLVMParser, LEMUR shows equal or better performance in most datasets, based on FGA and GA metrics. This is particularly noteworthy as it highlights the capability of LEMUR to match or exceed the performance levels of supervised models, despite the absence of explicit label guidance.",
1001
+ "bbox": [
1002
+ 169,
1003
+ 763,
1004
+ 826,
1005
+ 848
1006
+ ],
1007
+ "page_idx": 6
1008
+ },
1009
+ {
1010
+ "type": "text",
1011
+ "text": "In Figure 4, LEMUR shows the robust performance in GA and FGA. The consistency and robustness of LEMUR are evident in its high FGA and GA across diverse datasets such as HDFS, Hadoop, Spark, etc. This consistency emphasizes its adaptability and robustness to various log parsing requirements. In conclusion, as an unsupervised LLM-related model, LEMUR significantly stands out in the log parsing domain.",
1012
+ "bbox": [
1013
+ 169,
1014
+ 854,
1015
+ 826,
1016
+ 925
1017
+ ],
1018
+ "page_idx": 6
1019
+ },
1020
+ {
1021
+ "type": "header",
1022
+ "text": "Published as a conference paper at ICLR 2025",
1023
+ "bbox": [
1024
+ 173,
1025
+ 32,
1026
+ 478,
1027
+ 47
1028
+ ],
1029
+ "page_idx": 6
1030
+ },
1031
+ {
1032
+ "type": "page_number",
1033
+ "text": "7",
1034
+ "bbox": [
1035
+ 493,
1036
+ 948,
1037
+ 504,
1038
+ 959
1039
+ ],
1040
+ "page_idx": 6
1041
+ },
1042
+ {
1043
+ "type": "text",
1044
+ "text": "Figure 5 presents a comprehensive analysis of the execution times for multiple algorithms: LEMUR, Brain, Drain, IPLOM, Spell, and ULP, across various datasets, comprising 2K data points each. This figure provides critical insights into the efficiency and scalability of these algorithms in processing data. Notably, the execution time of LEMUR stands out for its relative brevity across the majority of the datasets. LEMUR demonstrates a significantly reduced execution time on several key datasets, including Hadoop, BGL, Windows, Android, and Mac, highlighting its efficiency and optimized processing capabilities. Furthermore, we extend this analysis to evaluate the performance scalability of these algorithms across four distinct datasets: BGL, HDFS, Android, and Spark. This extension, as depicted in Figure 6, encompasses varying dataset sizes, thereby providing a more nuanced understanding of each algorithm's adaptability and performance under different data volume conditions in real-world scenarios where dataset sizes can vary significantly.",
1045
+ "bbox": [
1046
+ 169,
1047
+ 103,
1048
+ 823,
1049
+ 257
1050
+ ],
1051
+ "page_idx": 7
1052
+ },
1053
+ {
1054
+ "type": "text",
1055
+ "text": "Compared to all unsupervised methods, we have achieved more superior performance in the preliminary stage before employing LLM. Furthermore, compared to other LLM-based methods, which are mostly supervised, such as LogPPT and LLVMParser, which are essentially inference after fine-tuning, the training process consumes more GPU resources for local deployment or involves larger data transmission over the network for closed-source ChatGPT. LEMUR achieves better results without fine-tuning the LLM. Whether for local deployment inference or remote API, it only utilizes GPU resources for inference, not for fine-tuning. Thus, it requires fewer GPU resources or less network latency in data transmission. In summary, LEMUR has achieved superior performance and unique advantages for unsupervised log parsing or supervised fine-tuning of LLM.",
1056
+ "bbox": [
1057
+ 169,
1058
+ 263,
1059
+ 826,
1060
+ 388
1061
+ ],
1062
+ "page_idx": 7
1063
+ },
1064
+ {
1065
+ "type": "text",
1066
+ "text": "4 ABLATION",
1067
+ "text_level": 1,
1068
+ "bbox": [
1069
+ 171,
1070
+ 409,
1071
+ 294,
1072
+ 422
1073
+ ],
1074
+ "page_idx": 7
1075
+ },
1076
+ {
1077
+ "type": "text",
1078
+ "text": "4.1 EFFECT OF ENTROPY SAMPLING",
1079
+ "text_level": 1,
1080
+ "bbox": [
1081
+ 171,
1082
+ 438,
1083
+ 441,
1084
+ 452
1085
+ ],
1086
+ "page_idx": 7
1087
+ },
1088
+ {
1089
+ "type": "text",
1090
+ "text": "In our comparative analysis, as delineated in Table 4, we meticulously evaluate four distinct sampling methodologies: Random sampling, First-token sampling, Entropy sampling, and a hybrid approach combining Entropy and First-token sampling. Our results, derived from a systematic and empirical evaluation, reveal that the hybrid Entropy + First-token sampling method exhibits superior performance over the other techniques under consideration. The enhanced effectiveness of this method is likely attributable to its synergistic integration of the entropy-based selection criterion, which effectively captures the variability within the data, and the first-token approach, which ensures the representativeness of the initial elements. This fusion of strategies enables a more nuanced and effective sampling process, as evidenced by our empirical findings.",
1091
+ "bbox": [
1092
+ 169,
1093
+ 464,
1094
+ 823,
1095
+ 589
1096
+ ],
1097
+ "page_idx": 7
1098
+ },
1099
+ {
1100
+ "type": "text",
1101
+ "text": "As illustrated in Table 5, which provides a detailed comparison between two versions of LEMUR: one implementing the three-hop Chain-of-Thought approach and the other without it. The left columns of the table present the performance metrics of LEMUR devoid of Chain-of-Thought (CoT), while the right columns display the outcomes following the integration of the CoT methodology. The data delineates a useful, albeit modest, improvement in the performance of LEMUR when augmented with the CoT approach. This enhancement is particularly evident in the FGA and GA metrics across a range of datasets, including Hadoop, BGL, HPC, Thunderbird, Linux, Android, and Mac.",
1102
+ "bbox": [
1103
+ 169,
1104
+ 597,
1105
+ 823,
1106
+ 694
1107
+ ],
1108
+ "page_idx": 7
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "The observed limited degree of enhancement can be ascribed to two primary factors. The first is the already high baseline performance of LEMUR without the incorporation of CoT, which inherently constrains the potential for significant further improvements. The second factor pertains to the relative simplicity of the loghub datasets. In these scenarios, the application of a sophisticated methodology like CoT results in only marginal improvements, primarily because the datasets do not present sufficient complexity to fully exploit and showcase the enhanced capabilities of the CoT-augmented LEMUR.",
1113
+ "bbox": [
1114
+ 169,
1115
+ 700,
1116
+ 823,
1117
+ 797
1118
+ ],
1119
+ "page_idx": 7
1120
+ },
1121
+ {
1122
+ "type": "text",
1123
+ "text": "4.2 ADDITIONAL ABLATION STUDIES ON LEMUR",
1124
+ "text_level": 1,
1125
+ "bbox": [
1126
+ 171,
1127
+ 814,
1128
+ 529,
1129
+ 828
1130
+ ],
1131
+ "page_idx": 7
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "text": "To further validate the robustness of LEMUR, we conducted ablation studies focusing on the impact of different clustering algorithms and the effectiveness of the bucket grouping strategy. Specifically, we experimented with replacing our clustering mechanism with KMeans (Sinaga & Yang, 2020) and DBSCAN (Khan et al., 2014) and evaluated the performance when disabling the bucket grouping based on the length assumption. These modifications aim to probe the sensitivity of LEMUR to these components.",
1136
+ "bbox": [
1137
+ 169,
1138
+ 840,
1139
+ 823,
1140
+ 924
1141
+ ],
1142
+ "page_idx": 7
1143
+ },
1144
+ {
1145
+ "type": "header",
1146
+ "text": "Published as a conference paper at ICLR 2025",
1147
+ "bbox": [
1148
+ 171,
1149
+ 32,
1150
+ 478,
1151
+ 47
1152
+ ],
1153
+ "page_idx": 7
1154
+ },
1155
+ {
1156
+ "type": "page_number",
1157
+ "text": "8",
1158
+ "bbox": [
1159
+ 493,
1160
+ 948,
1161
+ 504,
1162
+ 959
1163
+ ],
1164
+ "page_idx": 7
1165
+ },
1166
+ {
1167
+ "type": "text",
1168
+ "text": "The results, as illustrated in Table 6, highlight the critical role of the selected clustering algorithm and bucket grouping strategy in LEMUR's performance. The substantial decrease in Avg.FGA and Avg.GA when employing alternative clustering methods or omitting the bucket grouping underscores their importance in achieving optimal outcomes. This ablation study confirms the necessity of careful component selection and validates the robustness of LEMUR's methodology.",
1169
+ "bbox": [
1170
+ 169,
1171
+ 103,
1172
+ 823,
1173
+ 176
1174
+ ],
1175
+ "page_idx": 8
1176
+ },
1177
+ {
1178
+ "type": "text",
1179
+ "text": "5 RELATED WORK",
1180
+ "text_level": 1,
1181
+ "bbox": [
1182
+ 171,
1183
+ 200,
1184
+ 346,
1185
+ 217
1186
+ ],
1187
+ "page_idx": 8
1188
+ },
1189
+ {
1190
+ "type": "text",
1191
+ "text": "5.1 LOG Parser",
1192
+ "text_level": 1,
1193
+ "bbox": [
1194
+ 171,
1195
+ 237,
1196
+ 305,
1197
+ 250
1198
+ ],
1199
+ "page_idx": 8
1200
+ },
1201
+ {
1202
+ "type": "text",
1203
+ "text": "In the evolving field of automatic log analysis, crucial for distributed systems and cloud computing, significant progress has been made in log parsing techniques, categorized into frequent pattern mining, clustering, and heuristics rules. Frequent pattern mining is exemplified by SLCT (Vaarandi, 2003) which groups logs based on token frequency, and LogCluster (Vaarandi & Pihelgas, 2015) which removes positional constraints in log grouping. Clustering approaches include LogMine (Hamooni et al., 2016) with its multi-layered clustering system, LKE(Fu et al., 2009) using edit distance and position weighing, SHISO (Mizutani, 2013) improving efficiency through hierarchical clustering, LenMa (Shima, 2016) employing token length vectors, and LPV (Xiao et al., 2020) which uses semantic vectors from word2vec. In the heuristics rules category, IPLOM (Makanju et al., 2009) partitions logs by length and token position, Spell (Du & Li, 2016) approaches parsing as the longest common sequential problem, Drain (He et al., 2017) groups logs by length and prefixes for template updates, and Prefix-Graph (Chu et al., 2021) merges prefix trees into graphs for template generation. Recent advancements have introduced deep learning-based algorithms like Nulog (Nedelkoski et al., 2021) Uniparser (Liu et al., 2022), and LogAP (Rand & Miranskyy, 2021) utilizing comparative learning and machine translation for parsing. However, these deep learning methods face challenges in efficiency and high operational costs due to GPU requirements.",
1204
+ "bbox": [
1205
+ 169,
1206
+ 266,
1207
+ 826,
1208
+ 489
1209
+ ],
1210
+ "page_idx": 8
1211
+ },
1212
+ {
1213
+ "type": "text",
1214
+ "text": "5.2 LARGE LANGUAGE MODEL",
1215
+ "text_level": 1,
1216
+ "bbox": [
1217
+ 171,
1218
+ 512,
1219
+ 406,
1220
+ 525
1221
+ ],
1222
+ "page_idx": 8
1223
+ },
1224
+ {
1225
+ "type": "text",
1226
+ "text": "Language modeling using self-supervised learning and large-scale data, significantly enhances various natural language processing tasks. Specifically, pre-training a Transformer decoder (OpenAI, 2023; Vaswani et al., 2017; Yang et al., 2024a; 2025; 2024b; Ouyang et al., 2022a; Wei et al., 2022a; Yang et al., 2023; Nguyen, 2023; Liu et al., 2024; Chai et al., 2024; Zhang et al., 2024b; a; 2025) aids in unconditional text generation. Performance improvements (BertSum, 2023; Yang et al., 2020b; 2022a;b; 2021b;a; 2020a) in diverse tasks have been linked to the enlargement of Pre-training Language Models (PLMs) by increasing model or data size. This has led to the creation of increasingly larger PLMs, such as GPT-3 with 175 billion parameters and PaLM with 540 billion (Anil et al., 2023), guided by the scaling laws of large language models (Aghajanyan et al., 2023). Despite their similar architectures and pre-training tasks, larger PLMs, such as GPT-4 (OpenAI, 2023), exhibit unique behaviors and emergent abilities, excelling in complex tasks. A prime example is ChatGPT, adapting GPT-series LLMs for engaging dialogues, and showcasing advanced conversational skills. Fine-tuning LLMs on various datasets (Wei et al., 2022b) yields promising results, using human or LLM-created prompts for instruction tuning and refining generations. Chain-of-thought prompting (Wei et al., 2022b), where models explain their reasoning for complex problems, and RLHF (Ouyang et al., 2022b), a strategy significantly enhance their performance.",
1227
+ "bbox": [
1228
+ 169,
1229
+ 541,
1230
+ 826,
1231
+ 763
1232
+ ],
1233
+ "page_idx": 8
1234
+ },
1235
+ {
1236
+ "type": "text",
1237
+ "text": "6 CONCLUSION",
1238
+ "text_level": 1,
1239
+ "bbox": [
1240
+ 171,
1241
+ 791,
1242
+ 318,
1243
+ 806
1244
+ ],
1245
+ "page_idx": 8
1246
+ },
1247
+ {
1248
+ "type": "text",
1249
+ "text": "To enhance log analysis in complex software systems, we propose LEMUR. This framework replaces traditional rule-based methods with an information entropy-based sampling for efficient log clustering and employs large language models (LLMs) for advanced semantic comprehension in template merging. Information entropy streamlines the process of distinguishing between disparate log messages based on their inherent informational content for efficient log clustering. LEMUR has demonstrated superior performance and efficiency in log parsing, validated through extensive tests on large-scale datasets.",
1250
+ "bbox": [
1251
+ 169,
1252
+ 825,
1253
+ 823,
1254
+ 925
1255
+ ],
1256
+ "page_idx": 8
1257
+ },
1258
+ {
1259
+ "type": "header",
1260
+ "text": "Published as a conference paper at ICLR 2025",
1261
+ "bbox": [
1262
+ 171,
1263
+ 32,
1264
+ 478,
1265
+ 47
1266
+ ],
1267
+ "page_idx": 8
1268
+ },
1269
+ {
1270
+ "type": "page_number",
1271
+ "text": "9",
1272
+ "bbox": [
1273
+ 493,
1274
+ 948,
1275
+ 504,
1276
+ 959
1277
+ ],
1278
+ "page_idx": 8
1279
+ },
1280
+ {
1281
+ "type": "text",
1282
+ "text": "REFERENCES",
1283
+ "text_level": 1,
1284
+ "bbox": [
1285
+ 173,
1286
+ 102,
1287
+ 287,
1288
+ 117
1289
+ ],
1290
+ "page_idx": 9
1291
+ },
1292
+ {
1293
+ "type": "list",
1294
+ "sub_type": "ref_text",
1295
+ "list_items": [
1296
+ "Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 265-279. PMLR, 2023.",
1297
+ "Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, Eric Chu, Jonathan H. Clark, Laurent El Shafey, Yanping Huang, Kathy Meier-Hellstern, Gaurav Mishra, Erica Moreira, Mark Omernick, Kevin Robinson, Sebastian Ruder, Yi Tay, Kefan Xiao, Yuzhong Xu, Yujing Zhang, Gustavo Hernandez Abrego, Junwhan Ahn, Jacob Austin, Paul Barham, Jan A. Botha, James Bradbury, Siddhartha Brahma, Kevin Brooks, Michele Catasta, Yong Cheng, Colin Cherry, Christopher A. Choquette-Choo, Aakanksha Chowdhery, Clément Crepy, Shachi Dave, Mostafa Dehghani, Sunipa Dev, Jacob Devlin, Mark Diaz, Nan Du, Ethan Dyer, Vladimir Feinberg, Fangxiaoyu Feng, Vlad Fienber, Markus Freitag, Xavier Garcia, Sebastian Gehrmann, Lucas Gonzalez, and et al. Palm 2 technical report. CoRR, abs/2305.10403, 2023.",
1298
+ "BertSum. source code, 2023. URL https://github.com/nlpyang/BertSum.",
1299
+ "Linzheng Chai, Shukai Liu, Jian Yang, Yuwei Yin, Ke Jin, Jiaheng Liu, Tao Sun, Ge Zhang, Changyu Ren, Hongcheng Guo, et al. Mceval: Massively multilingual code evaluation. arXiv preprint arXiv:2406.07436, 2024.",
1300
+ "Guojun Chu, Jingyu Wang, Qi Qi, Haifeng Sun, Shimin Tao, and Jianxin Liao. Prefix-graph: A versatile log parsing approach merging prefix tree with probabilistic graph. In 2021 IEEE 37th International Conference on Data Engineering (ICDE), pp. 2411-2422. IEEE, 2021.",
1301
+ "Min Du and Feifei Li. Spell: Streaming parsing of system event logs. In 2016 IEEE 16th International Conference on Data Mining (ICDM), pp. 859-864. IEEE, 2016.",
1302
+ "Min Du, Feifei Li, Guineng Zheng, and Vivek Srikumar. Deeplog: Anomaly detection and diagnosis from system logs through deep learning. In Proceedings of the 2017 ACM SIGSAC conference on computer and communications security, pp. 1285-1298, 2017.",
1303
+ "Qiang Fu, Jian-Guang Lou, Yi Wang, and Jiang Li. Execution anomaly detection in distributed systems through unstructured log analysis. In 2009 ninth IEEE international conference on data mining, pp. 149-158. IEEE, 2009.",
1304
+ "Ying Fu, Meng Yan, Jian Xu, Jianguo Li, Zhongxin Liu, Xiaohong Zhang, and Dan Yang. Investigating and improving log parsing in practice. In Proceedings of the 30th ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 1566-1577, 2022.",
1305
+ "Robert M Gray. Entropy and information theory. Springer Science & Business Media, 2011.",
1306
+ "Hossein Hamooni, Biplob Debnath, Jianwu Xu, Hui Zhang, Guofei Jiang, and Abdullah Mueen. Logmine: Fast pattern recognition for log analytics. In Proceedings of the 25th ACM International on Conference on Information and Knowledge Management, pp. 1573-1582, 2016.",
1307
+ "Pinjia He, Jieming Zhu, Shilin He, Jian Li, and Michael R Lyu. An evaluation study on log parsing and its use in log mining. In 2016 46th annual IEEE/IFIP international conference on dependable systems and networks (DSN), pp. 654-661. IEEE, 2016.",
1308
+ "Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R Lyu. Drain: An online log parsing approach with fixed depth tree. In 2017 IEEE international conference on web services (ICWS), pp. 33-40. IEEE, 2017.",
1309
+ "Shilin He, Qingwei Lin, Jian-Guang Lou, Hongyu Zhang, Michael R Lyu, and Dongmei Zhang. Identifying impactful service system problems via log analysis. In Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 60-70, 2018."
1310
+ ],
1311
+ "bbox": [
1312
+ 171,
1313
+ 125,
1314
+ 825,
1315
+ 924
1316
+ ],
1317
+ "page_idx": 9
1318
+ },
1319
+ {
1320
+ "type": "header",
1321
+ "text": "Published as a conference paper at ICLR 2025",
1322
+ "bbox": [
1323
+ 171,
1324
+ 32,
1325
+ 478,
1326
+ 47
1327
+ ],
1328
+ "page_idx": 9
1329
+ },
1330
+ {
1331
+ "type": "page_number",
1332
+ "text": "10",
1333
+ "bbox": [
1334
+ 490,
1335
+ 946,
1336
+ 509,
1337
+ 960
1338
+ ],
1339
+ "page_idx": 9
1340
+ },
1341
+ {
1342
+ "type": "list",
1343
+ "sub_type": "ref_text",
1344
+ "list_items": [
1345
+ "Shilin He, Jieming Zhu, Pinjia He, and Michael R Lyu. Loghub: a large collection of system log datasets towards automated log analytics. arXiv preprint arXiv:2008.06448, 2020.",
1346
+ "Shilin He, Pinjia He, Zhuangbin Chen, Tianyi Yang, Yuxin Su, and Michael R Lyu. A survey on automated log analysis for reliability engineering. ACM computing surveys (CSUR), 54(6):1-37, 2021.",
1347
+ "Zhihan Jiang, Jinyang Liu, Zhuangbin Chen, Yichen Li, Junjie Huang, Yintong Huo, Pinjia He, Jiazhen Gu, and Michael R Lyu. Llmparser: A llm-based log parsing framework. arXiv preprint arXiv:2310.01796, 2023.",
1348
+ "Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Xing Wang, and Zhaopeng Tu. Is chatgpt a good translator? a preliminary study. arXiv preprint arXiv:2301.08745, 2023.",
1349
+ "Kamran Khan, Saif Ur Rehman, Kamran Aziz, Simon Fong, and Sababady Sarasvady. Dbscan: Past, present and future. In The fifth international conference on the applications of digital information and web technologies (ICADIWT 2014), pp. 232-238. IEEE, 2014.",
1350
+ "Van-Hoang Le and Hongyu Zhang. Log parsing with prompt-based few-shot learning. arXiv preprint arXiv:2302.07435, 2023.",
1351
+ "Shukai Liu, Linzheng Chai, Jian Yang, Jiajun Shi, He Zhu, Liran Wang, Ke Jin, Wei Zhang, Hualei Zhu, Shuyue Guo, et al. Mdeval: Massively multilingual code debugging. arXiv preprint arXiv:2411.02310, 2024.",
1352
+ "Yilun Liu, Shimin Tao, Weibin Meng, Jingyu Wang, Wenbing Ma, Yanqing Zhao, Yuhang Chen, Hao Yang, Yanfei Jiang, and Xun Chen. Logprompt: Prompt engineering towards zero-shot and interpretable log analysis. arXiv preprint arXiv:2308.07610, 2023.",
1353
+ "Yudong Liu, Xu Zhang, Shilin He, Hongyu Zhang, Liquan Li, Yu Kang, Yong Xu, Minghua Ma, Qingwei Lin, Yingnong Dang, et al. Uniparser: A unified log parser for heterogeneous log data. In Proceedings of the ACM Web Conference 2022, pp. 1893-1901, 2022.",
1354
+ "Adetokunbo AO Makanju, A Nur Zincir-Heywood, and Evangelos E Milios. Clustering event logs using iterative partitioning. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 1255–1264, 2009.",
1355
+ "Masayoshi Mizutani. Incremental mining of system log format. In 2013 IEEE International Conference on Services Computing, pp. 595-602. IEEE, 2013.",
1356
+ "Animesh Nandi, Atri Mandal, Shubham Atreja, Gargi B Dasgupta, and Subhrajit Bhattacharya. Anomaly detection using program control flow graph mining from execution logs. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining, pp. 215-224, 2016.",
1357
+ "Sasho Nedelkoski, Jasmin Bogatinovski, Alexander Acker, Jorge Cardoso, and Odej Kao. Self-supervised log parsing. In Machine Learning and Knowledge Discovery in Databases: Applied Data Science Track: European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part IV, pp. 122-138. Springer, 2021.",
1358
+ "Ha-Thanh Nguyen. A brief report on lawgpt 1.0: A virtual legal assistant based on GPT-3. CoRR, abs/2302.05729, 2023.",
1359
+ "OpenAI. Gpt-4 technical report, 2023.",
1360
+ "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022a.",
1361
+ "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul F. Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback. In NeurIPS, 2022b."
1362
+ ],
1363
+ "bbox": [
1364
+ 171,
1365
+ 102,
1366
+ 825,
1367
+ 922
1368
+ ],
1369
+ "page_idx": 10
1370
+ },
1371
+ {
1372
+ "type": "header",
1373
+ "text": "Published as a conference paper at ICLR 2025",
1374
+ "bbox": [
1375
+ 171,
1376
+ 32,
1377
+ 478,
1378
+ 47
1379
+ ],
1380
+ "page_idx": 10
1381
+ },
1382
+ {
1383
+ "type": "page_number",
1384
+ "text": "11",
1385
+ "bbox": [
1386
+ 490,
1387
+ 948,
1388
+ 506,
1389
+ 959
1390
+ ],
1391
+ "page_idx": 10
1392
+ },
1393
+ {
1394
+ "type": "list",
1395
+ "sub_type": "ref_text",
1396
+ "list_items": [
1397
+ "Jared Rand and Andriy Miranskyy. On automatic parsing of log records. In 2021 IEEE/ACM 43rd International Conference on Software Engineering: New Ideas and Emerging Results (ICSE-NIER), pp. 41-45. IEEE, 2021.",
1398
+ "Issam Sedki, Abdelwahab Hamou-Lhadj, Otmane Ait-Mohamed, and Mohammed A Shehab. An effective approach for parsing large log files. In 2022 IEEE International Conference on Software Maintenance and Evolution (ICSME), pp. 1-12. IEEE, 2022.",
1399
+ "Keiichi Shima. Length matters: Clustering system log messages using length of words. arXiv preprint arXiv:1611.03213, 2016.",
1400
+ "Kristina P Sinaga and Miin-Shen Yang. Unsupervised k-means clustering algorithm. IEEE access, 8:80716-80727, 2020.",
1401
+ "Byung Chul Tak, Shu Tao, Lin Yang, Chao Zhu, and Yaoping Ruan. Logan: Problem diagnosis in the cloud using log-based reference models. In 2016 IEEE International Conference on Cloud Engineering (IC2E), pp. 62-67. IEEE, 2016.",
1402
+ "Risto Vaarandi. A data clustering algorithm for mining patterns from event logs. In Proceedings of the 3rd IEEE Workshop on IP Operations & Management (IPOM 2003) (IEEE Cat. No. 03EX764), pp. 119-126. IEEE, 2003.",
1403
+ "Risto Vaarandi and Mauno Pihelgas. Logcluster-a data clustering and pattern mining algorithm for event logs. In 2015 11th International conference on network and service management (CNSM), pp. 1-7. IEEE, 2015.",
1404
+ "Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NIPS, 2017.",
1405
+ "Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners, 2022a.",
1406
+ "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In NeurIPS, 2022b.",
1407
+ "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In NeurIPS, 2022c.",
1408
+ "Tong Xiao, Zhe Quan, Zhi-Jie Wang, Kaiqi Zhao, and Xiangke Liao. Lpv: A log parser based on vectorization for offline and online log parsing. In 2020 IEEE International Conference on Data Mining (ICDM), pp. 1346-1351. IEEE, 2020.",
1409
+ "Hongyang Yang, Xiao-Yang Liu, and Christina Dan Wang. Fingpt: Open-source financial large language models. CoRR, abs/2306.06031, 2023.",
1410
+ "Jian Yang, Shuming Ma, Dongdong Zhang, Zhoujun Li, and Ming Zhou. Improving neural machine translation with soft template prediction. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel R. Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pp. 5979-5989. Association for Computational Linguistics, 2020a. doi: 10.18653/V1/2020.ACL-MAIN.531. URL https://doi.org/10.18653/v1/2020.acl-main.531.",
1411
+ "Jian Yang, Shuming Ma, Dongdong Zhang, Shuangzhi Wu, Zhoujun Li, and Ming Zhou. Alternating language modeling for cross-lingual pre-training. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pp. 9386-9393. AAAI Press, 2020b."
1412
+ ],
1413
+ "bbox": [
1414
+ 171,
1415
+ 102,
1416
+ 825,
1417
+ 922
1418
+ ],
1419
+ "page_idx": 11
1420
+ },
1421
+ {
1422
+ "type": "header",
1423
+ "text": "Published as a conference paper at ICLR 2025",
1424
+ "bbox": [
1425
+ 171,
1426
+ 32,
1427
+ 478,
1428
+ 47
1429
+ ],
1430
+ "page_idx": 11
1431
+ },
1432
+ {
1433
+ "type": "page_number",
1434
+ "text": "12",
1435
+ "bbox": [
1436
+ 490,
1437
+ 946,
1438
+ 508,
1439
+ 960
1440
+ ],
1441
+ "page_idx": 11
1442
+ },
1443
+ {
1444
+ "type": "list",
1445
+ "sub_type": "ref_text",
1446
+ "list_items": [
1447
+ "Jian Yang, Shuming Ma, Haoyang Huang, Dongdong Zhang, Li Dong, Shaohan Huang, Alexandre Muzio, Saksham Singhal, Hany Hassan, Xia Song, and Furu Wei. Multilingual machine translation systems from microsoft for WMT21 shared task. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussà, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno-Yepes, Philipp Koehn, Tom Kocmi, André Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, WMT@EMNLP 2021, Online Event, November 10-11, 2021, pp. 446-455. Association for Computational Linguistics, 2021a. URL https://aclanthology.org/2021.wmt-1.54.",
1448
+ "Jian Yang, Yuwei Yin, Shuming Ma, Haoyang Huang, Dongdong Zhang, Zhoujun Li, and Furu Wei. Multilingual agreement for multilingual neural machine translation. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 2: Short Papers), Virtual Event, August 1-6, 2021, pp. 233-239. Association for Computational Linguistics, 2021b. doi: 10.18653/V1/2021. ACL-SHORT.31. URL https://doi.org/10.18653/v1/2021.acl-short.31.",
1449
+ "Jian Yang, Yuwei Yin, Shuming Ma, Dongdong Zhang, Zhoujun Li, and Furu Wei. High-resource language-specific training for multilingual neural machine translation. In Luc De Raedt (ed.), Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pp. 4461-4467. ijcai.org, 2022a. doi: 10.24963/IJCAI.2022/619. URL https://doi.org/10.24963/ijcai.2022/619.",
1450
+ "Jian Yang, Yuwei Yin, Shuming Ma, Dongdong Zhang, Shuangzhi Wu, Hongcheng Guo, Zhoujun Li, and Furu Wei. UM4: unified multilingual multiple teacher-student model for zero-resource neural machine translation. In Luc De Raedt (ed.), Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pp. 4454-4460. ijcai.org, 2022b. doi: 10.24963/IJCAI.2022/618. URL https://doi.org/10.24963/ijcai.2022/618.",
1451
+ "Jian Yang, Jiaxi Yang, Ke Jin, Yibo Miao, Lei Zhang, Liquan Yang, Zeyu Cui, Yichang Zhang, Binyuan Hui, and Junyang Lin. Evaluating and aligning codellms on human preference. arXiv preprint arXiv:2412.05210, 2024a.",
1452
+ "Jian Yang, Jiajun Zhang, Jiaxi Yang, Ke Jin, Lei Zhang, Qiyao Peng, Ken Deng, Yibo Miao, Tianyu Liu, Zeyu Cui, et al. Execrepobench: Multi-level executable code completion evaluation. arXiv preprint arXiv:2412.11990, 2024b.",
1453
+ "Jian Yang, Wei Zhang, Jiaxi Yang, Yibo Miao, Shanghaoran Quan, Zhenhe Wu, Qiyao Peng, Liquun Yang, Tianyu Liu, Zeyu Cui, et al. Multi-agent collaboration for multilingual code instruction tuning. arXiv preprint arXiv:2502.07487, 2025.",
1454
+ "Siyu Yu, Pinjia He, Ningjiang Chen, and Yifan Wu. Brain: Log parsing with bidirectional parallel tree. IEEE Transactions on Services Computing, 2023.",
1455
+ "Wei Zhang, Xianfu Cheng, Yi Zhang, Jian Yang, Hongcheng Guo, Zhoujun Li, Xiaolin Yin, Xi-angyuan Guan, Xu Shi, Liangfan Zheng, and Bo Zhang. Eclipse: Semantic entropy-lcs for crosslingual industrial log parsing, 2024a. URL https://arxiv.org/abs/2405.13548.",
1456
+ "Wei Zhang, Hongcheng Guo, Jian Yang, Zhoujin Tian, Yi Zhang, Chaoran Yan, Zhoujun Li, Tongliang Li, Xu Shi, Liangfan Zheng, and Bo Zhang. mabc: multi-agent blockchain-inspired collaboration for root cause analysis in micro-services architecture, 2024b. URL https://arxiv.org/abs/2404.12135.",
1457
+ "Wei Zhang, Yi Zhang, Li Zhu, Qianghuai Jia, Feijun Jiang, Hongcheng Guo, Zhoujun Li, and Mengping Zhou. Adc: Enhancing function calling via adversarial datasets and code line-level feedback. In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888405.",
1458
+ "De-Qing Zou, Hao Qin, and Hai Jin. Uilog: Improving log-based fault diagnosis by log analysis. Journal of computer science and technology, 31(5):1038-1052, 2016."
1459
+ ],
1460
+ "bbox": [
1461
+ 171,
1462
+ 102,
1463
+ 826,
1464
+ 922
1465
+ ],
1466
+ "page_idx": 12
1467
+ },
1468
+ {
1469
+ "type": "header",
1470
+ "text": "Published as a conference paper at ICLR 2025",
1471
+ "bbox": [
1472
+ 171,
1473
+ 32,
1474
+ 478,
1475
+ 47
1476
+ ],
1477
+ "page_idx": 12
1478
+ },
1479
+ {
1480
+ "type": "page_number",
1481
+ "text": "13",
1482
+ "bbox": [
1483
+ 490,
1484
+ 946,
1485
+ 508,
1486
+ 959
1487
+ ],
1488
+ "page_idx": 12
1489
+ }
1490
+ ]
2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_model.json ADDED
@@ -0,0 +1,2173 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "header",
5
+ "bbox": [
6
+ 0.173,
7
+ 0.033,
8
+ 0.48,
9
+ 0.049
10
+ ],
11
+ "angle": 0,
12
+ "content": "Published as a conference paper at ICLR 2025"
13
+ },
14
+ {
15
+ "type": "image",
16
+ "bbox": [
17
+ 0.174,
18
+ 0.103,
19
+ 0.214,
20
+ 0.131
21
+ ],
22
+ "angle": 0,
23
+ "content": null
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.216,
29
+ 0.104,
30
+ 0.825,
31
+ 0.125
32
+ ],
33
+ "angle": 0,
34
+ "content": "LEMUR: LOG PARSING WITH ENTROPY SAMPLING"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.174,
40
+ 0.131,
41
+ 0.623,
42
+ 0.15
43
+ ],
44
+ "angle": 0,
45
+ "content": "AND CHAIN-OF-THOUGHT MERGING"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.181,
51
+ 0.173,
52
+ 0.623,
53
+ 0.203
54
+ ],
55
+ "angle": 0,
56
+ "content": "Wei Zhang\\(^{1}\\), Xiangyuan Guan\\(^{1}\\), Lu Yunhong\\(^{2}\\), Jie Zhang\\(^{3}\\), Shuangyong Song\\(^{3}\\), Xianfu Cheng\\(^{1}\\), Zhenhe Wu\\(^{1}\\), Zhoujun Li\\(^{1}\\)"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.184,
62
+ 0.204,
63
+ 0.68,
64
+ 0.219
65
+ ],
66
+ "angle": 0,
67
+ "content": "<sup>1</sup>State Key Lab of Software Development Environment, Beihang University"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.184,
73
+ 0.219,
74
+ 0.307,
75
+ 0.233
76
+ ],
77
+ "angle": 0,
78
+ "content": "\\(^{2}\\)Yantai University"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.184,
84
+ 0.233,
85
+ 0.293,
86
+ 0.247
87
+ ],
88
+ "angle": 0,
89
+ "content": "3China Telecom"
90
+ },
91
+ {
92
+ "type": "list",
93
+ "bbox": [
94
+ 0.184,
95
+ 0.204,
96
+ 0.68,
97
+ 0.247
98
+ ],
99
+ "angle": 0,
100
+ "content": null
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.184,
106
+ 0.248,
107
+ 0.372,
108
+ 0.261
109
+ ],
110
+ "angle": 0,
111
+ "content": "zwpride@buaa.edu.cn"
112
+ },
113
+ {
114
+ "type": "title",
115
+ "bbox": [
116
+ 0.451,
117
+ 0.299,
118
+ 0.547,
119
+ 0.313
120
+ ],
121
+ "angle": 0,
122
+ "content": "ABSTRACT"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.23,
128
+ 0.33,
129
+ 0.768,
130
+ 0.567
131
+ ],
132
+ "angle": 0,
133
+ "content": "Logs produced by extensive software systems are integral to monitoring system behaviors. Advanced log analysis facilitates the detection, alerting, and diagnosis of system faults. Log parsing, which entails transforming raw log messages into structured templates, constitutes a critical phase in the automation of log analytics. Existing log parsers fail to identify the correct templates due to reliance on human-made rules. Besides, these methods focus on statistical features while ignoring semantic information in log messages. To address these challenges, we introduce a cutting-edge Log parsing framework with Entropy sampling and chain-of-thought Merging (LEMUR). Specifically, to discard the tedious manual rules, we propose a novel sampling method inspired by information entropy, which efficiently clusters typical logs. Furthermore, to enhance the merging of log templates, we design a chain-of-thought method for large language models (LLMs). LLMs exhibit exceptional semantic comprehension and deftly distinguish between parameters and invariant tokens. We have conducted experiments on large-scale public datasets. Extensive evaluation demonstrates that LEMUR achieves state-of-the-art performance and impressive efficiency. The Code is available at https://github.com/zwpride/lemur."
134
+ },
135
+ {
136
+ "type": "title",
137
+ "bbox": [
138
+ 0.174,
139
+ 0.594,
140
+ 0.338,
141
+ 0.608
142
+ ],
143
+ "angle": 0,
144
+ "content": "1 INTRODUCTION"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.17,
150
+ 0.625,
151
+ 0.827,
152
+ 0.794
153
+ ],
154
+ "angle": 0,
155
+ "content": "Logs serve as a critical information for system monitoring, offering key insights into system behavior, a fact well-documented in existing literature (He et al., 2021). Their advantage over other types of data lies in their rich informational content and relative ease of interpretation. Through log analysis, several important downstream tasks can be effectively addressed, which include anomaly detection (Du et al., 2017; Nandi et al., 2016), fault diagnosis (He et al., 2018; Zou et al., 2016), and root cause analysis (Tak et al., 2016). Log parsing, a crucial initial step in log analysis (He et al., 2016), separates log messages into two parts: 1) Log Templates. The constant, unchanging parts in logging statements, and 2) Log Variables. The dynamic, changeable details in different executions. In Figure 1, the logging statement logger.info('Wait {wait_time} seconds for {process} to be killed.') can yield various messages like Wait 26 seconds for Thread-20 to be killed. Wait < * > seconds for < * > to be killed is log template, and the changing data like 26 and Thread-20 are log variables."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.17,
161
+ 0.799,
162
+ 0.828,
163
+ 0.926
164
+ ],
165
+ "angle": 0,
166
+ "content": "In the field of system maintenance, where source code accessibility is often restricted, different parsers (He et al., 2017; Fu et al., 2022; Le & Zhang, 2023; Yu et al., 2023; Jiang et al., 2023) have been developed to differentiate between templates and variables within log messages autonomously. Existing log parsers can be categorized into two groups: syntax-based and semantic-based. Syntax-based log parsers (He et al., 2017; Du & Li, 2016; Makanju et al., 2009; Vaarandi, 2003; Vaarandi & Pihelgas, 2015) utilize specific features or heuristics (e.g., word frequency) to extract the constant parts of log messages as templates. Conversely, semantic-based parsers Liu et al. (2022); Le & Zhang (2023) leverages advanced deep learning frameworks to assimilate semantics and system-specific patterns from annotated log data, thereby facilitating the parsing of new logs. Besides,"
167
+ },
168
+ {
169
+ "type": "aside_text",
170
+ "bbox": [
171
+ 0.023,
172
+ 0.266,
173
+ 0.058,
174
+ 0.709
175
+ ],
176
+ "angle": 270,
177
+ "content": "arXiv:2402.18205v5 [cs.SE] 26 Mar 2025"
178
+ },
179
+ {
180
+ "type": "page_number",
181
+ "bbox": [
182
+ 0.495,
183
+ 0.949,
184
+ 0.505,
185
+ 0.96
186
+ ],
187
+ "angle": 0,
188
+ "content": "1"
189
+ }
190
+ ],
191
+ [
192
+ {
193
+ "type": "header",
194
+ "bbox": [
195
+ 0.174,
196
+ 0.033,
197
+ 0.48,
198
+ 0.049
199
+ ],
200
+ "angle": 0,
201
+ "content": "Published as a conference paper at ICLR 2025"
202
+ },
203
+ {
204
+ "type": "image",
205
+ "bbox": [
206
+ 0.29,
207
+ 0.109,
208
+ 0.71,
209
+ 0.337
210
+ ],
211
+ "angle": 0,
212
+ "content": null
213
+ },
214
+ {
215
+ "type": "image_caption",
216
+ "bbox": [
217
+ 0.171,
218
+ 0.358,
219
+ 0.825,
220
+ 0.401
221
+ ],
222
+ "angle": 0,
223
+ "content": "Figure 1: An example of log parsing. Logging Statement cannot be accessed in most cases. Log Message is parsed into a static Template part containing fixed keywords and a Variable part that will vary between different log messages with the same template."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.171,
229
+ 0.433,
230
+ 0.825,
231
+ 0.463
232
+ ],
233
+ "angle": 0,
234
+ "content": "recent works Jiang et al. (2023) leverage large language models (LLMs) (OpenAI, 2023) for log parsing, which also utilizes the powerful semantic understanding of LLMs."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.171,
240
+ 0.468,
241
+ 0.825,
242
+ 0.622
243
+ ],
244
+ "angle": 0,
245
+ "content": "However, syntax-based methodologies are heavily dependent on meticulously crafted rules. Their performance significantly diminishes with the exponential increase in the volume and intricacy of log data. Furthermore, these techniques often overlook semantic variances among logs. For instance, logs such as success to open file/var/log/system and success to close file/var/log/system display syntactic resemblance, potentially leading to their aggregation by data-driven approaches, which could adversely impact the efficacy of anomaly detection tasks. Semantic-based methods, reliant on pre-labeled logs for training, falter in understanding semantics when applied to unfamiliar domains. LLM-based parsers (Liu et al., 2023; Le & Zhang, 2023; Jiang et al., 2023) have lower availability due to high inference time (such as GPUs) and network latency (Jiao et al., 2023). Besides, LLMs generate unstable results because of the hallucination problem."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.171,
251
+ 0.628,
252
+ 0.827,
253
+ 0.81
254
+ ],
255
+ "angle": 0,
256
+ "content": "To tackle these challenges, we propose LEMUR, a cutting-edge Log parsing framework with Entropy sampling and Chain-of-Thought Merging (LEMUR). LEMUR brings together the strengths of the syntax-based and semantic-based methods, which consist of three key components: Information Entropy Clustering, Template Generation, and Chain-of-Thought Merging. Specifically, inspired by information theory Gray (2011), we recognize that different logs encapsulate varying quantities of information, while logs of a similar nature contain comparable amounts. Consequently, we have developed a novel sampling method based on information entropy principles that efficiently clusters characteristic logs by dividing large-scale data into multiple clusters and utilizing efficient sampling and clustering algorithms within those clusters, thereby ensuring that LEMUR remains robust and high-performance in large-scale log scenarios. Then in template generation, we determine the variables and the template in the log based on the information entropy of the token at the same location. In the end, motivated by Chain-of-Thought Wei et al. (2022c), we design a three-hop Chain-of-Thought (infer structure, infer semantics, and infer solution) for merging templates."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.171,
262
+ 0.815,
263
+ 0.825,
264
+ 0.872
265
+ ],
266
+ "angle": 0,
267
+ "content": "We have conducted a comprehensive evaluation on public large-scale log datasets of LogHub He et al. (2020) with seven state-of-the-art log parsing approaches. The results show that LEMUR achieves the highest performance than other baselines for the F1 score of grouping and template accuracy. Generally, the main contributions of this work are listed as follows:"
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.217,
273
+ 0.896,
274
+ 0.825,
275
+ 0.926
276
+ ],
277
+ "angle": 0,
278
+ "content": "- To the best of our knowledge, we propose LEMUR, the first unsupervised framework to combine information entropy and large language models for online log parsing."
279
+ },
280
+ {
281
+ "type": "page_number",
282
+ "bbox": [
283
+ 0.494,
284
+ 0.949,
285
+ 0.504,
286
+ 0.96
287
+ ],
288
+ "angle": 0,
289
+ "content": "2"
290
+ }
291
+ ],
292
+ [
293
+ {
294
+ "type": "header",
295
+ "bbox": [
296
+ 0.173,
297
+ 0.033,
298
+ 0.48,
299
+ 0.049
300
+ ],
301
+ "angle": 0,
302
+ "content": "Published as a conference paper at ICLR 2025"
303
+ },
304
+ {
305
+ "type": "image",
306
+ "bbox": [
307
+ 0.172,
308
+ 0.098,
309
+ 0.827,
310
+ 0.281
311
+ ],
312
+ "angle": 0,
313
+ "content": null
314
+ },
315
+ {
316
+ "type": "image_caption",
317
+ "bbox": [
318
+ 0.331,
319
+ 0.287,
320
+ 0.666,
321
+ 0.304
322
+ ],
323
+ "angle": 0,
324
+ "content": "Figure 2: Overview of our log parsing framework."
325
+ },
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.217,
330
+ 0.318,
331
+ 0.825,
332
+ 0.36
333
+ ],
334
+ "angle": 0,
335
+ "content": "- We introduce a novel sampling method based on information entropy for efficient log clustering. Besides, by utilizing LLMs, we can accurately merge templates based on semantics rather than syntax-based methods."
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.217,
341
+ 0.366,
342
+ 0.827,
343
+ 0.406
344
+ ],
345
+ "angle": 0,
346
+ "content": "- Extensive experiments are conducted on public benchmarks to demonstrate the effectiveness of our LEMUR. The results show that LEMUR outperforms other state-of-the-art methods."
347
+ },
348
+ {
349
+ "type": "list",
350
+ "bbox": [
351
+ 0.217,
352
+ 0.318,
353
+ 0.827,
354
+ 0.406
355
+ ],
356
+ "angle": 0,
357
+ "content": null
358
+ },
359
+ {
360
+ "type": "title",
361
+ "bbox": [
362
+ 0.173,
363
+ 0.431,
364
+ 0.345,
365
+ 0.447
366
+ ],
367
+ "angle": 0,
368
+ "content": "2 METHODOLOGY"
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.171,
374
+ 0.463,
375
+ 0.825,
376
+ 0.507
377
+ ],
378
+ "angle": 0,
379
+ "content": "In this section, we overview LEMUR, a framework designed to enhance log message categorization. Figure 2 shows LEMUR, which consists of three main components: Information Entropy Clustering, Template Generation, and Chain-of-Thought Merging."
380
+ },
381
+ {
382
+ "type": "title",
383
+ "bbox": [
384
+ 0.172,
385
+ 0.524,
386
+ 0.484,
387
+ 0.538
388
+ ],
389
+ "angle": 0,
390
+ "content": "2.1 INFORMATION ENTROPY CLUSTERING"
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.171,
396
+ 0.551,
397
+ 0.827,
398
+ 0.581
399
+ ],
400
+ "angle": 0,
401
+ "content": "In this section, logs are categorized into buckets based on their length. Some representative logs are selected as the cluster centers within each bucket, and other logs are clustered around these centers."
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.171,
407
+ 0.596,
408
+ 0.825,
409
+ 0.68
410
+ ],
411
+ "angle": 0,
412
+ "content": "Bucket Generation Logs are assigned to buckets \\(B_{j}\\) by a mapping function \\(f\\) based on length: \\(f:L\\to B\\) , with \\(L\\) representing logs and \\(B\\) representing buckets. For each \\(\\log L_i\\in L\\) , we compute its length \\(\\mathit{len}(L_i)\\) after tokenization. The log is then assigned to bucket \\(B_{j}\\in B\\) , where \\(j\\) matches the index of the log's length in the set of unique lengths \\(l\\) . This can be written as \\(f(L_{i}) = B_{j}\\) where \\(l_{j} = \\mathit{len}(L_{i})\\) . This method ensures logs of the same length are grouped, reducing variability and aiding subsequent analysis."
413
+ },
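As a quick illustration of the bucketing step just described, a minimal sketch assuming plain whitespace tokenization (the paper's actual delimiter set \(D\) is configured per dataset, see Table 2):

```python
from collections import defaultdict

def bucket_by_length(logs):
    """Map each log to the bucket holding messages with the same token count."""
    buckets = defaultdict(list)
    for log in logs:
        # Assumption: whitespace splitting; LEMUR tokenizes with a
        # per-dataset delimiter set D.
        buckets[len(log.split())].append(log)
    return buckets

logs = [
    "Wait 26 seconds for Thread-20 to be killed.",
    "Wait 5 seconds for Thread-3 to be killed.",
    "Connection closed",
]
print({length: len(group) for length, group in bucket_by_length(logs).items()})
# {8: 2, 2: 1}
```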
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.171,
418
+ 0.697,
419
+ 0.825,
420
+ 0.768
421
+ ],
422
+ "angle": 0,
423
+ "content": "Entropy-based Sampling To identify potential log templates from buckets of logs with identical token lengths, we propose a clustering method inspired by information theory Gray (2011), specifically employing Shannon entropy. Unlike conventional clustering which uses random seeds and iterative refinement, our approach selects logs based on their information content, which is ideal for log analysis due to the variability in log messages."
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.171,
429
+ 0.774,
430
+ 0.825,
431
+ 0.817
432
+ ],
433
+ "angle": 0,
434
+ "content": "Each \\(\\log x\\in B_j\\) is evaluated by its entropy \\(E(x) = -\\sum_{i}p(x_{i})\\log p(x_{i})\\), where \\(x_{i}\\) is the token and \\(p(x_{i})\\) its probability, gauging its information content. Logs are then ranked by descending entropy into layers \\(Lay_1,\\ldots ,Lay_n\\) to prioritize those with rich information."
435
+ },
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.171,
440
+ 0.823,
441
+ 0.825,
442
+ 0.88
443
+ ],
444
+ "angle": 0,
445
+ "content": "We select clustering centers from these layers, starting with the highest entropy logs and picking those with either new first tokens or entropy above a threshold. This process repeats until we've chosen \\( k \\) centers or the remaining logs no longer offer new information. Our stopping criterion ensures we gather diverse and informative logs while avoiding redundancy."
446
+ },
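A sketch of the entropy ranking and greedy center selection described above; `entropy_floor` is a hypothetical name for the unnamed entropy threshold, and whitespace tokenization is again assumed:

```python
import math
from collections import Counter

def shannon_entropy(tokens):
    # E(x) = -sum_i p(x_i) log p(x_i), with p taken from this log's own token frequencies.
    counts, total = Counter(tokens), len(tokens)
    return -sum((c / total) * math.log(c / total) for c in counts.values())

def pick_centers(bucket, k, entropy_floor):
    """Rank logs by descending entropy, then greedily keep those that still add
    information: an unseen first token, or entropy above the floor."""
    ranked = sorted(bucket, key=lambda log: shannon_entropy(log.split()), reverse=True)
    centers, seen_first = [], set()
    for log in ranked:
        tokens = log.split()
        if tokens and (tokens[0] not in seen_first
                       or shannon_entropy(tokens) > entropy_floor):
            centers.append(log)
            seen_first.add(tokens[0])
        if len(centers) == k:
            break
    return centers
```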
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.171,
451
+ 0.896,
452
+ 0.825,
453
+ 0.926
454
+ ],
455
+ "angle": 0,
456
+ "content": "Refinement with Jaccard Similarity After obtaining the initial set of \\(k\\) samples, we further refine the selection by merging similar log centers based on their Jaccard similarity. This merging"
457
+ },
458
+ {
459
+ "type": "page_number",
460
+ "bbox": [
461
+ 0.494,
462
+ 0.949,
463
+ 0.505,
464
+ 0.96
465
+ ],
466
+ "angle": 0,
467
+ "content": "3"
468
+ }
469
+ ],
470
+ [
471
+ {
472
+ "type": "header",
473
+ "bbox": [
474
+ 0.173,
475
+ 0.033,
476
+ 0.48,
477
+ 0.049
478
+ ],
479
+ "angle": 0,
480
+ "content": "Published as a conference paper at ICLR 2025"
481
+ },
482
+ {
483
+ "type": "image",
484
+ "bbox": [
485
+ 0.207,
486
+ 0.101,
487
+ 0.797,
488
+ 0.462
489
+ ],
490
+ "angle": 0,
491
+ "content": null
492
+ },
493
+ {
494
+ "type": "image_caption",
495
+ "bbox": [
496
+ 0.302,
497
+ 0.471,
498
+ 0.695,
499
+ 0.487
500
+ ],
501
+ "angle": 0,
502
+ "content": "Figure 3: Three-S-hop Chain-of-Thought Merging Method"
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.171,
508
+ 0.487,
509
+ 0.825,
510
+ 0.577
511
+ ],
512
+ "angle": 0,
513
+ "content": "process helps eliminate redundancy while preserving the diversity of log samples. We use the Jaccard similarity to measure the similarity between two logs. The Jaccard similarity between two logs \\( L_{1} \\) and \\( L_{2} \\) is calculated as similarity = \\( \\frac{|L_1 \\cap L_2|}{|L_1 \\cup L_2|} \\). Let \\( J_{T} \\) be the threshold for Jaccard similarity; if the similarity between two centers \\( (L_{1}, L_{2}) \\) exceeds \\( J_{T} \\), we remove \\( L_{2} \\). This refinement ensures a diverse and representative set of \\( k' \\) log samples and optimizes the balance between coverage and conciseness in the log samples."
514
+ },
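A sketch of this Jaccard-based refinement over token sets; the greedy keep/drop order follows the "remove \(L_2\)" rule in the text:

```python
def jaccard(log_a, log_b):
    # similarity = |L1 ∩ L2| / |L1 ∪ L2| over token sets.
    a, b = set(log_a.split()), set(log_b.split())
    return len(a & b) / len(a | b) if a | b else 1.0

def refine_centers(centers, j_t):
    """Keep a candidate center only if its similarity to every center kept so
    far does not exceed the Jaccard threshold J_T."""
    kept = []
    for candidate in centers:
        if all(jaccard(candidate, c) <= j_t for c in kept):
            kept.append(candidate)
    return kept

print(refine_centers(["open file /a", "open file /b", "close socket 9"], j_t=0.4))
# ['open file /a', 'close socket 9']
```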
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.171,
519
+ 0.59,
520
+ 0.825,
521
+ 0.634
522
+ ],
523
+ "angle": 0,
524
+ "content": "Token-level Clustering In the clustering process for logs within a bucket \\( B_{j} \\in B \\), we distinguish between the set of representative logs, \\( S \\), which serve as cluster centers, and the set of remaining logs, \\( O \\), which are to be clustered around these centers."
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.171,
530
+ 0.64,
531
+ 0.825,
532
+ 0.711
533
+ ],
534
+ "angle": 0,
535
+ "content": "Let \\( S = \\{s_1, s_2, \\ldots, s_k\\} \\) represent the cluster centers and \\( O = \\{o_1, o_2, \\ldots, o_n\\} \\) denote the logs to be clustered. The objective is to assign each log \\( o_i \\in O \\) to a cluster center \\( s_j \\in S \\) such that the edit distance between them, \\( d(o_i, s_j) \\), is minimized. The edit distance measures the similarity between two logs in terms of the changes required to convert one into the other at the token level. The edit distance \\( d(o_i, s_j) \\) between a log \\( o_i \\) and a cluster center \\( s_j \\) is defined as:"
536
+ },
537
+ {
538
+ "type": "equation",
539
+ "bbox": [
540
+ 0.39,
541
+ 0.716,
542
+ 0.607,
543
+ 0.757
544
+ ],
545
+ "angle": 0,
546
+ "content": "\\[\nd \\left(o _ {i}, s _ {j}\\right) = \\min _ {s _ {j} \\in S} \\sum_ {t = 1} ^ {T} \\delta \\left(o _ {i, t}, s _ {j, t}\\right)\n\\]"
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.171,
552
+ 0.762,
553
+ 0.825,
554
+ 0.79
555
+ ],
556
+ "angle": 0,
557
+ "content": "where \\(\\delta(o_{i,t}, s_{j,t})\\) is the token-level edit distance between the \\(t\\)-th token of \\(o_i\\) and \\(s_j\\), and \\(T\\) represents the total number of tokens."
558
+ },
559
+ {
560
+ "type": "text",
561
+ "bbox": [
562
+ 0.171,
563
+ 0.797,
564
+ 0.825,
565
+ 0.827
566
+ ],
567
+ "angle": 0,
568
+ "content": "By minimizing \\( d(o_i, s_j) \\) for each log \\( o_i \\), we cluster logs around their nearest representative center in \\( S \\), ensuring that logs within a cluster are as similar as possible according to the defined metric."
569
+ },
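A sketch of this assignment step, reading \(\delta\) as a 0/1 per-position token mismatch (logs within a bucket share a token length); a token-level Levenshtein distance would fit the description equally well:

```python
def token_distance(log, center):
    # Sum over positions t of δ(o_{i,t}, s_{j,t}), with δ = 1 on a mismatch.
    return sum(a != b for a, b in zip(log.split(), center.split()))

def assign_to_centers(others, centers):
    """Attach every remaining log o_i to the center s_j minimizing d(o_i, s_j)."""
    clusters = {c: [] for c in centers}
    for log in others:
        nearest = min(centers, key=lambda c: token_distance(log, c))
        clusters[nearest].append(log)
    return clusters
```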
570
+ {
571
+ "type": "title",
572
+ "bbox": [
573
+ 0.172,
574
+ 0.842,
575
+ 0.388,
576
+ 0.856
577
+ ],
578
+ "angle": 0,
579
+ "content": "2.2 TEMPLATE GENERATION"
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.171,
585
+ 0.868,
586
+ 0.825,
587
+ 0.926
588
+ ],
589
+ "angle": 0,
590
+ "content": "Template generation aims to differentiate variables from fixed components in logs. It starts with finding the Longest Common Subsequence (LCS), which indicates shared elements across logs. Then, each log is compared with the LCS to pinpoint unique tokens and their locations; for example, comparing ABC with the LCS B reveals distinct tokens at positions A-0 and C-2."
591
+ },
592
+ {
593
+ "type": "page_number",
594
+ "bbox": [
595
+ 0.494,
596
+ 0.949,
597
+ 0.505,
598
+ 0.96
599
+ ],
600
+ "angle": 0,
601
+ "content": "4"
602
+ }
603
+ ],
604
+ [
605
+ {
606
+ "type": "header",
607
+ "bbox": [
608
+ 0.174,
609
+ 0.033,
610
+ 0.48,
611
+ 0.049
612
+ ],
613
+ "angle": 0,
614
+ "content": "Published as a conference paper at ICLR 2025"
615
+ },
616
+ {
617
+ "type": "table",
618
+ "bbox": [
619
+ 0.175,
620
+ 0.103,
621
+ 0.825,
622
+ 0.145
623
+ ],
624
+ "angle": 0,
625
+ "content": "<table><tr><td></td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxyfer</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td></tr><tr><td>Template</td><td>14</td><td>114</td><td>36</td><td>50</td><td>120</td><td>46</td><td>149</td><td>50</td><td>118</td><td>166</td><td>75</td><td>6</td><td>8</td><td>27</td><td>43</td><td>341</td></tr><tr><td>Average Length</td><td>7.44</td><td>8.19</td><td>8.76</td><td>6.30</td><td>6.32</td><td>3.48</td><td>8.51</td><td>7.93</td><td>8.30</td><td>5.40</td><td>2.80</td><td>6.28</td><td>9.35</td><td>8.56</td><td>9.01</td><td>9.17</td></tr><tr><td>Various Lengths Proportion</td><td>0.25%</td><td>0.00%</td><td>0.00%</td><td>0.55%</td><td>0.00%</td><td>0.40%</td><td>2.25%</td><td>0.00%</td><td>0.00%</td><td>3.85%</td><td>0.00%</td><td>0.00%</td><td>47.35%</td><td>0.00%</td><td>0.00%</td><td>0.35%</td></tr><tr><td>Messages</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td></tr></table>"
626
+ },
627
+ {
628
+ "type": "table_caption",
629
+ "bbox": [
630
+ 0.368,
631
+ 0.16,
632
+ 0.63,
633
+ 0.175
634
+ ],
635
+ "angle": 0,
636
+ "content": "Table 1: Summary of LogHub datasets."
637
+ },
638
+ {
639
+ "type": "table",
640
+ "bbox": [
641
+ 0.175,
642
+ 0.189,
643
+ 0.825,
644
+ 0.236
645
+ ],
646
+ "angle": 0,
647
+ "content": "<table><tr><td></td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td></tr><tr><td>D</td><td>:</td><td>=, :, (), -</td><td>:</td><td>=, :, =, , ( )</td><td>=, -, 9</td><td>9</td><td>:=, =, [, 11</td><td>:=, :, 8</td><td>:=, [, 9</td><td>:=, 12</td><td>:=, 12</td><td>:=, 12</td><td>:=, 12</td><td>:=, 4</td><td>20</td><td>12</td></tr><tr><td>k</td><td>2</td><td>8</td><td>6</td><td>8</td><td>9</td><td>9</td><td>11</td><td>8</td><td>25</td><td>9</td><td>12</td><td>12</td><td>12</td><td>4</td><td>20</td><td>12</td></tr><tr><td>J_T</td><td>0.7</td><td>0.7</td><td>0.6</td><td>0.9</td><td>0.6</td><td>0.6</td><td>0.4</td><td>0.6</td><td>0.33</td><td>0.7</td><td>0.7</td><td>0.7</td><td>0.5</td><td>0.7</td><td>0.7</td><td>0.7</td></tr><tr><td>θ</td><td>2.0</td><td>1.7</td><td>2.1</td><td>2.2</td><td>5.5</td><td>1.2</td><td>4.1</td><td>1.1</td><td>0.09</td><td>3.5</td><td>0</td><td>0</td><td>0.1</td><td>0.2</td><td>2.3</td><td>4.7</td></tr></table>"
648
+ },
649
+ {
650
+ "type": "table_caption",
651
+ "bbox": [
652
+ 0.171,
653
+ 0.252,
654
+ 0.825,
655
+ 0.294
656
+ ],
657
+ "angle": 0,
658
+ "content": "Table 2: Transposed hyperparameters setting of Loghub datasets. \\(D\\) is the tokens for word-splitting, \\(k\\) denotes the number of the \\(S\\) in each bucket, \\(J_{T}\\) is the Jaccard similarity threshold for within-bucket merging, and \\(\\theta\\) is the entropy threshold for token identification."
659
+ },
660
+ {
661
+ "type": "text",
662
+ "bbox": [
663
+ 0.171,
664
+ 0.321,
665
+ 0.825,
666
+ 0.363
667
+ ],
668
+ "angle": 0,
669
+ "content": "For each unique position, we calculate information entropy inline using \\( H(X) = -\\sum P(x_i)\\log_2P(x_i) \\), where \\( P(x_{i}) \\) is the probability of the token at that position, and compile a list of these tokens."
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.17,
675
+ 0.37,
676
+ 0.825,
677
+ 0.426
678
+ ],
679
+ "angle": 0,
680
+ "content": "To classify a position as a variable, we apply the inline decision: a position is variable if \\( H > \\theta \\), otherwise fixed, where \\( H \\) represents information entropy and \\( \\theta \\) is a set threshold depending on data characteristics and iterative experiments. When \\( H \\) surpasses \\( \\theta \\), we substitute all tokens at that position with the variable marker \\( < * > \\)."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.171,
686
+ 0.433,
687
+ 0.826,
688
+ 0.49
689
+ ],
690
+ "angle": 0,
691
+ "content": "Additionally, we mark digit-dominated tokens as variables \\( < * > \\) and use NLP to identify fixed-value tokens (e.g., hash values), enhancing variable identification accuracy. This method effectively identifies variables and fixed parts in logs, adapts to different types of logs, and accurately identifies changing parts while preserving the log structure."
692
+ },
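A sketch of the positional-entropy test, skipping the LCS alignment step for brevity; `theta` plays the role of \(\theta\) and the digit rule is a rough stand-in for the digit-dominated-token check:

```python
import math
from collections import Counter

def positional_entropy(column):
    # H(X) = -sum_i P(x_i) log2 P(x_i) over the tokens observed at one position.
    counts, total = Counter(column), len(column)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

def make_template(cluster_logs, theta):
    """Positions with H > theta, or dominated by digit-bearing tokens,
    become the variable marker <*>; the rest stay fixed."""
    template = []
    for column in zip(*(log.split() for log in cluster_logs)):
        digit_heavy = all(any(ch.isdigit() for ch in tok) for tok in column)
        template.append("<*>" if positional_entropy(column) > theta or digit_heavy
                        else column[0])
    return " ".join(template)

cluster = [
    "Wait 26 seconds for Thread-20 to be killed.",
    "Wait 5 seconds for Thread-3 to be killed.",
]
print(make_template(cluster, theta=0.5))  # Wait <*> seconds for <*> to be killed.
```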
693
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.172,
697
+ 0.506,
698
+ 0.542,
699
+ 0.52
700
+ ],
701
+ "angle": 0,
702
+ "content": "2.3 THREE-S-HOP CHAIN-OF-THOUGHT MERGING"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.17,
708
+ 0.532,
709
+ 0.825,
710
+ 0.616
711
+ ],
712
+ "angle": 0,
713
+ "content": "In advanced log message analysis, after categorizing messages into distinct clusters and extracting unique log templates, we encounter a significant challenge: reconciling log templates that are semantically similar but differ in length. Despite structural variations, these templates often represent identical or closely related events. This challenge primarily arises from the limitations of length-based classification methods, which frequently fail to recognize that templates with equivalent semantics but different expressions may correspond to the same event type."
714
+ },
715
+ {
716
+ "type": "text",
717
+ "bbox": [
718
+ 0.17,
719
+ 0.622,
720
+ 0.825,
721
+ 0.706
722
+ ],
723
+ "angle": 0,
724
+ "content": "Illustrated in Figure 3, We propose Three-S-hop Chain-of-Thought Merging using Large Language Models (LLMs) for semantic parsing. This includes data processing and three dialogue rounds: Structure QA (examines structure and length differences), Semantic QA (probes meanings for semantic equivalences), and Solution QA (decides on merging based on prior analyses). This approach merges templates of different lengths but similar meanings through structural and semantic analysis, enhancing accurate identification."
725
+ },
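A sketch of the three-round dialogue; the prompt strings are loose paraphrases of the Structure/Semantic/Solution QA rounds above, and `ask_llm(history, question)` is a hypothetical stand-in for whichever chat-completion API is used:

```python
def should_merge(template_a, template_b, ask_llm):
    """Three-S-hop merging: Structure QA, then Semantic QA, then Solution QA,
    each round conditioned on the dialogue so far."""
    history = []
    for question in (
        f"Structure QA: compare the token structure and length of\nA: {template_a}\nB: {template_b}",
        "Semantic QA: do A and B describe the same event despite the differences?",
        "Solution QA: based on the analyses above, answer MERGE or KEEP.",
    ):
        answer = ask_llm(history, question)
        history.append((question, answer))
    return "MERGE" in answer.upper()
```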
726
+ {
727
+ "type": "title",
728
+ "bbox": [
729
+ 0.172,
730
+ 0.726,
731
+ 0.319,
732
+ 0.74
733
+ ],
734
+ "angle": 0,
735
+ "content": "3 EXPERIMENT"
736
+ },
737
+ {
738
+ "type": "title",
739
+ "bbox": [
740
+ 0.172,
741
+ 0.758,
742
+ 0.367,
743
+ 0.771
744
+ ],
745
+ "angle": 0,
746
+ "content": "3.1 IMPLEMENT DETAILS"
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.171,
752
+ 0.784,
753
+ 0.825,
754
+ 0.812
755
+ ],
756
+ "angle": 0,
757
+ "content": "Datasets Experiments are conducted on the most widely-used benchmark datasets published in LogHub (He et al., 2020). More details are available in Table 1."
758
+ },
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.17,
763
+ 0.827,
764
+ 0.826,
765
+ 0.926
766
+ ],
767
+ "angle": 0,
768
+ "content": "Implementation and Configuration We implement LEMUR based on Python 3.10, Apple M3 chip with 8-core CPU, 10-core GPU 16GB RAM, and macOS Sonoma(14.2.1). In experiments, \\( D \\) is the token set for word-splitting, \\( k \\) denotes the number of the \\( S \\) in each bucket, the Jaccard similarity threshold for within-bucket merging \\( J_{T} \\) and the entropy threshold for token identification \\( \\theta \\) are shown in Table 2. We use GPT-4 as the base to implement the three-S-hop chain of thought merging, and LEMUR is suitable for other LLMs. LEMUR uses these models to conduct inference just by locally loading the weights of these models or utilizing the APIs provided by these LLMs."
769
+ },
770
+ {
771
+ "type": "page_number",
772
+ "bbox": [
773
+ 0.494,
774
+ 0.949,
775
+ 0.505,
776
+ 0.96
777
+ ],
778
+ "angle": 0,
779
+ "content": "5"
780
+ }
781
+ ],
782
+ [
783
+ {
784
+ "type": "header",
785
+ "bbox": [
786
+ 0.174,
787
+ 0.033,
788
+ 0.48,
789
+ 0.049
790
+ ],
791
+ "angle": 0,
792
+ "content": "Published as a conference paper at ICLR 2025"
793
+ },
794
+ {
795
+ "type": "table",
796
+ "bbox": [
797
+ 0.177,
798
+ 0.102,
799
+ 0.825,
800
+ 0.289
801
+ ],
802
+ "angle": 0,
803
+ "content": "<table><tr><td rowspan=\"2\">Dataset</td><td colspan=\"2\">Drain</td><td colspan=\"2\">Spell</td><td colspan=\"2\">IPLOM</td><td colspan=\"2\">ULP</td><td colspan=\"2\">Brain</td><td colspan=\"2\">LogPPT</td><td colspan=\"2\">LLMParser</td><td colspan=\"2\">LEMUR</td></tr><tr><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td></tr><tr><td>HDFS</td><td>0.999</td><td>0.998</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.998</td><td>0.999</td><td>0.998</td><td>0.957</td><td>0.845</td><td>0.965</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Hadoop</td><td>0.999</td><td>0.948</td><td>0.920</td><td>0.777</td><td>0.996</td><td>0.954</td><td>0.999</td><td>0.950</td><td>0.999</td><td>0.949</td><td>0.999</td><td>0.977</td><td>0.958</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Spark</td><td>0.992</td><td>0.920</td><td>0.991</td><td>0.905</td><td>0.992</td><td>0.920</td><td>0.995</td><td>0.922</td><td>0.999</td><td>0.998</td><td>0.997</td><td>0.848</td><td>0.906</td><td>0.995</td><td>1</td><td>1</td></tr><tr><td>Zookeeper</td><td>0.999</td><td>0.967</td><td>0.999</td><td>0.964</td><td>0.999</td><td>0.993</td><td>0.999</td><td>0.988</td><td>0.999</td><td>0.985</td><td>1</td><td>1</td><td>0.967</td><td>1</td><td>1</td><td>1</td></tr><tr><td>BGL</td><td>0.999</td><td>0.963</td><td>0.957</td><td>0.786</td><td>0.999</td><td>0.939</td><td>0.999</td><td>0.930</td><td>0.999</td><td>0.986</td><td>0.968</td><td>0.455</td><td>0.864</td><td>0.892</td><td>1</td><td>1</td></tr><tr><td>HPC</td><td>0.991</td><td>0.887</td><td>0.986</td><td>0.654</td><td>0.978</td><td>0.829</td><td>0.994</td><td>0.951</td><td>0.998</td><td>0.945</td><td>0.999</td><td>0.941</td><td>0.910</td><td>0.872</td><td>1</td><td>1</td></tr><tr><td>Thunderbird</td><td>0.999</td><td>0.955</td><td>0.994</td><td>0.844</td><td>0.999</td><td>0.663</td><td>0.999</td><td>0.675</td><td>0.999</td><td>0.971</td><td>0.714</td><td>0.262</td><td>0.799</td><td>0.813</td><td>0.999</td><td>0.982</td></tr><tr><td>Windows</td><td>0.999</td><td>0.997</td><td>0.999</td><td>0.989</td><td>0.995</td><td>0.567</td><td>0.989</td><td>0.410</td><td>0.999</td><td>0.997</td><td>0.992</td><td>0.717</td><td>0.984</td><td>0.783</td><td>1</td><td>1</td></tr><tr><td>Linux</td><td>0.992</td><td>0.690</td><td>0.937</td><td>0.605</td><td>0.964</td><td>0.671</td><td>0.476</td><td>0.363</td><td>0.999</td><td>0.996</td><td>0.713</td><td>0.177</td><td>0.921</td><td>0.961</td><td>0.999</td><td>0.988</td></tr><tr><td>Android</td><td>0.996</td><td>0.911</td><td>0.992</td><td>0.919</td><td>0.949</td><td>0.712</td><td>0.971</td><td>0.838</td><td>0.997</td><td>0.961</td><td>0.989</td><td>0.862</td><td>0.990</td><td>0.873</td><td>0.999</td><td>0.995</td></tr><tr><td>HealthApp</td><td>0.918</td><td>0.780</td><td>0.887</td><td>0.639</td><td>0.958</td><td>0.822</td><td>0.993</td><td>0.901</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.978</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Apache</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.582</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Proxifier</td><td>0.785</td><td>0.526</td><td>0.832</td><td>0.526</td><td>0.786</td><td>0.516</td><td>0.940</td><td>0.024</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>OpenSSH</td><td>0.999</td><td>0.787</td><
td>0.918</td><td>0.554</td><td>0.998</td><td>0.540</td><td>0.940</td><td>0.434</td><td>1</td><td>1</td><td>0.983</td><td>0.436</td><td>0.836</td><td>0.697</td><td>1</td><td>1</td></tr><tr><td>OpenStack</td><td>0.993</td><td>0.733</td><td>0.994</td><td>0.764</td><td>0.909</td><td>0.331</td><td>0.834</td><td>0.491</td><td>1</td><td>1</td><td>0.997</td><td>0.492</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Mac</td><td>0.975</td><td>0.786</td><td>0.963</td><td>0.756</td><td>0.957</td><td>0.670</td><td>0.981</td><td>0.814</td><td>0.996</td><td>0.942</td><td>0.720</td><td>0.761</td><td>0.830</td><td>0.871</td><td>0.984</td><td>0.977</td></tr><tr><td>Average</td><td>0.977</td><td>0.865</td><td>0.961</td><td>0.793</td><td>0.968</td><td>0.756</td><td>0.932</td><td>0.733</td><td>0.999</td><td>0.983</td><td>0.939</td><td>0.710</td><td>0.931</td><td>0.913</td><td>0.999</td><td>0.996</td></tr></table>"
804
+ },
805
+ {
806
+ "type": "table_caption",
807
+ "bbox": [
808
+ 0.356,
809
+ 0.304,
810
+ 0.642,
811
+ 0.32
812
+ ],
813
+ "angle": 0,
814
+ "content": "Table 3: FGA and GA on LogHub Dataset."
815
+ },
816
+ {
817
+ "type": "image",
818
+ "bbox": [
819
+ 0.18,
820
+ 0.338,
821
+ 0.465,
822
+ 0.499
823
+ ],
824
+ "angle": 0,
825
+ "content": null
826
+ },
827
+ {
828
+ "type": "image_caption",
829
+ "bbox": [
830
+ 0.173,
831
+ 0.509,
832
+ 0.47,
833
+ 0.535
834
+ ],
835
+ "angle": 0,
836
+ "content": "(a) Boxplot of group accuracy on 16 benchmark datasets."
837
+ },
838
+ {
839
+ "type": "image",
840
+ "bbox": [
841
+ 0.536,
842
+ 0.339,
843
+ 0.821,
844
+ 0.499
845
+ ],
846
+ "angle": 0,
847
+ "content": null
848
+ },
849
+ {
850
+ "type": "image_caption",
851
+ "bbox": [
852
+ 0.528,
853
+ 0.509,
854
+ 0.825,
855
+ 0.535
856
+ ],
857
+ "angle": 0,
858
+ "content": "(b) Boxplot of F1 score of group accuracy on 16 benchmark datasets."
859
+ },
860
+ {
861
+ "type": "image_caption",
862
+ "bbox": [
863
+ 0.23,
864
+ 0.547,
865
+ 0.766,
866
+ 0.564
867
+ ],
868
+ "angle": 0,
869
+ "content": "Figure 4: Comparison of group accuracy and F1 score on 16 benchmark datasets."
870
+ },
871
+ {
872
+ "type": "title",
873
+ "bbox": [
874
+ 0.173,
875
+ 0.577,
876
+ 0.4,
877
+ 0.591
878
+ ],
879
+ "angle": 0,
880
+ "content": "3.2 BASELINES AND METRICS"
881
+ },
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.171,
886
+ 0.602,
887
+ 0.825,
888
+ 0.645
889
+ ],
890
+ "angle": 0,
891
+ "content": "Baselines As for baselines, we choose Drain (He et al., 2017), Spell (Du & Li, 2016), IPLOM (Makanju et al., 2009), ULP (Sedki et al., 2022), Brain (Yu et al., 2023), LogPPT (Le & Zhang, 2023) and LLMParser (Jiang et al., 2023) as our baselines."
892
+ },
893
+ {
894
+ "type": "text",
895
+ "bbox": [
896
+ 0.171,
897
+ 0.66,
898
+ 0.825,
899
+ 0.754
900
+ ],
901
+ "angle": 0,
902
+ "content": "F1 score of Grouping Accuracy (FGA) FGA is a template-level metric that focuses on the ratio of correctly grouped templates. Specifically, let \\( N_{g} \\) be the actual correct number of templates in the ground truth, and \\( N_{p} \\) be the number of templates that are generated by a log parser. If \\( N_{c} \\) is the number of templates that are correctly parsed by the log parser, then we can compute the Precision of Grouping Accuracy (PGA) as \\( \\frac{N_{c}}{N_{p}} \\) and the Recall of Grouping Accuracy (RGA) as \\( \\frac{N_{c}}{N_{g}} \\). The FGA is equal to their harmonic mean, \\( ie \\frac{2 \\times GPA \\times RGA}{PGA + RGA} \\)."
903
+ },
904
+ {
905
+ "type": "text",
906
+ "bbox": [
907
+ 0.171,
908
+ 0.767,
909
+ 0.825,
910
+ 0.81
911
+ ],
912
+ "angle": 0,
913
+ "content": "Grouping Accuracy (GA) GA is computed as the ratio of correctly grouped log messages to the total count of log messages. A log message is considered to be correctly grouped if and only if its template aligns with the same set of log messages as that of the ground truth."
914
+ },
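A sketch computing both metrics from per-message template assignments; following the definitions above, a predicted group counts as correct when it covers exactly the same set of messages as some ground-truth group:

```python
from collections import defaultdict

def grouping_metrics(predicted, truth):
    """predicted / truth: one template id per log message."""
    pred_groups, true_groups = defaultdict(set), defaultdict(set)
    for idx, (p, t) in enumerate(zip(predicted, truth)):
        pred_groups[p].add(idx)
        true_groups[t].add(idx)
    true_sets = {frozenset(s) for s in true_groups.values()}
    correct = [s for s in pred_groups.values() if frozenset(s) in true_sets]
    n_c, n_p, n_g = len(correct), len(pred_groups), len(true_groups)
    pga, rga = n_c / n_p, n_c / n_g               # precision / recall of grouping
    fga = 2 * pga * rga / (pga + rga) if pga + rga else 0.0
    ga = sum(map(len, correct)) / len(predicted)  # message-level grouping accuracy
    return fga, ga

print(grouping_metrics(["A", "A", "B", "C"], ["x", "x", "y", "y"]))  # (0.4, 0.5)
```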
915
+ {
916
+ "type": "text",
917
+ "bbox": [
918
+ 0.171,
919
+ 0.825,
920
+ 0.825,
921
+ 0.854
922
+ ],
923
+ "angle": 0,
924
+ "content": "Execution time. We measure the execution time in seconds and compare LEMUR with other parsers in terms of efficiency."
925
+ },
926
+ {
927
+ "type": "title",
928
+ "bbox": [
929
+ 0.173,
930
+ 0.87,
931
+ 0.325,
932
+ 0.884
933
+ ],
934
+ "angle": 0,
935
+ "content": "3.3 MAIN RESULTS"
936
+ },
937
+ {
938
+ "type": "text",
939
+ "bbox": [
940
+ 0.171,
941
+ 0.896,
942
+ 0.825,
943
+ 0.926
944
+ ],
945
+ "angle": 0,
946
+ "content": "In Table 3, LEMUR, an unsupervised LLM-related model, exhibits remarkable effectiveness and performance in the field of log parsing. When compared to Brain, the current state-of-the-art in"
947
+ },
948
+ {
949
+ "type": "page_number",
950
+ "bbox": [
951
+ 0.494,
952
+ 0.949,
953
+ 0.506,
954
+ 0.96
955
+ ],
956
+ "angle": 0,
957
+ "content": "6"
958
+ }
959
+ ],
960
+ [
961
+ {
962
+ "type": "header",
963
+ "bbox": [
964
+ 0.174,
965
+ 0.033,
966
+ 0.48,
967
+ 0.049
968
+ ],
969
+ "angle": 0,
970
+ "content": "Published as a conference paper at ICLR 2025"
971
+ },
972
+ {
973
+ "type": "image",
974
+ "bbox": [
975
+ 0.182,
976
+ 0.106,
977
+ 0.82,
978
+ 0.263
979
+ ],
980
+ "angle": 0,
981
+ "content": null
982
+ },
983
+ {
984
+ "type": "image_caption",
985
+ "bbox": [
986
+ 0.347,
987
+ 0.281,
988
+ 0.651,
989
+ 0.296
990
+ ],
991
+ "angle": 0,
992
+ "content": "Figure 5: Execution time for various datasets."
993
+ },
994
+ {
995
+ "type": "image",
996
+ "bbox": [
997
+ 0.185,
998
+ 0.307,
999
+ 0.338,
1000
+ 0.45
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": null
1004
+ },
1005
+ {
1006
+ "type": "image",
1007
+ "bbox": [
1008
+ 0.343,
1009
+ 0.307,
1010
+ 0.496,
1011
+ 0.45
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": null
1015
+ },
1016
+ {
1017
+ "type": "image",
1018
+ "bbox": [
1019
+ 0.502,
1020
+ 0.306,
1021
+ 0.655,
1022
+ 0.45
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": null
1026
+ },
1027
+ {
1028
+ "type": "image",
1029
+ "bbox": [
1030
+ 0.661,
1031
+ 0.307,
1032
+ 0.813,
1033
+ 0.45
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": null
1037
+ },
1038
+ {
1039
+ "type": "table_caption",
1040
+ "bbox": [
1041
+ 0.333,
1042
+ 0.47,
1043
+ 0.665,
1044
+ 0.484
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "Figure 6: Execution time for various dataset sizes."
1048
+ },
1049
+ {
1050
+ "type": "table",
1051
+ "bbox": [
1052
+ 0.339,
1053
+ 0.487,
1054
+ 0.661,
1055
+ 0.551
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "<table><tr><td>Method</td><td>Avg. FGA</td><td>Avg. GA</td></tr><tr><td>Random sampling</td><td>0.843</td><td>0.718</td></tr><tr><td>First-token sampling</td><td>0.913</td><td>0.804</td></tr><tr><td>Entropy sampling</td><td>0.968</td><td>0.851</td></tr><tr><td>Entropy + First-token sampling (Ours)</td><td>0.999</td><td>0.996</td></tr></table>"
1059
+ },
1060
+ {
1061
+ "type": "table_caption",
1062
+ "bbox": [
1063
+ 0.322,
1064
+ 0.567,
1065
+ 0.674,
1066
+ 0.581
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "Table 4: Performance on different sampling methods."
1070
+ },
1071
+ {
1072
+ "type": "table",
1073
+ "bbox": [
1074
+ 0.177,
1075
+ 0.584,
1076
+ 0.825,
1077
+ 0.624
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "<table><tr><td>Method</td><td>Metric</td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td><td>Average</td></tr><tr><td>LEMUR w/o CoT</td><td>FGA</td><td>0.999</td><td>0.999</td><td>1</td><td>0.999</td><td>0.999</td><td>0.999</td><td>0.999</td><td>1</td><td>0.999</td><td>0.992</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.982</td><td>0.998</td></tr><tr><td>LEMUR w/o CoT</td><td>GA</td><td>0.998</td><td>0.977</td><td>1</td><td>0.995</td><td>0.989</td><td>0.996</td><td>0.959</td><td>1</td><td>0.986</td><td>0.956</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.956</td><td>0.988</td></tr><tr><td>LEMUR</td><td>FGA</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>1</td><td>0.999</td><td>0.999</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.984</td><td>0.999</td></tr><tr><td>LEMUR</td><td>GA</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.982</td><td>1</td><td>0.988</td><td>0.995</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.977</td><td>0.996</td></tr></table>"
1081
+ },
1082
+ {
1083
+ "type": "table_caption",
1084
+ "bbox": [
1085
+ 0.252,
1086
+ 0.64,
1087
+ 0.746,
1088
+ 0.654
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "Table 5: FGA and GA on LogHub between LEMUR with and without CoT."
1092
+ },
1093
+ {
1094
+ "type": "table",
1095
+ "bbox": [
1096
+ 0.339,
1097
+ 0.657,
1098
+ 0.661,
1099
+ 0.718
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "<table><tr><td>Configuration</td><td>Avg.FGA</td><td>Avg.GA</td></tr><tr><td>Default (Our Method)</td><td>0.999</td><td>0.996</td></tr><tr><td>KMeans Clustering (Sinaga &amp; Yang, 2020)</td><td>0.685</td><td>0.698</td></tr><tr><td>DBSCAN Clustering (Khan et al., 2014)</td><td>0.731</td><td>0.744</td></tr><tr><td>Our Method w/o Bucket Grouping</td><td>0.355</td><td>0.394</td></tr></table>"
1103
+ },
1104
+ {
1105
+ "type": "table_caption",
1106
+ "bbox": [
1107
+ 0.338,
1108
+ 0.734,
1109
+ 0.66,
1110
+ 0.748
1111
+ ],
1112
+ "angle": 0,
1113
+ "content": "Table 6: Additional Ablation Studies on LEMUR"
1114
+ },
1115
+ {
1116
+ "type": "text",
1117
+ "bbox": [
1118
+ 0.171,
1119
+ 0.764,
1120
+ 0.827,
1121
+ 0.849
1122
+ ],
1123
+ "angle": 0,
1124
+ "content": "unsupervised models, LEMUR demonstrates superior or comparable results across various datasets, underscoring its efficacy in unsupervised learning without the need for annotated data. Moreover, even when juxtaposed with supervised models like LogPPT and LLVMParser, LEMUR shows equal or better performance in most datasets, based on FGA and GA metrics. This is particularly noteworthy as it highlights the capability of LEMUR to match or exceed the performance levels of supervised models, despite the absence of explicit label guidance."
1125
+ },
1126
+ {
1127
+ "type": "text",
1128
+ "bbox": [
1129
+ 0.171,
1130
+ 0.855,
1131
+ 0.828,
1132
+ 0.926
1133
+ ],
1134
+ "angle": 0,
1135
+ "content": "In Figure 4, LEMUR shows the robust performance in GA and FGA. The consistency and robustness of LEMUR are evident in its high FGA and GA across diverse datasets such as HDFS, Hadoop, Spark, etc. This consistency emphasizes its adaptability and robustness to various log parsing requirements. In conclusion, as an unsupervised LLM-related model, LEMUR significantly stands out in the log parsing domain."
1136
+ },
1137
+ {
1138
+ "type": "page_number",
1139
+ "bbox": [
1140
+ 0.494,
1141
+ 0.949,
1142
+ 0.506,
1143
+ 0.96
1144
+ ],
1145
+ "angle": 0,
1146
+ "content": "7"
1147
+ }
1148
+ ],
1149
+ [
1150
+ {
1151
+ "type": "header",
1152
+ "bbox": [
1153
+ 0.173,
1154
+ 0.033,
1155
+ 0.48,
1156
+ 0.048
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "Published as a conference paper at ICLR 2025"
1160
+ },
1161
+ {
1162
+ "type": "text",
1163
+ "bbox": [
1164
+ 0.171,
1165
+ 0.104,
1166
+ 0.825,
1167
+ 0.258
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "Figure 5 presents a comprehensive analysis of the execution times for multiple algorithms: LEMUR, Brain, Drain, IPLOM, Spell, and ULP, across various datasets, comprising 2K data points each. This figure provides critical insights into the efficiency and scalability of these algorithms in processing data. Notably, the execution time of LEMUR stands out for its relative brevity across the majority of the datasets. LEMUR demonstrates a significantly reduced execution time on several key datasets, including Hadoop, BGL, Windows, Android, and Mac, highlighting its efficiency and optimized processing capabilities. Furthermore, we extend this analysis to evaluate the performance scalability of these algorithms across four distinct datasets: BGL, HDFS, Android, and Spark. This extension, as depicted in Figure 6, encompasses varying dataset sizes, thereby providing a more nuanced understanding of each algorithm's adaptability and performance under different data volume conditions in real-world scenarios where dataset sizes can vary significantly."
1171
+ },
1172
+ {
1173
+ "type": "text",
1174
+ "bbox": [
1175
+ 0.171,
1176
+ 0.265,
1177
+ 0.827,
1178
+ 0.39
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "Compared to all unsupervised methods, we have achieved more superior performance in the preliminary stage before employing LLM. Furthermore, compared to other LLM-based methods, which are mostly supervised, such as LogPPT and LLVMParser, which are essentially inference after fine-tuning, the training process consumes more GPU resources for local deployment or involves larger data transmission over the network for closed-source ChatGPT. LEMUR achieves better results without fine-tuning the LLM. Whether for local deployment inference or remote API, it only utilizes GPU resources for inference, not for fine-tuning. Thus, it requires fewer GPU resources or less network latency in data transmission. In summary, LEMUR has achieved superior performance and unique advantages for unsupervised log parsing or supervised fine-tuning of LLM."
1182
+ },
1183
+ {
1184
+ "type": "title",
1185
+ "bbox": [
1186
+ 0.172,
1187
+ 0.41,
1188
+ 0.295,
1189
+ 0.423
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "4 ABLATION"
1193
+ },
1194
+ {
1195
+ "type": "title",
1196
+ "bbox": [
1197
+ 0.172,
1198
+ 0.439,
1199
+ 0.442,
1200
+ 0.453
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "4.1 EFFECT OF ENTROPY SAMPLING"
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "bbox": [
1208
+ 0.171,
1209
+ 0.465,
1210
+ 0.825,
1211
+ 0.59
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "In our comparative analysis, as delineated in Table 4, we meticulously evaluate four distinct sampling methodologies: Random sampling, First-token sampling, Entropy sampling, and a hybrid approach combining Entropy and First-token sampling. Our results, derived from a systematic and empirical evaluation, reveal that the hybrid Entropy + First-token sampling method exhibits superior performance over the other techniques under consideration. The enhanced effectiveness of this method is likely attributable to its synergistic integration of the entropy-based selection criterion, which effectively captures the variability within the data, and the first-token approach, which ensures the representativeness of the initial elements. This fusion of strategies enables a more nuanced and effective sampling process, as evidenced by our empirical findings."
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "bbox": [
1219
+ 0.171,
1220
+ 0.598,
1221
+ 0.825,
1222
+ 0.695
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "As illustrated in Table 5, which provides a detailed comparison between two versions of LEMUR: one implementing the three-hop Chain-of-Thought approach and the other without it. The left columns of the table present the performance metrics of LEMUR devoid of Chain-of-Thought (CoT), while the right columns display the outcomes following the integration of the CoT methodology. The data delineates a useful, albeit modest, improvement in the performance of LEMUR when augmented with the CoT approach. This enhancement is particularly evident in the FGA and GA metrics across a range of datasets, including Hadoop, BGL, HPC, Thunderbird, Linux, Android, and Mac."
1226
+ },
1227
+ {
1228
+ "type": "text",
1229
+ "bbox": [
1230
+ 0.171,
1231
+ 0.701,
1232
+ 0.825,
1233
+ 0.799
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "The observed limited degree of enhancement can be ascribed to two primary factors. The first is the already high baseline performance of LEMUR without the incorporation of CoT, which inherently constrains the potential for significant further improvements. The second factor pertains to the relative simplicity of the loghub datasets. In these scenarios, the application of a sophisticated methodology like CoT results in only marginal improvements, primarily because the datasets do not present sufficient complexity to fully exploit and showcase the enhanced capabilities of the CoT-augmented LEMUR."
1237
+ },
1238
+ {
1239
+ "type": "title",
1240
+ "bbox": [
1241
+ 0.172,
1242
+ 0.815,
1243
+ 0.53,
1244
+ 0.829
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "4.2 ADDITIONAL ABLATION STUDIES ON LEMUR"
1248
+ },
1249
+ {
1250
+ "type": "text",
1251
+ "bbox": [
1252
+ 0.171,
1253
+ 0.841,
1254
+ 0.825,
1255
+ 0.925
1256
+ ],
1257
+ "angle": 0,
1258
+ "content": "To further validate the robustness of LEMUR, we conducted ablation studies focusing on the impact of different clustering algorithms and the effectiveness of the bucket grouping strategy. Specifically, we experimented with replacing our clustering mechanism with KMeans (Sinaga & Yang, 2020) and DBSCAN (Khan et al., 2014) and evaluated the performance when disabling the bucket grouping based on the length assumption. These modifications aim to probe the sensitivity of LEMUR to these components."
1259
+ },
1260
+ {
1261
+ "type": "page_number",
1262
+ "bbox": [
1263
+ 0.494,
1264
+ 0.949,
1265
+ 0.505,
1266
+ 0.96
1267
+ ],
1268
+ "angle": 0,
1269
+ "content": "8"
1270
+ }
1271
+ ],
1272
+ [
1273
+ {
1274
+ "type": "header",
1275
+ "bbox": [
1276
+ 0.173,
1277
+ 0.033,
1278
+ 0.48,
1279
+ 0.049
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "Published as a conference paper at ICLR 2025"
1283
+ },
1284
+ {
1285
+ "type": "text",
1286
+ "bbox": [
1287
+ 0.171,
1288
+ 0.104,
1289
+ 0.825,
1290
+ 0.177
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "The results, as illustrated in Table 6, highlight the critical role of the selected clustering algorithm and bucket grouping strategy in LEMUR's performance. The substantial decrease in Avg.FGA and Avg.GA when employing alternative clustering methods or omitting the bucket grouping underscores their importance in achieving optimal outcomes. This ablation study confirms the necessity of careful component selection and validates the robustness of LEMUR's methodology."
1294
+ },
1295
+ {
1296
+ "type": "title",
1297
+ "bbox": [
1298
+ 0.172,
1299
+ 0.202,
1300
+ 0.347,
1301
+ 0.218
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "5 RELATED WORK"
1305
+ },
1306
+ {
1307
+ "type": "title",
1308
+ "bbox": [
1309
+ 0.172,
1310
+ 0.238,
1311
+ 0.307,
1312
+ 0.251
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "5.1 LOG Parser"
1316
+ },
1317
+ {
1318
+ "type": "text",
1319
+ "bbox": [
1320
+ 0.171,
1321
+ 0.267,
1322
+ 0.827,
1323
+ 0.49
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "In the evolving field of automatic log analysis, crucial for distributed systems and cloud computing, significant progress has been made in log parsing techniques, categorized into frequent pattern mining, clustering, and heuristics rules. Frequent pattern mining is exemplified by SLCT (Vaarandi, 2003) which groups logs based on token frequency, and LogCluster (Vaarandi & Pihelgas, 2015) which removes positional constraints in log grouping. Clustering approaches include LogMine (Hamooni et al., 2016) with its multi-layered clustering system, LKE(Fu et al., 2009) using edit distance and position weighing, SHISO (Mizutani, 2013) improving efficiency through hierarchical clustering, LenMa (Shima, 2016) employing token length vectors, and LPV (Xiao et al., 2020) which uses semantic vectors from word2vec. In the heuristics rules category, IPLOM (Makanju et al., 2009) partitions logs by length and token position, Spell (Du & Li, 2016) approaches parsing as the longest common sequential problem, Drain (He et al., 2017) groups logs by length and prefixes for template updates, and Prefix-Graph (Chu et al., 2021) merges prefix trees into graphs for template generation. Recent advancements have introduced deep learning-based algorithms like Nulog (Nedelkoski et al., 2021) Uniparser (Liu et al., 2022), and LogAP (Rand & Miranskyy, 2021) utilizing comparative learning and machine translation for parsing. However, these deep learning methods face challenges in efficiency and high operational costs due to GPU requirements."
1327
+ },
1328
+ {
1329
+ "type": "title",
1330
+ "bbox": [
1331
+ 0.172,
1332
+ 0.513,
1333
+ 0.407,
1334
+ 0.526
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "5.2 LARGE LANGUAGE MODEL"
1338
+ },
1339
+ {
1340
+ "type": "text",
1341
+ "bbox": [
1342
+ 0.171,
1343
+ 0.542,
1344
+ 0.827,
1345
+ 0.765
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "Language modeling using self-supervised learning and large-scale data, significantly enhances various natural language processing tasks. Specifically, pre-training a Transformer decoder (OpenAI, 2023; Vaswani et al., 2017; Yang et al., 2024a; 2025; 2024b; Ouyang et al., 2022a; Wei et al., 2022a; Yang et al., 2023; Nguyen, 2023; Liu et al., 2024; Chai et al., 2024; Zhang et al., 2024b; a; 2025) aids in unconditional text generation. Performance improvements (BertSum, 2023; Yang et al., 2020b; 2022a;b; 2021b;a; 2020a) in diverse tasks have been linked to the enlargement of Pre-training Language Models (PLMs) by increasing model or data size. This has led to the creation of increasingly larger PLMs, such as GPT-3 with 175 billion parameters and PaLM with 540 billion (Anil et al., 2023), guided by the scaling laws of large language models (Aghajanyan et al., 2023). Despite their similar architectures and pre-training tasks, larger PLMs, such as GPT-4 (OpenAI, 2023), exhibit unique behaviors and emergent abilities, excelling in complex tasks. A prime example is ChatGPT, adapting GPT-series LLMs for engaging dialogues, and showcasing advanced conversational skills. Fine-tuning LLMs on various datasets (Wei et al., 2022b) yields promising results, using human or LLM-created prompts for instruction tuning and refining generations. Chain-of-thought prompting (Wei et al., 2022b), where models explain their reasoning for complex problems, and RLHF (Ouyang et al., 2022b), a strategy significantly enhance their performance."
1349
+ },
1350
+ {
1351
+ "type": "title",
1352
+ "bbox": [
1353
+ 0.172,
1354
+ 0.792,
1355
+ 0.32,
1356
+ 0.807
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "6 CONCLUSION"
1360
+ },
1361
+ {
1362
+ "type": "text",
1363
+ "bbox": [
1364
+ 0.171,
1365
+ 0.827,
1366
+ 0.825,
1367
+ 0.926
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "To enhance log analysis in complex software systems, we propose LEMUR. This framework replaces traditional rule-based methods with an information entropy-based sampling for efficient log clustering and employs large language models (LLMs) for advanced semantic comprehension in template merging. Information entropy streamlines the process of distinguishing between disparate log messages based on their inherent informational content for efficient log clustering. LEMUR has demonstrated superior performance and efficiency in log parsing, validated through extensive tests on large-scale datasets."
1371
+ },
1372
+ {
1373
+ "type": "page_number",
1374
+ "bbox": [
1375
+ 0.494,
1376
+ 0.949,
1377
+ 0.506,
1378
+ 0.96
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "9"
1382
+ }
1383
+ ],
1384
+ [
1385
+ {
1386
+ "type": "header",
1387
+ "bbox": [
1388
+ 0.173,
1389
+ 0.033,
1390
+ 0.48,
1391
+ 0.049
1392
+ ],
1393
+ "angle": 0,
1394
+ "content": "Published as a conference paper at ICLR 2025"
1395
+ },
1396
+ {
1397
+ "type": "title",
1398
+ "bbox": [
1399
+ 0.174,
1400
+ 0.103,
1401
+ 0.289,
1402
+ 0.118
1403
+ ],
1404
+ "angle": 0,
1405
+ "content": "REFERENCES"
1406
+ },
1407
+ {
1408
+ "type": "ref_text",
1409
+ "bbox": [
1410
+ 0.174,
1411
+ 0.126,
1412
+ 0.826,
1413
+ 0.21
1414
+ ],
1415
+ "angle": 0,
1416
+ "content": "Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 265-279. PMLR, 2023."
1417
+ },
1418
+ {
1419
+ "type": "ref_text",
1420
+ "bbox": [
1421
+ 0.174,
1422
+ 0.218,
1423
+ 0.825,
1424
+ 0.359
1425
+ ],
1426
+ "angle": 0,
1427
+ "content": "Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, Eric Chu, Jonathan H. Clark, Laurent El Shafey, Yanping Huang, Kathy Meier-Hellstern, Gaurav Mishra, Erica Moreira, Mark Omernick, Kevin Robinson, Sebastian Ruder, Yi Tay, Kefan Xiao, Yuzhong Xu, Yujing Zhang, Gustavo Hernandez Abrego, Junwhan Ahn, Jacob Austin, Paul Barham, Jan A. Botha, James Bradbury, Siddhartha Brahma, Kevin Brooks, Michele Catasta, Yong Cheng, Colin Cherry, Christopher A. Choquette-Choo, Aakanksha Chowdhery, Clément Crepy, Shachi Dave, Mostafa Dehghani, Sunipa Dev, Jacob Devlin, Mark Diaz, Nan Du, Ethan Dyer, Vladimir Feinberg, Fangxiaoyu Feng, Vlad Fienber, Markus Freitag, Xavier Garcia, Sebastian Gehrmann, Lucas Gonzalez, and et al. Palm 2 technical report. CoRR, abs/2305.10403, 2023."
1428
+ },
1429
+ {
1430
+ "type": "ref_text",
1431
+ "bbox": [
1432
+ 0.173,
1433
+ 0.367,
1434
+ 0.749,
1435
+ 0.383
1436
+ ],
1437
+ "angle": 0,
1438
+ "content": "BertSum. source code, 2023. URL https://github.com/nlpyang/BertSum."
1439
+ },
1440
+ {
1441
+ "type": "ref_text",
1442
+ "bbox": [
1443
+ 0.173,
1444
+ 0.39,
1445
+ 0.825,
1446
+ 0.433
1447
+ ],
1448
+ "angle": 0,
1449
+ "content": "Linzheng Chai, Shukai Liu, Jian Yang, Yuwei Yin, Ke Jin, Jiaheng Liu, Tao Sun, Ge Zhang, Changyu Ren, Hongcheng Guo, et al. Mceval: Massively multilingual code evaluation. arXiv preprint arXiv:2406.07436, 2024."
1450
+ },
1451
+ {
1452
+ "type": "ref_text",
1453
+ "bbox": [
1454
+ 0.173,
1455
+ 0.44,
1456
+ 0.825,
1457
+ 0.484
1458
+ ],
1459
+ "angle": 0,
1460
+ "content": "Guojun Chu, Jingyu Wang, Qi Qi, Haifeng Sun, Shimin Tao, and Jianxin Liao. Prefix-graph: A versatile log parsing approach merging prefix tree with probabilistic graph. In 2021 IEEE 37th International Conference on Data Engineering (ICDE), pp. 2411-2422. IEEE, 2021."
1461
+ },
1462
+ {
1463
+ "type": "ref_text",
1464
+ "bbox": [
1465
+ 0.173,
1466
+ 0.491,
1467
+ 0.825,
1468
+ 0.521
1469
+ ],
1470
+ "angle": 0,
1471
+ "content": "Min Du and Feifei Li. Spell: Streaming parsing of system event logs. In 2016 IEEE 16th International Conference on Data Mining (ICDM), pp. 859-864. IEEE, 2016."
1472
+ },
1473
+ {
1474
+ "type": "ref_text",
1475
+ "bbox": [
1476
+ 0.173,
1477
+ 0.528,
1478
+ 0.825,
1479
+ 0.571
1480
+ ],
1481
+ "angle": 0,
1482
+ "content": "Min Du, Feifei Li, Guineng Zheng, and Vivek Srikumar. Deeplog: Anomaly detection and diagnosis from system logs through deep learning. In Proceedings of the 2017 ACM SIGSAC conference on computer and communications security, pp. 1285-1298, 2017."
1483
+ },
1484
+ {
1485
+ "type": "ref_text",
1486
+ "bbox": [
1487
+ 0.173,
1488
+ 0.578,
1489
+ 0.825,
1490
+ 0.622
1491
+ ],
1492
+ "angle": 0,
1493
+ "content": "Qiang Fu, Jian-Guang Lou, Yi Wang, and Jiang Li. Execution anomaly detection in distributed systems through unstructured log analysis. In 2009 ninth IEEE international conference on data mining, pp. 149-158. IEEE, 2009."
1494
+ },
1495
+ {
1496
+ "type": "ref_text",
1497
+ "bbox": [
1498
+ 0.174,
1499
+ 0.629,
1500
+ 0.825,
1501
+ 0.686
1502
+ ],
1503
+ "angle": 0,
1504
+ "content": "Ying Fu, Meng Yan, Jian Xu, Jianguo Li, Zhongxin Liu, Xiaohong Zhang, and Dan Yang. Investigating and improving log parsing in practice. In Proceedings of the 30th ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 1566-1577, 2022."
1505
+ },
1506
+ {
1507
+ "type": "ref_text",
1508
+ "bbox": [
1509
+ 0.173,
1510
+ 0.693,
1511
+ 0.783,
1512
+ 0.71
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "Robert M Gray. Entropy and information theory. Springer Science & Business Media, 2011."
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.174,
1521
+ 0.716,
1522
+ 0.825,
1523
+ 0.76
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "Hossein Hamooni, Biplob Debnath, Jianwu Xu, Hui Zhang, Guofei Jiang, and Abdullah Mueen. Logmine: Fast pattern recognition for log analytics. In Proceedings of the 25th ACM International on Conference on Information and Knowledge Management, pp. 1573-1582, 2016."
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.173,
1532
+ 0.766,
1533
+ 0.825,
1534
+ 0.81
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "Pinjia He, Jieming Zhu, Shilin He, Jian Li, and Michael R Lyu. An evaluation study on log parsing and its use in log mining. In 2016 46th annual IEEE/IFIP international conference on dependable systems and networks (DSN), pp. 654-661. IEEE, 2016."
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.173,
1543
+ 0.818,
1544
+ 0.825,
1545
+ 0.86
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R Lyu. Drain: An online log parsing approach with fixed depth tree. In 2017 IEEE international conference on web services (ICWS), pp. 33-40. IEEE, 2017."
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.173,
1554
+ 0.868,
1555
+ 0.825,
1556
+ 0.925
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "Shilin He, Qingwei Lin, Jian-Guang Lou, Hongyu Zhang, Michael R Lyu, and Dongmei Zhang. Identifying impactful service system problems via log analysis. In Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 60-70, 2018."
1560
+ },
1561
+ {
1562
+ "type": "list",
1563
+ "bbox": [
1564
+ 0.173,
1565
+ 0.126,
1566
+ 0.826,
1567
+ 0.925
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": null
1571
+ },
1572
+ {
1573
+ "type": "page_number",
1574
+ "bbox": [
1575
+ 0.491,
1576
+ 0.948,
1577
+ 0.511,
1578
+ 0.961
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "10"
1582
+ }
1583
+ ],
1584
+ [
1585
+ {
1586
+ "type": "header",
1587
+ "bbox": [
1588
+ 0.173,
1589
+ 0.033,
1590
+ 0.48,
1591
+ 0.049
1592
+ ],
1593
+ "angle": 0,
1594
+ "content": "Published as a conference paper at ICLR 2025"
1595
+ },
1596
+ {
1597
+ "type": "ref_text",
1598
+ "bbox": [
1599
+ 0.173,
1600
+ 0.103,
1601
+ 0.826,
1602
+ 0.134
1603
+ ],
1604
+ "angle": 0,
1605
+ "content": "Shilin He, Jieming Zhu, Pinjia He, and Michael R Lyu. Loghub: a large collection of system log datasets towards automated log analytics. arXiv preprint arXiv:2008.06448, 2020."
1606
+ },
1607
+ {
1608
+ "type": "ref_text",
1609
+ "bbox": [
1610
+ 0.173,
1611
+ 0.139,
1612
+ 0.826,
1613
+ 0.183
1614
+ ],
1615
+ "angle": 0,
1616
+ "content": "Shilin He, Pinjia He, Zhuangbin Chen, Tianyi Yang, Yuxin Su, and Michael R Lyu. A survey on automated log analysis for reliability engineering. ACM computing surveys (CSUR), 54(6):1-37, 2021."
1617
+ },
1618
+ {
1619
+ "type": "ref_text",
1620
+ "bbox": [
1621
+ 0.173,
1622
+ 0.19,
1623
+ 0.826,
1624
+ 0.234
1625
+ ],
1626
+ "angle": 0,
1627
+ "content": "Zhihan Jiang, Jinyang Liu, Zhuangbin Chen, Yichen Li, Junjie Huang, Yintong Huo, Pinjia He, Jiazhen Gu, and Michael R Lyu. Llmparser: A llm-based log parsing framework. arXiv preprint arXiv:2310.01796, 2023."
1628
+ },
1629
+ {
1630
+ "type": "ref_text",
1631
+ "bbox": [
1632
+ 0.173,
1633
+ 0.24,
1634
+ 0.826,
1635
+ 0.271
1636
+ ],
1637
+ "angle": 0,
1638
+ "content": "Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Xing Wang, and Zhaopeng Tu. Is chatgpt a good translator? a preliminary study. arXiv preprint arXiv:2301.08745, 2023."
1639
+ },
1640
+ {
1641
+ "type": "ref_text",
1642
+ "bbox": [
1643
+ 0.173,
1644
+ 0.277,
1645
+ 0.826,
1646
+ 0.321
1647
+ ],
1648
+ "angle": 0,
1649
+ "content": "Kamran Khan, Saif Ur Rehman, Kamran Aziz, Simon Fong, and Sababady Sarasvady. Dbscan: Past, present and future. In The fifth international conference on the applications of digital information and web technologies (ICADIWT 2014), pp. 232-238. IEEE, 2014."
1650
+ },
1651
+ {
1652
+ "type": "ref_text",
1653
+ "bbox": [
1654
+ 0.173,
1655
+ 0.328,
1656
+ 0.826,
1657
+ 0.357
1658
+ ],
1659
+ "angle": 0,
1660
+ "content": "Van-Hoang Le and Hongyu Zhang. Log parsing with prompt-based few-shot learning. arXiv preprint arXiv:2302.07435, 2023."
1661
+ },
1662
+ {
1663
+ "type": "ref_text",
1664
+ "bbox": [
1665
+ 0.173,
1666
+ 0.364,
1667
+ 0.826,
1668
+ 0.407
1669
+ ],
1670
+ "angle": 0,
1671
+ "content": "Shukai Liu, Linzheng Chai, Jian Yang, Jiajun Shi, He Zhu, Liran Wang, Ke Jin, Wei Zhang, Hualei Zhu, Shuyue Guo, et al. Mdeval: Massively multilingual code debugging. arXiv preprint arXiv:2411.02310, 2024."
1672
+ },
1673
+ {
1674
+ "type": "ref_text",
1675
+ "bbox": [
1676
+ 0.173,
1677
+ 0.414,
1678
+ 0.826,
1679
+ 0.458
1680
+ ],
1681
+ "angle": 0,
1682
+ "content": "Yilun Liu, Shimin Tao, Weibin Meng, Jingyu Wang, Wenbing Ma, Yanqing Zhao, Yuhang Chen, Hao Yang, Yanfei Jiang, and Xun Chen. Logprompt: Prompt engineering towards zero-shot and interpretable log analysis. arXiv preprint arXiv:2308.07610, 2023."
1683
+ },
1684
+ {
1685
+ "type": "ref_text",
1686
+ "bbox": [
1687
+ 0.173,
1688
+ 0.465,
1689
+ 0.826,
1690
+ 0.509
1691
+ ],
1692
+ "angle": 0,
1693
+ "content": "Yudong Liu, Xu Zhang, Shilin He, Hongyu Zhang, Liquan Li, Yu Kang, Yong Xu, Minghua Ma, Qingwei Lin, Yingnong Dang, et al. Uniparser: A unified log parser for heterogeneous log data. In Proceedings of the ACM Web Conference 2022, pp. 1893-1901, 2022."
1694
+ },
1695
+ {
1696
+ "type": "ref_text",
1697
+ "bbox": [
1698
+ 0.173,
1699
+ 0.515,
1700
+ 0.826,
1701
+ 0.56
1702
+ ],
1703
+ "angle": 0,
1704
+ "content": "Adetokunbo AO Makanju, A Nur Zincir-Heywood, and Evangelos E Milios. Clustering event logs using iterative partitioning. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 1255–1264, 2009."
1705
+ },
1706
+ {
1707
+ "type": "ref_text",
1708
+ "bbox": [
1709
+ 0.173,
1710
+ 0.566,
1711
+ 0.826,
1712
+ 0.595
1713
+ ],
1714
+ "angle": 0,
1715
+ "content": "Masayoshi Mizutani. Incremental mining of system log format. In 2013 IEEE International Conference on Services Computing, pp. 595-602. IEEE, 2013."
1716
+ },
1717
+ {
1718
+ "type": "ref_text",
1719
+ "bbox": [
1720
+ 0.173,
1721
+ 0.602,
1722
+ 0.826,
1723
+ 0.658
1724
+ ],
1725
+ "angle": 0,
1726
+ "content": "Animesh Nandi, Atri Mandal, Shubham Atreja, Gargi B Dasgupta, and Subhrajit Bhattacharya. Anomaly detection using program control flow graph mining from execution logs. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining, pp. 215-224, 2016."
1727
+ },
1728
+ {
1729
+ "type": "ref_text",
1730
+ "bbox": [
1731
+ 0.173,
1732
+ 0.666,
1733
+ 0.826,
1734
+ 0.724
1735
+ ],
1736
+ "angle": 0,
1737
+ "content": "Sasho Nedelkoski, Jasmin Bogatinovski, Alexander Acker, Jorge Cardoso, and Odej Kao. Self-supervised log parsing. In Machine Learning and Knowledge Discovery in Databases: Applied Data Science Track: European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part IV, pp. 122-138. Springer, 2021."
1738
+ },
1739
+ {
1740
+ "type": "ref_text",
1741
+ "bbox": [
1742
+ 0.173,
1743
+ 0.73,
1744
+ 0.826,
1745
+ 0.76
1746
+ ],
1747
+ "angle": 0,
1748
+ "content": "Ha-Thanh Nguyen. A brief report on lawgpt 1.0: A virtual legal assistant based on GPT-3. CoRR, abs/2302.05729, 2023."
1749
+ },
1750
+ {
1751
+ "type": "ref_text",
1752
+ "bbox": [
1753
+ 0.173,
1754
+ 0.767,
1755
+ 0.429,
1756
+ 0.784
1757
+ ],
1758
+ "angle": 0,
1759
+ "content": "OpenAI. Gpt-4 technical report, 2023."
1760
+ },
1761
+ {
1762
+ "type": "ref_text",
1763
+ "bbox": [
1764
+ 0.173,
1765
+ 0.79,
1766
+ 0.826,
1767
+ 0.846
1768
+ ],
1769
+ "angle": 0,
1770
+ "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022a."
1771
+ },
1772
+ {
1773
+ "type": "ref_text",
1774
+ "bbox": [
1775
+ 0.173,
1776
+ 0.854,
1777
+ 0.826,
1778
+ 0.924
1779
+ ],
1780
+ "angle": 0,
1781
+ "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul F. Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback. In NeurIPS, 2022b."
1782
+ },
1783
+ {
1784
+ "type": "list",
1785
+ "bbox": [
1786
+ 0.173,
1787
+ 0.103,
1788
+ 0.826,
1789
+ 0.924
1790
+ ],
1791
+ "angle": 0,
1792
+ "content": null
1793
+ },
1794
+ {
1795
+ "type": "page_number",
1796
+ "bbox": [
1797
+ 0.491,
1798
+ 0.949,
1799
+ 0.508,
1800
+ 0.96
1801
+ ],
1802
+ "angle": 0,
1803
+ "content": "11"
1804
+ }
1805
+ ],
1806
+ [
1807
+ {
1808
+ "type": "header",
1809
+ "bbox": [
1810
+ 0.173,
1811
+ 0.033,
1812
+ 0.48,
1813
+ 0.049
1814
+ ],
1815
+ "angle": 0,
1816
+ "content": "Published as a conference paper at ICLR 2025"
1817
+ },
1818
+ {
1819
+ "type": "ref_text",
1820
+ "bbox": [
1821
+ 0.173,
1822
+ 0.103,
1823
+ 0.826,
1824
+ 0.147
1825
+ ],
1826
+ "angle": 0,
1827
+ "content": "Jared Rand and Andriy Miranskyy. On automatic parsing of log records. In 2021 IEEE/ACM 43rd International Conference on Software Engineering: New Ideas and Emerging Results (ICSE-NIER), pp. 41-45. IEEE, 2021."
1828
+ },
1829
+ {
1830
+ "type": "ref_text",
1831
+ "bbox": [
1832
+ 0.173,
1833
+ 0.158,
1834
+ 0.826,
1835
+ 0.202
1836
+ ],
1837
+ "angle": 0,
1838
+ "content": "Issam Sedki, Abdelwahab Hamou-Lhadj, Otmane Ait-Mohamed, and Mohammed A Shehab. An effective approach for parsing large log files. In 2022 IEEE International Conference on Software Maintenance and Evolution (ICSME), pp. 1-12. IEEE, 2022."
1839
+ },
1840
+ {
1841
+ "type": "ref_text",
1842
+ "bbox": [
1843
+ 0.173,
1844
+ 0.213,
1845
+ 0.826,
1846
+ 0.243
1847
+ ],
1848
+ "angle": 0,
1849
+ "content": "Keiichi Shima. Length matters: Clustering system log messages using length of words. arXiv preprint arXiv:1611.03213, 2016."
1850
+ },
1851
+ {
1852
+ "type": "ref_text",
1853
+ "bbox": [
1854
+ 0.173,
1855
+ 0.254,
1856
+ 0.826,
1857
+ 0.283
1858
+ ],
1859
+ "angle": 0,
1860
+ "content": "Kristina P Sinaga and Miin-Shen Yang. Unsupervised k-means clustering algorithm. IEEE access, 8:80716-80727, 2020."
1861
+ },
1862
+ {
1863
+ "type": "ref_text",
1864
+ "bbox": [
1865
+ 0.173,
1866
+ 0.294,
1867
+ 0.826,
1868
+ 0.338
1869
+ ],
1870
+ "angle": 0,
1871
+ "content": "Byung Chul Tak, Shu Tao, Lin Yang, Chao Zhu, and Yaoping Ruan. Logan: Problem diagnosis in the cloud using log-based reference models. In 2016 IEEE International Conference on Cloud Engineering (IC2E), pp. 62-67. IEEE, 2016."
1872
+ },
1873
+ {
1874
+ "type": "ref_text",
1875
+ "bbox": [
1876
+ 0.173,
1877
+ 0.349,
1878
+ 0.826,
1879
+ 0.393
1880
+ ],
1881
+ "angle": 0,
1882
+ "content": "Risto Vaarandi. A data clustering algorithm for mining patterns from event logs. In Proceedings of the 3rd IEEE Workshop on IP Operations & Management (IPOM 2003) (IEEE Cat. No. 03EX764), pp. 119-126. IEEE, 2003."
1883
+ },
1884
+ {
1885
+ "type": "ref_text",
1886
+ "bbox": [
1887
+ 0.173,
1888
+ 0.404,
1889
+ 0.826,
1890
+ 0.448
1891
+ ],
1892
+ "angle": 0,
1893
+ "content": "Risto Vaarandi and Mauno Pihelgas. Logcluster-a data clustering and pattern mining algorithm for event logs. In 2015 11th International conference on network and service management (CNSM), pp. 1-7. IEEE, 2015."
1894
+ },
1895
+ {
1896
+ "type": "ref_text",
1897
+ "bbox": [
1898
+ 0.173,
1899
+ 0.458,
1900
+ 0.826,
1901
+ 0.488
1902
+ ],
1903
+ "angle": 0,
1904
+ "content": "Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NIPS, 2017."
1905
+ },
1906
+ {
1907
+ "type": "ref_text",
1908
+ "bbox": [
1909
+ 0.173,
1910
+ 0.499,
1911
+ 0.826,
1912
+ 0.53
1913
+ ],
1914
+ "angle": 0,
1915
+ "content": "Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners, 2022a."
1916
+ },
1917
+ {
1918
+ "type": "ref_text",
1919
+ "bbox": [
1920
+ 0.173,
1921
+ 0.54,
1922
+ 0.826,
1923
+ 0.583
1924
+ ],
1925
+ "angle": 0,
1926
+ "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In NeurIPS, 2022b."
1927
+ },
1928
+ {
1929
+ "type": "ref_text",
1930
+ "bbox": [
1931
+ 0.173,
1932
+ 0.594,
1933
+ 0.826,
1934
+ 0.638
1935
+ ],
1936
+ "angle": 0,
1937
+ "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In NeurIPS, 2022c."
1938
+ },
1939
+ {
1940
+ "type": "ref_text",
1941
+ "bbox": [
1942
+ 0.173,
1943
+ 0.649,
1944
+ 0.826,
1945
+ 0.693
1946
+ ],
1947
+ "angle": 0,
1948
+ "content": "Tong Xiao, Zhe Quan, Zhi-Jie Wang, Kaiqi Zhao, and Xiangke Liao. Lpv: A log parser based on vectorization for offline and online log parsing. In 2020 IEEE International Conference on Data Mining (ICDM), pp. 1346-1351. IEEE, 2020."
1949
+ },
1950
+ {
1951
+ "type": "ref_text",
1952
+ "bbox": [
1953
+ 0.173,
1954
+ 0.703,
1955
+ 0.826,
1956
+ 0.734
1957
+ ],
1958
+ "angle": 0,
1959
+ "content": "Hongyang Yang, Xiao-Yang Liu, and Christina Dan Wang. Fingpt: Open-source financial large language models. CoRR, abs/2306.06031, 2023."
1960
+ },
1961
+ {
1962
+ "type": "ref_text",
1963
+ "bbox": [
1964
+ 0.173,
1965
+ 0.744,
1966
+ 0.826,
1967
+ 0.829
1968
+ ],
1969
+ "angle": 0,
1970
+ "content": "Jian Yang, Shuming Ma, Dongdong Zhang, Zhoujun Li, and Ming Zhou. Improving neural machine translation with soft template prediction. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel R. Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pp. 5979-5989. Association for Computational Linguistics, 2020a. doi: 10.18653/V1/2020.ACL-MAIN.531. URL https://doi.org/10.18653/v1/2020.acl-main.531."
1971
+ },
1972
+ {
1973
+ "type": "ref_text",
1974
+ "bbox": [
1975
+ 0.173,
1976
+ 0.84,
1977
+ 0.826,
1978
+ 0.924
1979
+ ],
1980
+ "angle": 0,
1981
+ "content": "Jian Yang, Shuming Ma, Dongdong Zhang, Shuangzhi Wu, Zhoujun Li, and Ming Zhou. Alternating language modeling for cross-lingual pre-training. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pp. 9386-9393. AAAI Press, 2020b."
1982
+ },
1983
+ {
1984
+ "type": "list",
1985
+ "bbox": [
1986
+ 0.173,
1987
+ 0.103,
1988
+ 0.826,
1989
+ 0.924
1990
+ ],
1991
+ "angle": 0,
1992
+ "content": null
1993
+ },
1994
+ {
1995
+ "type": "page_number",
1996
+ "bbox": [
1997
+ 0.491,
1998
+ 0.948,
1999
+ 0.509,
2000
+ 0.961
2001
+ ],
2002
+ "angle": 0,
2003
+ "content": "12"
2004
+ }
2005
+ ],
2006
+ [
2007
+ {
2008
+ "type": "header",
2009
+ "bbox": [
2010
+ 0.173,
2011
+ 0.033,
2012
+ 0.48,
2013
+ 0.049
2014
+ ],
2015
+ "angle": 0,
2016
+ "content": "Published as a conference paper at ICLR 2025"
2017
+ },
2018
+ {
2019
+ "type": "ref_text",
2020
+ "bbox": [
2021
+ 0.173,
2022
+ 0.103,
2023
+ 0.828,
2024
+ 0.233
2025
+ ],
2026
+ "angle": 0,
2027
+ "content": "Jian Yang, Shuming Ma, Haoyang Huang, Dongdong Zhang, Li Dong, Shaohan Huang, Alexandre Muzio, Saksham Singhal, Hany Hassan, Xia Song, and Furu Wei. Multilingual machine translation systems from microsoft for WMT21 shared task. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussà, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno-Yepes, Philipp Koehn, Tom Kocmi, André Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, WMT@EMNLP 2021, Online Event, November 10-11, 2021, pp. 446-455. Association for Computational Linguistics, 2021a. URL https://aclanthology.org/2021.wmt-1.54."
2028
+ },
2029
+ {
2030
+ "type": "ref_text",
2031
+ "bbox": [
2032
+ 0.173,
2033
+ 0.238,
2034
+ 0.828,
2035
+ 0.338
2036
+ ],
2037
+ "angle": 0,
2038
+ "content": "Jian Yang, Yuwei Yin, Shuming Ma, Haoyang Huang, Dongdong Zhang, Zhoujun Li, and Furu Wei. Multilingual agreement for multilingual neural machine translation. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 2: Short Papers), Virtual Event, August 1-6, 2021, pp. 233-239. Association for Computational Linguistics, 2021b. doi: 10.18653/V1/2021. ACL-SHORT.31. URL https://doi.org/10.18653/v1/2021.acl-short.31."
2039
+ },
2040
+ {
2041
+ "type": "ref_text",
2042
+ "bbox": [
2043
+ 0.173,
2044
+ 0.345,
2045
+ 0.828,
2046
+ 0.417
2047
+ ],
2048
+ "angle": 0,
2049
+ "content": "Jian Yang, Yuwei Yin, Shuming Ma, Dongdong Zhang, Zhoujun Li, and Furu Wei. High-resource language-specific training for multilingual neural machine translation. In Luc De Raedt (ed.), Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pp. 4461-4467. ijcai.org, 2022a. doi: 10.24963/IJCAI.2022/619. URL https://doi.org/10.24963/ijcai.2022/619."
2050
+ },
2051
+ {
2052
+ "type": "ref_text",
2053
+ "bbox": [
2054
+ 0.173,
2055
+ 0.424,
2056
+ 0.828,
2057
+ 0.51
2058
+ ],
2059
+ "angle": 0,
2060
+ "content": "Jian Yang, Yuwei Yin, Shuming Ma, Dongdong Zhang, Shuangzhi Wu, Hongcheng Guo, Zhoujun Li, and Furu Wei. UM4: unified multilingual multiple teacher-student model for zero-resource neural machine translation. In Luc De Raedt (ed.), Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pp. 4454-4460. ijcai.org, 2022b. doi: 10.24963/IJCAI.2022/618. URL https://doi.org/10.24963/ijcai.2022/618."
2061
+ },
2062
+ {
2063
+ "type": "ref_text",
2064
+ "bbox": [
2065
+ 0.173,
2066
+ 0.518,
2067
+ 0.828,
2068
+ 0.561
2069
+ ],
2070
+ "angle": 0,
2071
+ "content": "Jian Yang, Jiaxi Yang, Ke Jin, Yibo Miao, Lei Zhang, Liquan Yang, Zeyu Cui, Yichang Zhang, Binyuan Hui, and Junyang Lin. Evaluating and aligning codellms on human preference. arXiv preprint arXiv:2412.05210, 2024a."
2072
+ },
2073
+ {
2074
+ "type": "ref_text",
2075
+ "bbox": [
2076
+ 0.173,
2077
+ 0.569,
2078
+ 0.828,
2079
+ 0.613
2080
+ ],
2081
+ "angle": 0,
2082
+ "content": "Jian Yang, Jiajun Zhang, Jiaxi Yang, Ke Jin, Lei Zhang, Qiyao Peng, Ken Deng, Yibo Miao, Tianyu Liu, Zeyu Cui, et al. Execrepobench: Multi-level executable code completion evaluation. arXiv preprint arXiv:2412.11990, 2024b."
2083
+ },
2084
+ {
2085
+ "type": "ref_text",
2086
+ "bbox": [
2087
+ 0.173,
2088
+ 0.621,
2089
+ 0.828,
2090
+ 0.664
2091
+ ],
2092
+ "angle": 0,
2093
+ "content": "Jian Yang, Wei Zhang, Jiaxi Yang, Yibo Miao, Shanghaoran Quan, Zhenhe Wu, Qiyao Peng, Liquun Yang, Tianyu Liu, Zeyu Cui, et al. Multi-agent collaboration for multilingual code instruction tuning. arXiv preprint arXiv:2502.07487, 2025."
2094
+ },
2095
+ {
2096
+ "type": "ref_text",
2097
+ "bbox": [
2098
+ 0.173,
2099
+ 0.672,
2100
+ 0.828,
2101
+ 0.703
2102
+ ],
2103
+ "angle": 0,
2104
+ "content": "Siyu Yu, Pinjia He, Ningjiang Chen, and Yifan Wu. Brain: Log parsing with bidirectional parallel tree. IEEE Transactions on Services Computing, 2023."
2105
+ },
2106
+ {
2107
+ "type": "ref_text",
2108
+ "bbox": [
2109
+ 0.173,
2110
+ 0.71,
2111
+ 0.828,
2112
+ 0.754
2113
+ ],
2114
+ "angle": 0,
2115
+ "content": "Wei Zhang, Xianfu Cheng, Yi Zhang, Jian Yang, Hongcheng Guo, Zhoujun Li, Xiaolin Yin, Xi-angyuan Guan, Xu Shi, Liangfan Zheng, and Bo Zhang. Eclipse: Semantic entropy-lcs for crosslingual industrial log parsing, 2024a. URL https://arxiv.org/abs/2405.13548."
2116
+ },
2117
+ {
2118
+ "type": "ref_text",
2119
+ "bbox": [
2120
+ 0.173,
2121
+ 0.761,
2122
+ 0.828,
2123
+ 0.819
2124
+ ],
2125
+ "angle": 0,
2126
+ "content": "Wei Zhang, Hongcheng Guo, Jian Yang, Zhoujin Tian, Yi Zhang, Chaoran Yan, Zhoujun Li, Tongliang Li, Xu Shi, Liangfan Zheng, and Bo Zhang. mabc: multi-agent blockchain-inspired collaboration for root cause analysis in micro-services architecture, 2024b. URL https://arxiv.org/abs/2404.12135."
2127
+ },
2128
+ {
2129
+ "type": "ref_text",
2130
+ "bbox": [
2131
+ 0.173,
2132
+ 0.827,
2133
+ 0.828,
2134
+ 0.884
2135
+ ],
2136
+ "angle": 0,
2137
+ "content": "Wei Zhang, Yi Zhang, Li Zhu, Qianghuai Jia, Feijun Jiang, Hongcheng Guo, Zhoujun Li, and Mengping Zhou. Adc: Enhancing function calling via adversarial datasets and code line-level feedback. In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888405."
2138
+ },
2139
+ {
2140
+ "type": "ref_text",
2141
+ "bbox": [
2142
+ 0.173,
2143
+ 0.892,
2144
+ 0.828,
2145
+ 0.923
2146
+ ],
2147
+ "angle": 0,
2148
+ "content": "De-Qing Zou, Hao Qin, and Hai Jin. Uilog: Improving log-based fault diagnosis by log analysis. Journal of computer science and technology, 31(5):1038-1052, 2016."
2149
+ },
2150
+ {
2151
+ "type": "list",
2152
+ "bbox": [
2153
+ 0.173,
2154
+ 0.103,
2155
+ 0.828,
2156
+ 0.923
2157
+ ],
2158
+ "angle": 0,
2159
+ "content": null
2160
+ },
2161
+ {
2162
+ "type": "page_number",
2163
+ "bbox": [
2164
+ 0.491,
2165
+ 0.948,
2166
+ 0.509,
2167
+ 0.96
2168
+ ],
2169
+ "angle": 0,
2170
+ "content": "13"
2171
+ }
2172
+ ]
2173
+ ]
2402.18xxx/2402.18205/1d6075b4-2164-4381-976c-0de70e92268a_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e63f9613039eb4b6d77955932b28e3c7566546501e0162a6b6d574615720d5f4
3
+ size 1502269
2402.18xxx/2402.18205/full.md ADDED
@@ -0,0 +1,261 @@
1
+ ![](images/4451117416aa492c1466349e95c984fa34250e98b6cb144a556d4ed2e0078e2e.jpg)
2
+
3
+ # LEMUR: LOG PARSING WITH ENTROPY SAMPLING AND CHAIN-OF-THOUGHT MERGING
6
+
7
+ Wei Zhang $^{1}$ , Xiangyuan Guan $^{1}$ , Lu Yunhong $^{2}$ , Jie Zhang $^{3}$ , Shuangyong Song $^{3}$ , Xianfu Cheng $^{1}$ , Zhenhe Wu $^{1}$ , Zhoujun Li $^{1}$
8
+
9
+ $^{1}$State Key Lab of Software Development Environment, Beihang University
+ $^{2}$Yantai University
+ $^{3}$China Telecom
12
+
13
+ zwpride@buaa.edu.cn
14
+
15
+ # ABSTRACT
16
+
17
+ Logs produced by extensive software systems are integral to monitoring system behaviors. Advanced log analysis facilitates the detection, alerting, and diagnosis of system faults. Log parsing, which entails transforming raw log messages into structured templates, constitutes a critical phase in the automation of log analytics. Existing log parsers often fail to identify the correct templates because they rely on hand-crafted rules. Moreover, these methods focus on statistical features while ignoring semantic information in log messages. To address these challenges, we introduce a cutting-edge Log parsing framework with Entropy sampling and chain-of-thought Merging (LEMUR). Specifically, to discard the tedious manual rules, we propose a novel sampling method inspired by information entropy, which efficiently clusters typical logs. Furthermore, to enhance the merging of log templates, we design a chain-of-thought method for large language models (LLMs). LLMs exhibit exceptional semantic comprehension and deftly distinguish between parameters and invariant tokens. We have conducted experiments on large-scale public datasets. Extensive evaluation demonstrates that LEMUR achieves state-of-the-art performance and impressive efficiency. The code is available at https://github.com/zwpride/lemur.
18
+
19
+ # 1 INTRODUCTION
20
+
21
+ Logs serve as a critical source of information for system monitoring, offering key insights into system behavior, a fact well-documented in existing literature (He et al., 2021). Their advantage over other types of data lies in their rich informational content and relative ease of interpretation. Through log analysis, several important downstream tasks can be effectively addressed, including anomaly detection (Du et al., 2017; Nandi et al., 2016), fault diagnosis (He et al., 2018; Zou et al., 2016), and root cause analysis (Tak et al., 2016). Log parsing, a crucial initial step in log analysis (He et al., 2016), separates log messages into two parts: 1) Log Templates. The constant, unchanging parts in logging statements, and 2) Log Variables. The dynamic, changeable details in different executions. In Figure 1, the logging statement logger.info('Wait {wait_time} seconds for {process} to be killed.') can yield various messages like Wait 26 seconds for Thread-20 to be killed. Wait < * > seconds for < * > to be killed is the log template, and the changing data like 26 and Thread-20 are log variables.
22
+
23
+ In the field of system maintenance, where source code accessibility is often restricted, different parsers (He et al., 2017; Fu et al., 2022; Le & Zhang, 2023; Yu et al., 2023; Jiang et al., 2023) have been developed to differentiate between templates and variables within log messages autonomously. Existing log parsers can be categorized into two groups: syntax-based and semantic-based. Syntax-based log parsers (He et al., 2017; Du & Li, 2016; Makanju et al., 2009; Vaarandi, 2003; Vaarandi & Pihelgas, 2015) utilize specific features or heuristics (e.g., word frequency) to extract the constant parts of log messages as templates. Conversely, semantic-based parsers (Liu et al., 2022; Le & Zhang, 2023) leverage advanced deep learning frameworks to assimilate semantics and system-specific patterns from annotated log data, thereby facilitating the parsing of new logs. Besides,
24
+
25
+ ![](images/8cd80afc96c5786b64477820702e60770a087884ac85f5b00867ca737619dd85.jpg)
26
+ Figure 1: An example of log parsing. Logging Statement cannot be accessed in most cases. Log Message is parsed into a static Template part containing fixed keywords and a Variable part that will vary between different log messages with the same template.
27
+
28
+ recent works Jiang et al. (2023) leverage large language models (LLMs) (OpenAI, 2023) for log parsing, which also utilizes the powerful semantic understanding of LLMs.
29
+
30
+ However, syntax-based methodologies are heavily dependent on meticulously crafted rules. Their performance significantly diminishes with the exponential increase in the volume and intricacy of log data. Furthermore, these techniques often overlook semantic variances among logs. For instance, logs such as success to open file/var/log/system and success to close file/var/log/system display syntactic resemblance, potentially leading to their aggregation by data-driven approaches, which could adversely impact the efficacy of anomaly detection tasks. Semantic-based methods, reliant on pre-labeled logs for training, falter in understanding semantics when applied to unfamiliar domains. LLM-based parsers (Liu et al., 2023; Le & Zhang, 2023; Jiang et al., 2023) have lower availability due to high inference cost (e.g., GPU requirements) and network latency (Jiao et al., 2023). Besides, LLMs can generate unstable results because of the hallucination problem.
31
+
32
+ To tackle these challenges, we propose LEMUR, a cutting-edge Log parsing framework with Entropy sampling and Chain-of-Thought Merging. LEMUR brings together the strengths of syntax-based and semantic-based methods and consists of three key components: Information Entropy Clustering, Template Generation, and Chain-of-Thought Merging. Specifically, inspired by information theory (Gray, 2011), we recognize that different logs encapsulate varying quantities of information, while logs of a similar nature contain comparable amounts. Consequently, we have developed a novel sampling method based on information entropy principles that efficiently clusters characteristic logs by dividing large-scale data into multiple clusters and utilizing efficient sampling and clustering algorithms within those clusters, thereby ensuring that LEMUR remains robust and high-performance in large-scale log scenarios. Then, in template generation, we determine the variables and the template of each log based on the information entropy of tokens at the same position. Finally, motivated by Chain-of-Thought prompting (Wei et al., 2022c), we design a three-hop Chain-of-Thought (infer structure, infer semantics, and infer solution) for merging templates.
33
+
34
+ We have conducted a comprehensive evaluation on the public large-scale log datasets of LogHub (He et al., 2020) against seven state-of-the-art log parsing approaches. The results show that LEMUR achieves higher performance than all other baselines in F1 score of grouping accuracy and template accuracy. Generally, the main contributions of this work are as follows:
35
+
36
+ - To the best of our knowledge, we propose LEMUR, the first unsupervised framework to combine information entropy and large language models for online log parsing.
37
+
38
+ ![](images/acfac27e635353173ebe3873804af37e3615cd9d4a0fa21000f1ecb9ca0dc78f.jpg)
39
+ Figure 2: Overview of our log parsing framework.
40
+
41
+ - We introduce a novel sampling method based on information entropy for efficient log clustering. Besides, by utilizing LLMs, we can accurately merge templates based on semantics rather than syntax-based methods.
42
+ - Extensive experiments are conducted on public benchmarks to demonstrate the effectiveness of our LEMUR. The results show that LEMUR outperforms other state-of-the-art methods.
43
+
44
+ # 2 METHODOLOGY
45
+
46
+ In this section, we overview LEMUR, a framework designed to enhance log message categorization. Figure 2 shows LEMUR, which consists of three main components: Information Entropy Clustering, Template Generation, and Chain-of-Thought Merging.
47
+
48
+ # 2.1 INFORMATION ENTROPY CLUSTERING
49
+
50
+ In this section, logs are categorized into buckets based on their length. Some representative logs are selected as the cluster centers within each bucket, and other logs are clustered around these centers.
51
+
52
+ Bucket Generation Logs are assigned to buckets $B_{j}$ by a mapping function $f$ based on length: $f:L\to B$ , with $L$ representing logs and $B$ representing buckets. For each log $L_{i}\in L$ , we compute its length $\mathit{len}(L_i)$ after tokenization. The log is then assigned to bucket $B_{j}\in B$ , where $j$ matches the index of the log's length in the set of unique lengths $l$ . This can be written as $f(L_{i}) = B_{j}$ where $l_{j} = \mathit{len}(L_{i})$ . This method ensures logs of the same length are grouped, reducing variability and aiding subsequent analysis.
53
+
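+ A minimal sketch of this bucketing step, assuming whitespace tokenization (in LEMUR the word-splitting token set $D$ is dataset-specific, as listed in Table 2):
+
+ ```python
+ from collections import defaultdict
+
+ def bucket_by_length(logs, tokenize=str.split):
+     """Assign each log L_i to the bucket B_j keyed by its token length len(L_i)."""
+     buckets = defaultdict(list)
+     for log in logs:
+         buckets[len(tokenize(log))].append(log)
+     return buckets
+ ```
+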
54
+ Entropy-based Sampling To identify potential log templates from buckets of logs with identical token lengths, we propose a clustering method inspired by information theory (Gray, 2011), specifically employing Shannon entropy. Unlike conventional clustering, which uses random seeds and iterative refinement, our approach selects logs based on their information content, which is ideal for log analysis due to the variability in log messages.
55
+
56
+ Each log $x\in B_j$ is evaluated by its entropy $E(x) = -\sum_{i}p(x_{i})\log p(x_{i})$ , where $x_{i}$ is the token and $p(x_{i})$ its probability, gauging its information content. Logs are then ranked by descending entropy into layers $Lay_1,\ldots ,Lay_n$ to prioritize those with rich information.
57
+
58
+ We select clustering centers from these layers, starting with the highest entropy logs and picking those with either new first tokens or entropy above a threshold. This process repeats until we've chosen $k$ centers or the remaining logs no longer offer new information. Our stopping criterion ensures we gather diverse and informative logs while avoiding redundancy.
59
+
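+ A sketch of the entropy-based selection under simplifying assumptions (whitespace tokenization; a single `entropy_threshold` stands in for the layer-wise criterion described above):
+
+ ```python
+ import math
+ from collections import Counter
+
+ def shannon_entropy(tokens):
+     """E(x) = -sum_i p(x_i) log p(x_i) over the log's token distribution."""
+     counts, total = Counter(tokens), len(tokens)
+     return -sum(c / total * math.log(c / total) for c in counts.values())
+
+ def select_centers(bucket, k, entropy_threshold):
+     """Walk logs in descending entropy order; keep those that introduce a new
+     first token or still carry entropy above the threshold, up to k centers."""
+     ranked = sorted(bucket, key=lambda log: shannon_entropy(log.split()), reverse=True)
+     centers, seen_first = [], set()
+     for log in ranked:
+         tokens = log.split()
+         if tokens[0] not in seen_first or shannon_entropy(tokens) > entropy_threshold:
+             centers.append(log)
+             seen_first.add(tokens[0])
+         if len(centers) == k:
+             break
+     return centers
+ ```
+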
60
+ Refinement with Jaccard Similarity After obtaining the initial set of $k$ samples, we further refine the selection by merging similar log centers based on their Jaccard similarity. This merging
61
+
62
+ ![](images/d3891e64de171cca3656f9851e4f16cfb69c115abcfa0e64ea2ebd961f734eb9.jpg)
63
+ Figure 3: Three-S-hop Chain-of-Thought Merging Method
64
+
65
+ process helps eliminate redundancy while preserving the diversity of log samples. We use the Jaccard similarity to measure the similarity between two logs. The Jaccard similarity between two logs $L_{1}$ and $L_{2}$ is calculated as similarity = $\frac{|L_1 \cap L_2|}{|L_1 \cup L_2|}$ . Let $J_{T}$ be the threshold for Jaccard similarity; if the similarity between two centers $(L_{1}, L_{2})$ exceeds $J_{T}$ , we remove $L_{2}$ . This refinement ensures a diverse and representative set of $k'$ log samples and optimizes the balance between coverage and conciseness in the log samples.
66
+
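+ A minimal sketch of this refinement, assuming centers are compared as token sets:
+
+ ```python
+ def jaccard(log_a, log_b):
+     """similarity = |L1 ∩ L2| / |L1 ∪ L2| over the two logs' token sets."""
+     a, b = set(log_a.split()), set(log_b.split())
+     return len(a & b) / len(a | b)
+
+ def refine_centers(centers, j_threshold):
+     """Drop a center L2 whenever its similarity to an earlier kept center exceeds J_T."""
+     kept = []
+     for center in centers:
+         if all(jaccard(center, earlier) <= j_threshold for earlier in kept):
+             kept.append(center)
+     return kept
+ ```
+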
67
+ Token-level Clustering In the clustering process for logs within a bucket $B_{j} \in B$ , we distinguish between the set of representative logs, $S$ , which serve as cluster centers, and the set of remaining logs, $O$ , which are to be clustered around these centers.
68
+
69
+ Let $S = \{s_1, s_2, \ldots, s_k\}$ represent the cluster centers and $O = \{o_1, o_2, \ldots, o_n\}$ denote the logs to be clustered. The objective is to assign each log $o_i \in O$ to a cluster center $s_j \in S$ such that the edit distance between them, $d(o_i, s_j)$ , is minimized. The edit distance measures the similarity between two logs in terms of the changes required to convert one into the other at the token level. The edit distance $d(o_i, s_j)$ between a log $o_i$ and a cluster center $s_j$ is defined as:
70
+
71
+ $$
72
+ d(o_{i}, s_{j}) = \sum_{t = 1}^{T} \delta(o_{i, t}, s_{j, t})
73
+ $$
74
+
75
+ where $\delta(o_{i,t}, s_{j,t})$ is the token-level edit distance between the $t$ -th token of $o_i$ and $s_j$ , and $T$ represents the total number of tokens.
76
+
77
+ By minimizing $d(o_i, s_j)$ for each log $o_i$ , we cluster logs around their nearest representative center in $S$ , ensuring that logs within a cluster are as similar as possible according to the defined metric.
78
+
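+ A sketch of the assignment step; a general token-level Levenshtein distance is shown, although within a length bucket it reduces to counting token mismatches:
+
+ ```python
+ def token_edit_distance(tokens_a, tokens_b):
+     """Levenshtein distance computed over tokens instead of characters."""
+     m, n = len(tokens_a), len(tokens_b)
+     dp = list(range(n + 1))
+     for i in range(1, m + 1):
+         prev, dp[0] = dp[0], i
+         for j in range(1, n + 1):
+             cur = dp[j]
+             dp[j] = min(dp[j] + 1,      # delete from tokens_a
+                         dp[j - 1] + 1,  # insert into tokens_a
+                         prev + (tokens_a[i - 1] != tokens_b[j - 1]))  # substitute
+             prev = cur
+     return dp[n]
+
+ def assign_to_centers(others, centers):
+     """Attach each remaining log o_i in O to its nearest center s_j in S."""
+     clusters = {center: [] for center in centers}
+     for log in others:
+         nearest = min(centers, key=lambda c: token_edit_distance(log.split(), c.split()))
+         clusters[nearest].append(log)
+     return clusters
+ ```
+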
79
+ # 2.2 TEMPLATE GENERATION
80
+
81
+ Template generation aims to differentiate variables from fixed components in logs. It starts with finding the Longest Common Subsequence (LCS), which indicates shared elements across logs. Then, each log is compared with the LCS to pinpoint unique tokens and their locations; for example, comparing ABC with the LCS B reveals distinct tokens at positions A-0 and C-2.
82
+
83
+ <table><tr><td></td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td></tr><tr><td>Template</td><td>14</td><td>114</td><td>36</td><td>50</td><td>120</td><td>46</td><td>149</td><td>50</td><td>118</td><td>166</td><td>75</td><td>6</td><td>8</td><td>27</td><td>43</td><td>341</td></tr><tr><td>Average Length</td><td>7.44</td><td>8.19</td><td>8.76</td><td>6.30</td><td>6.32</td><td>3.48</td><td>8.51</td><td>7.93</td><td>8.30</td><td>5.40</td><td>2.80</td><td>6.28</td><td>9.35</td><td>8.56</td><td>9.01</td><td>9.17</td></tr><tr><td>Various Lengths Proportion</td><td>0.25%</td><td>0.00%</td><td>0.00%</td><td>0.55%</td><td>0.00%</td><td>0.40%</td><td>2.25%</td><td>0.00%</td><td>0.00%</td><td>3.85%</td><td>0.00%</td><td>0.00%</td><td>47.35%</td><td>0.00%</td><td>0.00%</td><td>0.35%</td></tr><tr><td>Messages</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td><td>2000</td></tr></table>
84
+
85
+ Table 1: Summary of LogHub datasets.
86
+
87
+ <table><tr><td></td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td></tr><tr><td>D</td><td>:</td><td>=, :, (), -</td><td>:</td><td>=, :, =, , ( )</td><td>=, -, 9</td><td>9</td><td>:=, =, [, 11</td><td>:=, :, 8</td><td>:=, [, 9</td><td>:=, 12</td><td>:=, 12</td><td>:=, 12</td><td>:=, 12</td><td>:=, 4</td><td>20</td><td>12</td></tr><tr><td>k</td><td>2</td><td>8</td><td>6</td><td>8</td><td>9</td><td>9</td><td>11</td><td>8</td><td>25</td><td>9</td><td>12</td><td>12</td><td>12</td><td>4</td><td>20</td><td>12</td></tr><tr><td>J_T</td><td>0.7</td><td>0.7</td><td>0.6</td><td>0.9</td><td>0.6</td><td>0.6</td><td>0.4</td><td>0.6</td><td>0.33</td><td>0.7</td><td>0.7</td><td>0.7</td><td>0.5</td><td>0.7</td><td>0.7</td><td>0.7</td></tr><tr><td>θ</td><td>2.0</td><td>1.7</td><td>2.1</td><td>2.2</td><td>5.5</td><td>1.2</td><td>4.1</td><td>1.1</td><td>0.09</td><td>3.5</td><td>0</td><td>0</td><td>0.1</td><td>0.2</td><td>2.3</td><td>4.7</td></tr></table>
88
+
89
+ Table 2: Transposed hyperparameter settings for the LogHub datasets. $D$ is the token set for word-splitting, $k$ denotes the number of cluster centers $S$ in each bucket, $J_{T}$ is the Jaccard similarity threshold for within-bucket merging, and $\theta$ is the entropy threshold for token identification.
90
+
91
+ For each unique position, we calculate the information entropy $H(X) = -\sum P(x_i)\log_2 P(x_i)$ , where $P(x_{i})$ is the probability of the token at that position, and compile a list of these tokens.
92
+
93
+ To classify a position as a variable, we apply the following decision rule: a position is variable if $H > \theta$ and fixed otherwise, where $H$ is the information entropy and $\theta$ is a threshold set according to data characteristics and iterative experiments. When $H$ surpasses $\theta$ , we substitute all tokens at that position with the variable marker $< * >$ .
94
+
95
+ Additionally, we mark digit-dominated tokens as variables $< * >$ and use NLP to identify fixed-value tokens (e.g., hash values), enhancing variable identification accuracy. This method effectively identifies variables and fixed parts in logs, adapts to different types of logs, and accurately identifies changing parts while preserving the log structure.
96
+
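+ A condensed sketch of the position-wise rule, assuming equal-length logs within a cluster; the digit check is a simplification of the digit-dominated heuristic, and the NLP-based fixed-token test is omitted:
+
+ ```python
+ import math
+ from collections import Counter
+
+ def position_entropy(column):
+     """H(X) = -sum P(x_i) log2 P(x_i) for the tokens at one aligned position."""
+     counts, total = Counter(column), len(column)
+     return -sum(c / total * math.log2(c / total) for c in counts.values())
+
+ def generate_template(cluster_logs, theta):
+     """Emit <*> where entropy exceeds theta (or tokens look numeric), else keep the token."""
+     template = []
+     for column in zip(*(log.split() for log in cluster_logs)):
+         if position_entropy(column) > theta or any(ch.isdigit() for ch in column[0]):
+             template.append("<*>")
+         else:
+             template.append(column[0])
+     return " ".join(template)
+ ```
+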
97
+ # 2.3 THREE-S-HOP CHAIN-OF-THOUGHT MERGING
98
+
99
+ In advanced log message analysis, after categorizing messages into distinct clusters and extracting unique log templates, we encounter a significant challenge: reconciling log templates that are semantically similar but differ in length. Despite structural variations, these templates often represent identical or closely related events. This challenge primarily arises from the limitations of length-based classification methods, which frequently fail to recognize that templates with equivalent semantics but different expressions may correspond to the same event type.
100
+
101
+ As illustrated in Figure 3, we propose Three-S-hop Chain-of-Thought Merging using Large Language Models (LLMs) for semantic parsing. This includes data processing and three dialogue rounds: Structure QA (examines structure and length differences), Semantic QA (probes meanings for semantic equivalences), and Solution QA (decides on merging based on the prior analyses). This approach merges templates of different lengths but similar meanings through structural and semantic analysis, enhancing accurate identification.
102
+
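+ A schematic of the three dialogue rounds; `ask` is a hypothetical chat wrapper around an LLM (e.g., the GPT-4 API), and the prompts are illustrative paraphrases rather than the exact prompts:
+
+ ```python
+ def three_s_hop_merge(template_a, template_b, ask):
+     """Return True if the LLM concludes the two templates describe one event."""
+     history = [f"Template A: {template_a}", f"Template B: {template_b}"]
+     # Hop 1 -- Structure QA: examine structure and length differences.
+     history.append(ask(history, "How do the structures and lengths of A and B differ?"))
+     # Hop 2 -- Semantic QA: probe meanings for semantic equivalence.
+     history.append(ask(history, "Despite those differences, do A and B describe the same event?"))
+     # Hop 3 -- Solution QA: decide on merging based on the prior analyses.
+     decision = ask(history, "Given the analysis above, should A and B be merged? Answer yes or no.")
+     return decision.strip().lower().startswith("yes")
+ ```
+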
103
+ # 3 EXPERIMENT
104
+
105
+ # 3.1 IMPLEMENT DETAILS
106
+
107
+ Datasets Experiments are conducted on the most widely-used benchmark datasets published in LogHub (He et al., 2020). More details are available in Table 1.
108
+
109
+ Implementation and Configuration We implement LEMUR in Python 3.10 on an Apple M3 chip with an 8-core CPU, a 10-core GPU, 16 GB RAM, and macOS Sonoma (14.2.1). In experiments, $D$ is the token set for word-splitting, $k$ denotes the number of cluster centers $S$ in each bucket, and the Jaccard similarity threshold for within-bucket merging $J_{T}$ and the entropy threshold for token identification $\theta$ are shown in Table 2. We use GPT-4 as the base model for three-S-hop chain-of-thought merging, though LEMUR is compatible with other LLMs: it performs inference either by loading model weights locally or by calling the APIs these LLMs provide.
110
+
111
+ <table><tr><td rowspan="2">Dataset</td><td colspan="2">Drain</td><td colspan="2">Spell</td><td colspan="2">IPLOM</td><td colspan="2">ULP</td><td colspan="2">Brain</td><td colspan="2">LogPPT</td><td colspan="2">LLMParser</td><td colspan="2">LEMUR</td></tr><tr><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td><td>FGA</td><td>GA</td></tr><tr><td>HDFS</td><td>0.999</td><td>0.998</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.998</td><td>0.999</td><td>0.998</td><td>0.957</td><td>0.845</td><td>0.965</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Hadoop</td><td>0.999</td><td>0.948</td><td>0.920</td><td>0.777</td><td>0.996</td><td>0.954</td><td>0.999</td><td>0.950</td><td>0.999</td><td>0.949</td><td>0.999</td><td>0.977</td><td>0.958</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Spark</td><td>0.992</td><td>0.920</td><td>0.991</td><td>0.905</td><td>0.992</td><td>0.920</td><td>0.995</td><td>0.922</td><td>0.999</td><td>0.998</td><td>0.997</td><td>0.848</td><td>0.906</td><td>0.995</td><td>1</td><td>1</td></tr><tr><td>Zookeeper</td><td>0.999</td><td>0.967</td><td>0.999</td><td>0.964</td><td>0.999</td><td>0.993</td><td>0.999</td><td>0.988</td><td>0.999</td><td>0.985</td><td>1</td><td>1</td><td>0.967</td><td>1</td><td>1</td><td>1</td></tr><tr><td>BGL</td><td>0.999</td><td>0.963</td><td>0.957</td><td>0.786</td><td>0.999</td><td>0.939</td><td>0.999</td><td>0.930</td><td>0.999</td><td>0.986</td><td>0.968</td><td>0.455</td><td>0.864</td><td>0.892</td><td>1</td><td>1</td></tr><tr><td>HPC</td><td>0.991</td><td>0.887</td><td>0.986</td><td>0.654</td><td>0.978</td><td>0.829</td><td>0.994</td><td>0.951</td><td>0.998</td><td>0.945</td><td>0.999</td><td>0.941</td><td>0.910</td><td>0.872</td><td>1</td><td>1</td></tr><tr><td>Thunderbird</td><td>0.999</td><td>0.955</td><td>0.994</td><td>0.844</td><td>0.999</td><td>0.663</td><td>0.999</td><td>0.675</td><td>0.999</td><td>0.971</td><td>0.714</td><td>0.262</td><td>0.799</td><td>0.813</td><td>0.999</td><td>0.982</td></tr><tr><td>Windows</td><td>0.999</td><td>0.997</td><td>0.999</td><td>0.989</td><td>0.995</td><td>0.567</td><td>0.989</td><td>0.410</td><td>0.999</td><td>0.997</td><td>0.992</td><td>0.717</td><td>0.984</td><td>0.783</td><td>1</td><td>1</td></tr><tr><td>Linux</td><td>0.992</td><td>0.690</td><td>0.937</td><td>0.605</td><td>0.964</td><td>0.671</td><td>0.476</td><td>0.363</td><td>0.999</td><td>0.996</td><td>0.713</td><td>0.177</td><td>0.921</td><td>0.961</td><td>0.999</td><td>0.988</td></tr><tr><td>Android</td><td>0.996</td><td>0.911</td><td>0.992</td><td>0.919</td><td>0.949</td><td>0.712</td><td>0.971</td><td>0.838</td><td>0.997</td><td>0.961</td><td>0.989</td><td>0.862</td><td>0.990</td><td>0.873</td><td>0.999</td><td>0.995</td></tr><tr><td>HealthApp</td><td>0.918</td><td>0.780</td><td>0.887</td><td>0.639</td><td>0.958</td><td>0.822</td><td>0.993</td><td>0.901</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.978</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Apache</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>0.582</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Proxifier</td><td>0.785</td><td>0.526</td><td>0.832</td><td>0.526</td><td>0.786</td><td>0.516</td><td>0.940</td><td>0.024</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>OpenSSH</td><td>0.999</td><td>0.787</td><td>0.918</td><td>0.554</td><td>0.998</td><td>0.540</td><td>0.940</td><td>0.434</td><td>1</td><td>1</td><td>0.983</td><td>0.436</td><td>0.836</td><td>0.697</td><td>1</td><td>1</td></tr><tr><td>OpenStack</td><td>0.993</td><td>0.733</td><td>0.994</td><td>0.764</td><td>0.909</td><td>0.331</td><td>0.834</td><td>0.491</td><td>1</td><td>1</td><td>0.997</td><td>0.492</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Mac</td><td>0.975</td><td>0.786</td><td>0.963</td><td>0.756</td><td>0.957</td><td>0.670</td><td>0.981</td><td>0.814</td><td>0.996</td><td>0.942</td><td>0.720</td><td>0.761</td><td>0.830</td><td>0.871</td><td>0.984</td><td>0.977</td></tr><tr><td>Average</td><td>0.977</td><td>0.865</td><td>0.961</td><td>0.793</td><td>0.968</td><td>0.756</td><td>0.932</td><td>0.733</td><td>0.999</td><td>0.983</td><td>0.939</td><td>0.710</td><td>0.931</td><td>0.913</td><td>0.999</td><td>0.996</td></tr></table>
112
+
113
+ Table 3: FGA and GA on LogHub Dataset.
114
+
115
+ ![](images/9411af60d30473f59ee18eb41a845afe3989654ada0f27c58789836a93502841.jpg)
116
+ (a) Boxplot of group accuracy on 16 benchmark datasets.
117
+
118
+ ![](images/a44310b976f7f00551abc1835f2e0789f2af9052792f0f00e107120b247a775f.jpg)
119
+ (b) Boxplot of F1 score of group accuracy on 16 benchmark datasets.
120
+ Figure 4: Comparison of group accuracy and F1 score on 16 benchmark datasets.
121
+
122
+ # 3.2 BASELINES AND METRICS
123
+
124
+ Baselines We choose Drain (He et al., 2017), Spell (Du & Li, 2016), IPLOM (Makanju et al., 2009), ULP (Sedki et al., 2022), Brain (Yu et al., 2023), LogPPT (Le & Zhang, 2023), and LLMParser (Jiang et al., 2023) as our baselines.
125
+
126
+ F1 score of Grouping Accuracy (FGA) FGA is a template-level metric that focuses on the ratio of correctly grouped templates. Specifically, let $N_{g}$ be the number of templates in the ground truth and $N_{p}$ the number of templates generated by a log parser. If $N_{c}$ is the number of templates that are correctly parsed by the log parser, then we can compute the Precision of Grouping Accuracy (PGA) as $\frac{N_{c}}{N_{p}}$ and the Recall of Grouping Accuracy (RGA) as $\frac{N_{c}}{N_{g}}$. The FGA is their harmonic mean, i.e., $\frac{2 \times PGA \times RGA}{PGA + RGA}$.
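+
+ To make the metric concrete, here is a minimal sketch of the FGA computation as defined above (function and variable names are illustrative, not taken from the LEMUR codebase):
+
+ ```python
+ def fga(n_correct: int, n_parsed: int, n_truth: int) -> float:
+     """F1 score of Grouping Accuracy from template counts: n_correct
+     correctly parsed templates, n_parsed parser-generated templates,
+     n_truth ground-truth templates."""
+     if n_parsed == 0 or n_truth == 0:
+         return 0.0
+     pga = n_correct / n_parsed  # Precision of Grouping Accuracy
+     rga = n_correct / n_truth   # Recall of Grouping Accuracy
+     return 2 * pga * rga / (pga + rga) if pga + rga > 0 else 0.0
+ ```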
127
+
128
+ Grouping Accuracy (GA) GA is computed as the ratio of correctly grouped log messages to the total number of log messages. A log message is correctly grouped if and only if its template covers exactly the same set of log messages as the corresponding ground-truth template.
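+
+ A minimal sketch of GA under this definition, assuming each log message carries a predicted and a ground-truth template id (names are illustrative):
+
+ ```python
+ from collections import defaultdict
+
+ def grouping_accuracy(pred: list, truth: list) -> float:
+     """GA: fraction of messages whose predicted template groups exactly
+     the same set of messages as the ground-truth template."""
+     pred_groups, true_groups = defaultdict(set), defaultdict(set)
+     for i, (p, t) in enumerate(zip(pred, truth)):
+         pred_groups[p].add(i)
+         true_groups[t].add(i)
+     correct = sum(pred_groups[p] == true_groups[t]
+                   for p, t in zip(pred, truth))
+     return correct / len(truth) if truth else 0.0
+ ```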
129
+
130
+ Execution time. We measure the execution time in seconds and compare LEMUR with other parsers in terms of efficiency.
131
+
132
+ # 3.3 MAIN RESULTS
133
+
134
+ In Table 3, LEMUR, an unsupervised LLM-based model, exhibits remarkable effectiveness in log parsing. When compared to Brain, the current state-of-the-art in
135
+
136
+ ![](images/924dd71228c4109625895c980ead7e2b172d072e6d9a36ad147fe47e99655eab.jpg)
137
+ Figure 5: Execution time for various datasets.
138
+
139
+ ![](images/6e51146631df5cc7c7169b67ca139f112507fd46af239c073b3cdda91a2e9322.jpg)
140
+
141
+ ![](images/4c1703944fb31e113d2414ece7877f2fafc8b43c2b78d810c40a6c7179e244d7.jpg)
142
+
143
+ ![](images/617f124c3634a86c68d542a5a6a2a2fd53001aa403713466327d6007cf2a88d4.jpg)
144
+
145
+ ![](images/3e6b4257a6a3bd448d9f2537f32705b345eeea083a32d5821ca7b04562fc4ef4.jpg)
146
+
147
+ Figure 6: Execution time for various dataset sizes.
148
+
149
+ <table><tr><td>Method</td><td>Avg. FGA</td><td>Avg. GA</td></tr><tr><td>Random sampling</td><td>0.843</td><td>0.718</td></tr><tr><td>First-token sampling</td><td>0.913</td><td>0.804</td></tr><tr><td>Entropy sampling</td><td>0.968</td><td>0.851</td></tr><tr><td>Entropy + First-token sampling (Ours)</td><td>0.999</td><td>0.996</td></tr></table>
150
+
151
+ Table 4: Performance on different sampling methods.
152
+
153
+ <table><tr><td>Method</td><td>Metric</td><td>HDFS</td><td>Hadoop</td><td>Spark</td><td>Zookeeper</td><td>BGL</td><td>HPC</td><td>Thunderbird</td><td>Windows</td><td>Linux</td><td>Android</td><td>HealthApp</td><td>Apache</td><td>Proxifier</td><td>OpenSSH</td><td>OpenStack</td><td>Mac</td><td>Average</td></tr><tr><td>LEMUR w/o CoT</td><td>FGA</td><td>0.999</td><td>0.999</td><td>1</td><td>0.999</td><td>0.999</td><td>0.999</td><td>0.999</td><td>1</td><td>0.999</td><td>0.992</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.982</td><td>0.998</td></tr><tr><td>LEMUR w/o CoT</td><td>GA</td><td>0.998</td><td>0.977</td><td>1</td><td>0.995</td><td>0.989</td><td>0.996</td><td>0.959</td><td>1</td><td>0.986</td><td>0.956</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.956</td><td>0.988</td></tr><tr><td>LEMUR</td><td>FGA</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.999</td><td>1</td><td>0.999</td><td>0.999</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.984</td><td>0.999</td></tr><tr><td>LEMUR</td><td>GA</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.982</td><td>1</td><td>0.988</td><td>0.995</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.977</td><td>0.996</td></tr></table>
154
+
155
+ Table 5: FGA and GA on LogHub for LEMUR with and without CoT.
156
+
157
+ <table><tr><td>Configuration</td><td>Avg.FGA</td><td>Avg.GA</td></tr><tr><td>Default (Our Method)</td><td>0.999</td><td>0.996</td></tr><tr><td>KMeans Clustering (Sinaga &amp; Yang, 2020)</td><td>0.685</td><td>0.698</td></tr><tr><td>DBSCAN Clustering (Khan et al., 2014)</td><td>0.731</td><td>0.744</td></tr><tr><td>Our Method w/o Bucket Grouping</td><td>0.355</td><td>0.394</td></tr></table>
158
+
159
+ Table 6: Additional ablation studies on LEMUR.
160
+
161
+ unsupervised models, LEMUR demonstrates superior or comparable results across various datasets, underscoring its efficacy in unsupervised learning without the need for annotated data. Moreover, even when juxtaposed with supervised models like LogPPT and LLMParser, LEMUR shows equal or better performance on most datasets in terms of the FGA and GA metrics. This is particularly noteworthy as it highlights the capability of LEMUR to match or exceed the performance of supervised models despite the absence of explicit label guidance.
162
+
163
+ Figure 4 shows the robust performance of LEMUR in GA and FGA. Its consistently high FGA and GA across diverse datasets such as HDFS, Hadoop, and Spark underscore its adaptability to varied log parsing requirements. In conclusion, as an unsupervised LLM-based model, LEMUR stands out in the log parsing domain.
164
+
165
+ Figure 5 presents a comprehensive analysis of the execution times of LEMUR, Brain, Drain, IPLOM, Spell, and ULP across various datasets, comprising 2K data points each. This figure provides critical insights into the efficiency and scalability of these algorithms. Notably, LEMUR's execution time is among the shortest on the majority of the datasets; it is significantly faster on several key datasets, including Hadoop, BGL, Windows, Android, and Mac, highlighting its optimized processing. Furthermore, we extend this analysis to evaluate scalability on four datasets: BGL, HDFS, Android, and Spark. This extension, depicted in Figure 6, covers varying dataset sizes, providing a more nuanced view of each algorithm's behavior under the different data volumes encountered in real-world scenarios.
166
+
167
+ Compared to all unsupervised methods, LEMUR achieves superior performance even in the preliminary stage, before the LLM is employed. Furthermore, most other LLM-based methods, such as LogPPT and LLMParser, are supervised and essentially perform inference after fine-tuning; their training consumes additional GPU resources for local deployment or requires larger data transmission over the network for closed-source models such as ChatGPT. LEMUR achieves better results without fine-tuning the LLM: whether deployed locally or through a remote API, it uses GPU resources only for inference, not for fine-tuning, and thus requires fewer GPU resources and incurs less network latency for data transmission. In summary, LEMUR offers superior performance and unique advantages over both unsupervised log parsers and supervised LLM fine-tuning approaches.
168
+
169
+ # 4 ABLATION
170
+
171
+ # 4.1 EFFECT OF ENTROPY SAMPLING
172
+
173
+ In our comparative analysis, as delineated in Table 4, we meticulously evaluate four distinct sampling methodologies: Random sampling, First-token sampling, Entropy sampling, and a hybrid approach combining Entropy and First-token sampling. Our results, derived from a systematic and empirical evaluation, reveal that the hybrid Entropy + First-token sampling method exhibits superior performance over the other techniques under consideration. The enhanced effectiveness of this method is likely attributable to its synergistic integration of the entropy-based selection criterion, which effectively captures the variability within the data, and the first-token approach, which ensures the representativeness of the initial elements. This fusion of strategies enables a more nuanced and effective sampling process, as evidenced by our empirical findings.
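+
+ For illustration, one plausible reading of the hybrid strategy is sketched below: messages are bucketed by their first token, and the highest-entropy messages of each bucket are kept. The function names and the per-bucket budget `k` are assumptions for exposition, not the paper's exact implementation:
+
+ ```python
+ import math
+ from collections import Counter, defaultdict
+
+ def token_entropy(tokens):
+     """Shannon entropy of the token distribution within one message."""
+     counts = Counter(tokens)
+     n = len(tokens)
+     return -sum(c / n * math.log2(c / n) for c in counts.values())
+
+ def hybrid_sample(logs, k=3):
+     """Hypothetical hybrid sampler: group messages by first token,
+     then keep the k highest-entropy messages from each group."""
+     buckets = defaultdict(list)
+     for line in logs:
+         tokens = line.split()
+         if tokens:
+             buckets[tokens[0]].append((token_entropy(tokens), line))
+     sampled = []
+     for group in buckets.values():
+         group.sort(key=lambda pair: pair[0], reverse=True)
+         sampled.extend(line for _, line in group[:k])
+     return sampled
+ ```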
174
+
175
+ Table 5 provides a detailed comparison between two versions of LEMUR: one implementing the three-hop Chain-of-Thought (CoT) approach and one without it. The upper rows present the performance metrics of LEMUR without CoT, while the lower rows display the outcomes following the integration of the CoT methodology. The data shows a useful, albeit modest, improvement in the performance of LEMUR when augmented with the CoT approach. This enhancement is particularly evident in the FGA and GA metrics across a range of datasets, including Hadoop, BGL, HPC, Thunderbird, Linux, Android, and Mac.
176
+
177
+ The observed limited degree of enhancement can be ascribed to two primary factors. The first is the already high baseline performance of LEMUR without the incorporation of CoT, which inherently constrains the potential for significant further improvements. The second factor pertains to the relative simplicity of the LogHub datasets. In these scenarios, the application of a sophisticated methodology like CoT results in only marginal improvements, primarily because the datasets do not present sufficient complexity to fully exploit and showcase the enhanced capabilities of the CoT-augmented LEMUR.
178
+
179
+ # 4.2 ADDITIONAL ABLATION STUDIES ON LEMUR
180
+
181
+ To further validate the robustness of LEMUR, we conducted ablation studies focusing on the impact of different clustering algorithms and the effectiveness of the bucket grouping strategy. Specifically, we experimented with replacing our clustering mechanism with KMeans (Sinaga & Yang, 2020) and DBSCAN (Khan et al., 2014) and evaluated the performance when disabling the bucket grouping based on the length assumption. These modifications aim to probe the sensitivity of LEMUR to these components.
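+
+ As a point of reference for this ablation, a minimal sketch of length-based bucket grouping (the component disabled in the last row of Table 6) could look as follows; the exact grouping logic in LEMUR may differ:
+
+ ```python
+ from collections import defaultdict
+
+ def bucket_by_length(logs):
+     """Partition log messages by token count so that downstream
+     clustering only compares messages of the same length."""
+     buckets = defaultdict(list)
+     for line in logs:
+         buckets[len(line.split())].append(line)
+     return buckets
+ ```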
182
+
183
+ The results, as illustrated in Table 6, highlight the critical role of the selected clustering algorithm and bucket grouping strategy in LEMUR's performance. The substantial decrease in Avg.FGA and Avg.GA when employing alternative clustering methods or omitting the bucket grouping underscores their importance in achieving optimal outcomes. This ablation study confirms the necessity of careful component selection and validates the robustness of LEMUR's methodology.
184
+
185
+ # 5 RELATED WORK
186
+
187
+ # 5.1 LOG PARSER
188
+
189
+ In the evolving field of automatic log analysis, crucial for distributed systems and cloud computing, significant progress has been made in log parsing techniques, which fall into three categories: frequent pattern mining, clustering, and heuristic rules. Frequent pattern mining is exemplified by SLCT (Vaarandi, 2003), which groups logs based on token frequency, and LogCluster (Vaarandi & Pihelgas, 2015), which removes positional constraints in log grouping. Clustering approaches include LogMine (Hamooni et al., 2016) with its multi-layered clustering system, LKE (Fu et al., 2009) using edit distance and position weighting, SHISO (Mizutani, 2013) improving efficiency through hierarchical clustering, LenMa (Shima, 2016) employing token length vectors, and LPV (Xiao et al., 2020), which uses semantic vectors from word2vec. In the heuristic rules category, IPLOM (Makanju et al., 2009) partitions logs by length and token position, Spell (Du & Li, 2016) treats parsing as a longest common subsequence problem, Drain (He et al., 2017) groups logs by length and prefixes for template updates, and Prefix-Graph (Chu et al., 2021) merges prefix trees into graphs for template generation. Recent advancements have introduced deep learning-based algorithms such as Nulog (Nedelkoski et al., 2021), Uniparser (Liu et al., 2022), and LogAP (Rand & Miranskyy, 2021), utilizing contrastive learning and machine translation for parsing. However, these deep learning methods face challenges in efficiency and high operational costs due to GPU requirements.
190
+
191
+ # 5.2 LARGE LANGUAGE MODEL
192
+
193
+ Language modeling with self-supervised learning on large-scale data significantly enhances various natural language processing tasks. Specifically, pre-training a Transformer decoder (OpenAI, 2023; Vaswani et al., 2017; Yang et al., 2024a; 2025; 2024b; Ouyang et al., 2022a; Wei et al., 2022a; Yang et al., 2023; Nguyen, 2023; Liu et al., 2024; Chai et al., 2024; Zhang et al., 2024b; a; 2025) aids unconditional text generation. Performance improvements (BertSum, 2023; Yang et al., 2020b; 2022a;b; 2021b;a; 2020a) in diverse tasks have been linked to the enlargement of Pre-trained Language Models (PLMs) by increasing model or data size. This has led to the creation of increasingly larger PLMs, such as GPT-3 with 175 billion parameters and PaLM with 540 billion (Anil et al., 2023), guided by the scaling laws of large language models (Aghajanyan et al., 2023). Despite similar architectures and pre-training tasks, larger PLMs, such as GPT-4 (OpenAI, 2023), exhibit unique behaviors and emergent abilities, excelling in complex tasks. A prime example is ChatGPT, which adapts GPT-series LLMs for engaging dialogue and showcases advanced conversational skills. Fine-tuning LLMs on various datasets (Wei et al., 2022b) yields promising results, using human- or LLM-created prompts for instruction tuning and refining generations. Chain-of-thought prompting (Wei et al., 2022b), where models explain their reasoning for complex problems, and RLHF (Ouyang et al., 2022b) further enhance their performance.
194
+
195
+ # 6 CONCLUSION
196
+
197
+ To enhance log analysis in complex software systems, we propose LEMUR. This framework replaces traditional rule-based methods with information entropy-based sampling for efficient log clustering and employs large language models (LLMs) for advanced semantic comprehension in template merging. Information entropy streamlines the distinction between disparate log messages based on their inherent informational content. LEMUR demonstrates superior performance and efficiency in log parsing, validated through extensive tests on large-scale datasets.
198
+
199
+ # REFERENCES
200
+
201
+ Armen Aghajanyan, Lili Yu, Alexis Conneau, Wei-Ning Hsu, Karen Hambardzumyan, Susan Zhang, Stephen Roller, Naman Goyal, Omer Levy, and Luke Zettlemoyer. Scaling laws for generative mixed-modal language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 265-279. PMLR, 2023.
202
+ Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, Eric Chu, Jonathan H. Clark, Laurent El Shafey, Yanping Huang, Kathy Meier-Hellstern, Gaurav Mishra, Erica Moreira, Mark Omernick, Kevin Robinson, Sebastian Ruder, Yi Tay, Kefan Xiao, Yuzhong Xu, Yujing Zhang, Gustavo Hernandez Abrego, Junwhan Ahn, Jacob Austin, Paul Barham, Jan A. Botha, James Bradbury, Siddhartha Brahma, Kevin Brooks, Michele Catasta, Yong Cheng, Colin Cherry, Christopher A. Choquette-Choo, Aakanksha Chowdhery, Clément Crepy, Shachi Dave, Mostafa Dehghani, Sunipa Dev, Jacob Devlin, Mark Diaz, Nan Du, Ethan Dyer, Vladimir Feinberg, Fangxiaoyu Feng, Vlad Fienber, Markus Freitag, Xavier Garcia, Sebastian Gehrmann, Lucas Gonzalez, and et al. Palm 2 technical report. CoRR, abs/2305.10403, 2023.
203
+ BertSum. source code, 2023. URL https://github.com/nlpyang/BertSum.
204
+ Linzheng Chai, Shukai Liu, Jian Yang, Yuwei Yin, Ke Jin, Jiaheng Liu, Tao Sun, Ge Zhang, Changyu Ren, Hongcheng Guo, et al. Mceval: Massively multilingual code evaluation. arXiv preprint arXiv:2406.07436, 2024.
205
+ Guojun Chu, Jingyu Wang, Qi Qi, Haifeng Sun, Shimin Tao, and Jianxin Liao. Prefix-graph: A versatile log parsing approach merging prefix tree with probabilistic graph. In 2021 IEEE 37th International Conference on Data Engineering (ICDE), pp. 2411-2422. IEEE, 2021.
206
+ Min Du and Feifei Li. Spell: Streaming parsing of system event logs. In 2016 IEEE 16th International Conference on Data Mining (ICDM), pp. 859-864. IEEE, 2016.
207
+ Min Du, Feifei Li, Guineng Zheng, and Vivek Srikumar. Deeplog: Anomaly detection and diagnosis from system logs through deep learning. In Proceedings of the 2017 ACM SIGSAC conference on computer and communications security, pp. 1285-1298, 2017.
208
+ Qiang Fu, Jian-Guang Lou, Yi Wang, and Jiang Li. Execution anomaly detection in distributed systems through unstructured log analysis. In 2009 ninth IEEE international conference on data mining, pp. 149-158. IEEE, 2009.
209
+ Ying Fu, Meng Yan, Jian Xu, Jianguo Li, Zhongxin Liu, Xiaohong Zhang, and Dan Yang. Investigating and improving log parsing in practice. In Proceedings of the 30th ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 1566-1577, 2022.
210
+ Robert M Gray. Entropy and information theory. Springer Science & Business Media, 2011.
211
+ Hossein Hamooni, Biplob Debnath, Jianwu Xu, Hui Zhang, Guofei Jiang, and Abdullah Mueen. Logmine: Fast pattern recognition for log analytics. In Proceedings of the 25th ACM International on Conference on Information and Knowledge Management, pp. 1573-1582, 2016.
212
+ Pinjia He, Jieming Zhu, Shilin He, Jian Li, and Michael R Lyu. An evaluation study on log parsing and its use in log mining. In 2016 46th annual IEEE/IFIP international conference on dependable systems and networks (DSN), pp. 654-661. IEEE, 2016.
213
+ Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R Lyu. Drain: An online log parsing approach with fixed depth tree. In 2017 IEEE international conference on web services (ICWS), pp. 33-40. IEEE, 2017.
214
+ Shilin He, Qingwei Lin, Jian-Guang Lou, Hongyu Zhang, Michael R Lyu, and Dongmei Zhang. Identifying impactful service system problems via log analysis. In Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 60-70, 2018.
215
+
216
+ Shilin He, Jieming Zhu, Pinjia He, and Michael R Lyu. Loghub: a large collection of system log datasets towards automated log analytics. arXiv preprint arXiv:2008.06448, 2020.
217
+ Shilin He, Pinjia He, Zhuangbin Chen, Tianyi Yang, Yuxin Su, and Michael R Lyu. A survey on automated log analysis for reliability engineering. ACM computing surveys (CSUR), 54(6):1-37, 2021.
218
+ Zhihan Jiang, Jinyang Liu, Zhuangbin Chen, Yichen Li, Junjie Huang, Yintong Huo, Pinjia He, Jiazhen Gu, and Michael R Lyu. Llmparser: A llm-based log parsing framework. arXiv preprint arXiv:2310.01796, 2023.
219
+ Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Xing Wang, and Zhaopeng Tu. Is chatgpt a good translator? a preliminary study. arXiv preprint arXiv:2301.08745, 2023.
220
+ Kamran Khan, Saif Ur Rehman, Kamran Aziz, Simon Fong, and Sababady Sarasvady. Dbscan: Past, present and future. In The fifth international conference on the applications of digital information and web technologies (ICADIWT 2014), pp. 232-238. IEEE, 2014.
221
+ Van-Hoang Le and Hongyu Zhang. Log parsing with prompt-based few-shot learning. arXiv preprint arXiv:2302.07435, 2023.
222
+ Shukai Liu, Linzheng Chai, Jian Yang, Jiajun Shi, He Zhu, Liran Wang, Ke Jin, Wei Zhang, Hualei Zhu, Shuyue Guo, et al. Mdeval: Massively multilingual code debugging. arXiv preprint arXiv:2411.02310, 2024.
223
+ Yilun Liu, Shimin Tao, Weibin Meng, Jingyu Wang, Wenbing Ma, Yanqing Zhao, Yuhang Chen, Hao Yang, Yanfei Jiang, and Xun Chen. Logprompt: Prompt engineering towards zero-shot and interpretable log analysis. arXiv preprint arXiv:2308.07610, 2023.
224
+ Yudong Liu, Xu Zhang, Shilin He, Hongyu Zhang, Liquan Li, Yu Kang, Yong Xu, Minghua Ma, Qingwei Lin, Yingnong Dang, et al. Uniparser: A unified log parser for heterogeneous log data. In Proceedings of the ACM Web Conference 2022, pp. 1893-1901, 2022.
225
+ Adetokunbo AO Makanju, A Nur Zincir-Heywood, and Evangelos E Milios. Clustering event logs using iterative partitioning. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 1255–1264, 2009.
226
+ Masayoshi Mizutani. Incremental mining of system log format. In 2013 IEEE International Conference on Services Computing, pp. 595-602. IEEE, 2013.
227
+ Animesh Nandi, Atri Mandal, Shubham Atreja, Gargi B Dasgupta, and Subhrajit Bhattacharya. Anomaly detection using program control flow graph mining from execution logs. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining, pp. 215-224, 2016.
228
+ Sasho Nedelkoski, Jasmin Bogatinovski, Alexander Acker, Jorge Cardoso, and Odej Kao. Self-supervised log parsing. In Machine Learning and Knowledge Discovery in Databases: Applied Data Science Track: European Conference, ECML PKDD 2020, Ghent, Belgium, September 14-18, 2020, Proceedings, Part IV, pp. 122-138. Springer, 2021.
229
+ Ha-Thanh Nguyen. A brief report on lawgpt 1.0: A virtual legal assistant based on GPT-3. CoRR, abs/2302.05729, 2023.
230
+ OpenAI. Gpt-4 technical report, 2023.
231
+ Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35: 27730-27744, 2022a.
232
+ Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul F. Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback. In NeurIPS, 2022b.
233
+
234
+ Jared Rand and Andriy Miranskyy. On automatic parsing of log records. In 2021 IEEE/ACM 43rd International Conference on Software Engineering: New Ideas and Emerging Results (ICSE-NIER), pp. 41-45. IEEE, 2021.
235
+ Issam Sedki, Abdelwahab Hamou-Lhadj, Otmane Ait-Mohamed, and Mohammed A Shehab. An effective approach for parsing large log files. In 2022 IEEE International Conference on Software Maintenance and Evolution (ICSME), pp. 1-12. IEEE, 2022.
236
+ Keiichi Shima. Length matters: Clustering system log messages using length of words. arXiv preprint arXiv:1611.03213, 2016.
237
+ Kristina P Sinaga and Miin-Shen Yang. Unsupervised k-means clustering algorithm. IEEE access, 8:80716-80727, 2020.
238
+ Byung Chul Tak, Shu Tao, Lin Yang, Chao Zhu, and Yaoping Ruan. Logan: Problem diagnosis in the cloud using log-based reference models. In 2016 IEEE International Conference on Cloud Engineering (IC2E), pp. 62-67. IEEE, 2016.
239
+ Risto Vaarandi. A data clustering algorithm for mining patterns from event logs. In Proceedings of the 3rd IEEE Workshop on IP Operations & Management (IPOM 2003) (IEEE Cat. No. 03EX764), pp. 119-126. IEEE, 2003.
240
+ Risto Vaarandi and Mauno Pihelgas. Logcluster-a data clustering and pattern mining algorithm for event logs. In 2015 11th International conference on network and service management (CNSM), pp. 1-7. IEEE, 2015.
241
+ Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. NIPS, 2017.
242
+ Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners, 2022a.
243
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In NeurIPS, 2022b.
245
+ Tong Xiao, Zhe Quan, Zhi-Jie Wang, Kaiqi Zhao, and Xiangke Liao. Lpv: A log parser based on vectorization for offline and online log parsing. In 2020 IEEE International Conference on Data Mining (ICDM), pp. 1346-1351. IEEE, 2020.
246
+ Hongyang Yang, Xiao-Yang Liu, and Christina Dan Wang. Fingpt: Open-source financial large language models. CoRR, abs/2306.06031, 2023.
247
+ Jian Yang, Shuming Ma, Dongdong Zhang, Zhoujun Li, and Ming Zhou. Improving neural machine translation with soft template prediction. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel R. Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pp. 5979-5989. Association for Computational Linguistics, 2020a. doi: 10.18653/V1/2020.ACL-MAIN.531. URL https://doi.org/10.18653/v1/2020.acl-main.531.
248
+ Jian Yang, Shuming Ma, Dongdong Zhang, Shuangzhi Wu, Zhoujun Li, and Ming Zhou. Alternating language modeling for cross-lingual pre-training. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pp. 9386-9393. AAAI Press, 2020b.
249
+
250
+ Jian Yang, Shuming Ma, Haoyang Huang, Dongdong Zhang, Li Dong, Shaohan Huang, Alexandre Muzio, Saksham Singhal, Hany Hassan, Xia Song, and Furu Wei. Multilingual machine translation systems from microsoft for WMT21 shared task. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussà, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno-Yepes, Philipp Koehn, Tom Kocmi, André Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, WMT@EMNLP 2021, Online Event, November 10-11, 2021, pp. 446-455. Association for Computational Linguistics, 2021a. URL https://aclanthology.org/2021.wmt-1.54.
251
+ Jian Yang, Yuwei Yin, Shuming Ma, Haoyang Huang, Dongdong Zhang, Zhoujun Li, and Furu Wei. Multilingual agreement for multilingual neural machine translation. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 2: Short Papers), Virtual Event, August 1-6, 2021, pp. 233-239. Association for Computational Linguistics, 2021b. doi: 10.18653/V1/2021. ACL-SHORT.31. URL https://doi.org/10.18653/v1/2021.acl-short.31.
252
+ Jian Yang, Yuwei Yin, Shuming Ma, Dongdong Zhang, Zhoujun Li, and Furu Wei. High-resource language-specific training for multilingual neural machine translation. In Luc De Raedt (ed.), Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pp. 4461-4467. ijcai.org, 2022a. doi: 10.24963/IJCAI.2022/619. URL https://doi.org/10.24963/ijcai.2022/619.
253
+ Jian Yang, Yuwei Yin, Shuming Ma, Dongdong Zhang, Shuangzhi Wu, Hongcheng Guo, Zhoujun Li, and Furu Wei. UM4: unified multilingual multiple teacher-student model for zero-resource neural machine translation. In Luc De Raedt (ed.), Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pp. 4454-4460. ijcai.org, 2022b. doi: 10.24963/IJCAI.2022/618. URL https://doi.org/10.24963/ijcai.2022/618.
254
+ Jian Yang, Jiaxi Yang, Ke Jin, Yibo Miao, Lei Zhang, Liquan Yang, Zeyu Cui, Yichang Zhang, Binyuan Hui, and Junyang Lin. Evaluating and aligning codellms on human preference. arXiv preprint arXiv:2412.05210, 2024a.
255
+ Jian Yang, Jiajun Zhang, Jiaxi Yang, Ke Jin, Lei Zhang, Qiyao Peng, Ken Deng, Yibo Miao, Tianyu Liu, Zeyu Cui, et al. Execrepobench: Multi-level executable code completion evaluation. arXiv preprint arXiv:2412.11990, 2024b.
256
+ Jian Yang, Wei Zhang, Jiaxi Yang, Yibo Miao, Shanghaoran Quan, Zhenhe Wu, Qiyao Peng, Liqun Yang, Tianyu Liu, Zeyu Cui, et al. Multi-agent collaboration for multilingual code instruction tuning. arXiv preprint arXiv:2502.07487, 2025.
257
+ Siyu Yu, Pinjia He, Ningjiang Chen, and Yifan Wu. Brain: Log parsing with bidirectional parallel tree. IEEE Transactions on Services Computing, 2023.
258
+ Wei Zhang, Xianfu Cheng, Yi Zhang, Jian Yang, Hongcheng Guo, Zhoujun Li, Xiaolin Yin, Xi-angyuan Guan, Xu Shi, Liangfan Zheng, and Bo Zhang. Eclipse: Semantic entropy-lcs for crosslingual industrial log parsing, 2024a. URL https://arxiv.org/abs/2405.13548.
259
+ Wei Zhang, Hongcheng Guo, Jian Yang, Zhoujin Tian, Yi Zhang, Chaoran Yan, Zhoujun Li, Tongliang Li, Xu Shi, Liangfan Zheng, and Bo Zhang. mabc: multi-agent blockchain-inspired collaboration for root cause analysis in micro-services architecture, 2024b. URL https://arxiv.org/abs/2404.12135.
260
+ Wei Zhang, Yi Zhang, Li Zhu, Qianghuai Jia, Feijun Jiang, Hongcheng Guo, Zhoujun Li, and Mengping Zhou. Adc: Enhancing function calling via adversarial datasets and code line-level feedback. In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888405.
261
+ De-Qing Zou, Hao Qin, and Hai Jin. Uilog: Improving log-based fault diagnosis by log analysis. Journal of computer science and technology, 31(5):1038-1052, 2016.
2402.18xxx/2402.18205/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01cbd120ce5cebd63c383d2b98c91b59fbd20147a636e6078264ab3b140c626f
3
+ size 523332
2402.18xxx/2402.18205/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18206/0f8b5e9d-4b4f-4579-b447-e30d31455158_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1d9c4abb77ba6595adcc973143eaf76f171f60b0f4fec5311b3f1183d695bc1
3
+ size 6103838
2402.18xxx/2402.18206/full.md ADDED
@@ -0,0 +1,462 @@
1
+ # Balancing Act: Distribution-Guided Debiasing in Diffusion Models
2
+
3
+ Rishubh Parihar\*,1 Abhijnya Bhat\*,1 Abhipsa Basu $^{1}$ Saswat Mallick $^{1}$
4
+ Jogendra Nath Kundu $^{2}$ R. Venkatesh Babu $^{1}$
5
+
6
+ $^{1}$ Indian Institute of Science, Bangalore $^{2}$ Meta Reality Labs
7
+
8
+ # Abstract
9
+
10
+ Diffusion Models (DMs) have emerged as powerful generative models with unprecedented image generation capability. These models are widely used for data augmentation and creative applications. However, DMs reflect the biases present in the training datasets. This is especially concerning in the context of faces, where the DM prefers one demographic subgroup over others (e.g., female vs. male). In this work, we present a method for debiasing DMs without relying on additional reference data or model retraining. Specifically, we propose Distribution Guidance, which enforces the generated images to follow the prescribed attribute distribution. To realize this, we build on the key insight that the latent features of the denoising UNet hold rich demographic semantics, and the same can be leveraged to guide debiased generation. We train an Attribute Distribution Predictor (ADP) - a small MLP that maps the latent features to the distribution of attributes. ADP is trained with pseudo labels generated from existing attribute classifiers. The proposed Distribution Guidance with ADP enables fair generation. Our method reduces bias across single and multiple attributes and outperforms the baseline by a significant margin for unconditional and text-conditional diffusion models. Further, we present a downstream task of training a fair attribute classifier by augmenting the training set with our generated data.
11
+
12
+ Code is available on the project page.
13
+
14
+ # 1. Introduction
15
+
16
+ Recent advancements in Diffusion Models (DM) [13, 40, 51] have garnered much interest among researchers in evaluating the quality of the generated content. These models are not only used to generate realistic content but also to augment real datasets [29, 54] for downstream tasks. However, existing DMs have been found to synthesize biased content with respect to multiple demographic factors like gender, race, etc., which can be detrimental to society once these models are deployed in the real world [26, 35, 41].
17
+
18
+ ![](images/312a52adef39375b04d34fc1d404ac11efa061da839c130d04c9175744d2675b.jpg)
19
+ a)
20
+
21
+ ![](images/f20da95f2c17377b89790308146275300e8d432bb9134affe75e8aa3f39ec00d.jpg)
22
+ Imbalanced Attribute Distribution
23
+
24
+ ![](images/996143274c85fb949289c5f76b6fb6573e3a380625024ce8227d40aa7168c5f0.jpg)
25
+ Distribution Guidance
26
+
27
+ ![](images/3816082c150eb2518fc1cba48e3fe220c9af42398f9187f60bf693128f2957e7.jpg)
28
+ Frozen Diffusion Model
29
+
30
+ ![](images/7f4808149e20d6447d792fe9f5e3f59f3c09fbf1d9e053f69167a0f9f256ec82.jpg)
31
+ b) c) Distribution Guided Sampling
34
+ Figure 1. a) Random sampling from a pretrained DM [40] generates images with gender imbalance. b) Proposed method takes a user-defined reference attribute $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ and performs distribution guidance on a pretrained DM. c) Sampling with distribution guidance results in fair generation that follows user defined $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ .
35
+
36
+ ![](images/7e2051294405f67297e21d1c334ba448f242fa363a7cf8ebd96136aeb62ada9d.jpg)
37
+ Generated Attribute Distribution
38
+
39
+ The problem is largely caused by the images used to train these models, as the outputs of these models are governed by these training datasets [27, 35]. Effects of such harmful biases have been shown by multiple recent works involving studies on DMs [26, 35, 41], GANs and other generative models [6, 14, 27]. In fact, Perera et al. [35] show that unconditional DMs—even when trained with balanced data—amplify racial biases, leading to the generation of more white-skinned faces than dark-skinned ones. Biases in the generated data are even more evident in large text-to-image DMs, e.g., models mostly tend to generate a specific gender with a given profession (like male and doctor) [28, 47, 62].
40
+
41
+ Existing works on debiasing either require a reference dataset [6, 62] and/or allow retraining of the model [6, 60, 61]. On the contrary, our work aims to mitigate biases in both unconditional and conditional DMs, enabling fair generation without model retraining. We propose a practical setting where we provide a reference attribute distribution $(\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}})$ for a sensitive attribute $a$ and query the DM to sample images following $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$. This framework allows for adapting any existing DM to a pre-defined distribution. E.g., a user can provide $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$ as a uniform distribution for a given sensitive attribute to generate a balanced attribute distribution. We believe defining $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$ provides just enough information to condition the DM for fair generation. This is an extremely practical setting for debiasing and is particularly important for large text-to-image DMs [38, 40, 44], where retraining or fine-tuning is computationally intensive.
44
+
45
+ One plausible approach for fair generation is to guide every generated sample with attribute classifiers following classifier guidance [9]. However, such a framework, though simple, is overly restrictive as it requires presetting and enforcing attributes for each sample individually (which we call sample guidance). Such constraints during denoising result in inferior generation quality, as discussed in Sec.4.1. Instead, we propose to jointly denoise a whole batch of samples and guide the process with $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$ (which we call distribution guidance). Specifically, we push the generated batch attribute distribution $\mathbf{p}_{\theta}^{\mathrm{a}}$ and $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$ close to each other during the reverse process. Distribution guidance provides more flexibility to each sample during generation as it does not enforce a preset of attributes on a sample basis. Intuitively, distribution guidance prioritizes transforming easier samples close to the decision boundary. This results in fair generation without sacrificing the generation quality.
46
+
47
+ A major challenge for guidance-based conditioning is that it requires separate image classifiers for each noise scale of the diffusion process. To overcome this, we propose to perform guidance in a semantically rich feature space - $h$ -space [23] of DMs. Specifically, we train an Attribute Distribution Predictor (ADP) that predicts attribute distribution directly from the $h$ -space features. As ADP is trained on rich and discriminative $h$ -space features, it - a) is implemented as a linear layer; b) requires minimal training data, and c) is fast in training and inference. Finally, during inference, we steer the $h$ -space representation by matching the predictions from ADP to $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ .
48
+
49
+ We extensively evaluate our proposed method for the fair generation of faces in single- and multi-attribute settings. Additionally, we present the results of our method on Stable Diffusion [40], a large text-to-image DM. Further, as a downstream application, we train debiased attribute classifiers by augmenting the training data for minority subgroups.
50
+
51
+ The major contributions of this work are the following:
52
+
53
+ 1. A novel setting for debiasing existing DMs without retraining, given a reference attribute distribution.
54
+ 2. Distribution guidance to condition the reverse diffusion process on a reference attribute distribution.
57
+
58
+ 3. Guidance in the intermediate features of the diffusion network ($h$-space), which leads to data-efficient training and fast generation.
59
+
60
+ # 2. Related Works
61
+
62
+ Biases in Generative Models. While generative models like Generative Adversarial Networks and Diffusion Models have become the de-facto tools for image generation in recent times, studies show that they are not free of biases [3, 15, 26, 27, 35, 41]. Perera et al. [35] show that unconditional diffusion models amplify the biases in the training data with respect to gender, race, and age. Luccioni et al. [26] identify and quantify social biases in images generated by popular text-to-image models like DALL-E 2, and Stable Diffusion v1.4 and 2. Maluleke et al. [27] study racial biases in GANs and find that GANs mimic the racial distribution in the training data.
63
+
64
+ Debiasing generative models by retraining. This line of work focuses on mitigating biases in generative models by retraining them [6, 32, 46, 53, 55, 56, 60, 61]. Some of these works [46, 56, 60] assume knowledge of the labels of the sensitive attribute and then debias the models such that there is no correlation between the decision attribute and the sensitive attribute. IMLE-GAN [61] ensures coverage of minority groups by combining GAN adversarial training with Implicit Maximum Likelihood Estimation (IMLE) [25]. Another body of works employs a balanced unlabelled reference dataset to ensure unbiased generations [6, 53, 55]. Choi et al. [6] use a density-ratio based technique to identify the bias in datasets via the reference dataset, and learn a fair model based on importance reweighting with the help of both the original biased and the reference dataset. To capture the distance between the small reference data and the generated data, Um et al. [55] use the LeCam Divergence [24]. On the other hand, Teo et al. [53] introduce a transfer learning approach to solve this problem by training the model on the biased dataset first and then adapting the model to the reference set.
65
+
66
+ Debiasing generative models without training. As training of GANs and DMs can be resource-consuming, many methods prefer fair generation of images without explicit training [14, 29, 37, 52]. MaGNET [14] aims to produce uniform sampling on the learned manifold of any generative model like GANs or VAEs, while Ramaswamy et al. [37] and Tan et al. [52] manipulate the latent space of GANs to generate balanced outputs. Controlling the generation with latent manipulation is easier in GANs due to their highly disentangled latent spaces [18, 34, 50]. GANDiff-Face [29], on the other hand, generates balanced synthetic data for face recognition by first generating high-quality images from different demographics using GANs and then finetuning Stable Diffusion [40] using DreamBooth [42] to generate more images of such identities with different poses, expressions, etc. Multiple works attempt to mitigate biases in vision-language models and text-conditioned diffusion models as well [4, 7, 49, 62, 64]. Chuang et al. [7] debias the text embedding using a calibrated projection matrix and show unbiased generations without any additional training or data. However, debiasing unconditional DMs has received less attention, which is the main focus of this work.
69
+
70
+ Guidance in Diffusion Models. One of the primary techniques to condition a diffusion model is to guide the reverse diffusion process with the gradients of an additional network [9]. GLIDE [33] used CLIP [36]-based guidance for open-world caption-to-image generation. Guidance has also been used for image-to-image translation [59] and counterfactual generation [45]. However, guidance in its original form requires retraining the guiding networks on noisy data from scratch. A few works overcome this by learning a mapping function from the diffusion feature space to sketches for sketch guidance [57], or by a universal guidance framework that repurposes pretrained networks [1].
71
+
72
+ # 3. Method
73
+
74
+ We assume a setting where we are given a pretrained DM trained on biased data and a reference distribution of the sensitive attributes $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ . Our goal is to generate data from the DM, whose generated attribute distribution $\mathbf{p}_{\theta}^{\mathbf{a}}$ best approximates the reference $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ without retraining. The key idea is to jointly guide the denoising of a batch of samples such that $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}} \approx \mathbf{p}_{\theta}^{\mathbf{a}}$ . Directly computing $\mathbf{p}_{\theta}^{\mathbf{a}}$ in a closed form is intractable. Instead, we train an Attribute Distribution Predictor, a linear projection that maps the intermediate batch features from $h$ -space of a denoising network to an estimate of attribute distribution $\hat{\mathbf{p}}_{\theta}^{\mathbf{a}}$ .
75
+
76
+ # 3.1. Preliminary
77
+
78
+ Diffusion models have emerged as a powerful family of generative models trained to learn the data distribution by gradual denoising from a Gaussian distribution. Starting from a clean point $\mathbf{x_0}$ and a set of scalar values $\{\alpha_t\}_{t=1}^T$, applying $\mathbf{t}$ steps of the forward diffusion process yields a noisy data point $\mathbf{x_t}$, where $\bar{\alpha}_t = \prod_{i=1}^t \alpha_i$ and
79
+
80
+ $$
81
+ \mathbf{x}_{\mathbf{t}} = \sqrt{\bar{\alpha}_{t}}\,\mathbf{x}_{\mathbf{0}} + \sqrt{1 - \bar{\alpha}_{t}}\,\epsilon, \quad \epsilon \sim \mathcal{N}(0, I) \tag{1}
82
+ $$
83
+
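+ For concreteness, Eq. 1 corresponds to the following PyTorch sketch of the forward noising step; `alpha_bar` is assumed to hold the cumulative products $\bar{\alpha}_t$, and all names are illustrative:
+
+ ```python
+ import torch
+
+ def q_sample(x0, t, alpha_bar):
+     """Forward diffusion (Eq. 1): x_t = sqrt(a_bar_t) x_0 + sqrt(1 - a_bar_t) eps."""
+     eps = torch.randn_like(x0)                          # eps ~ N(0, I)
+     a = alpha_bar[t].view(-1, *([1] * (x0.dim() - 1)))  # broadcast per sample
+     return a.sqrt() * x0 + (1 - a).sqrt() * eps, eps
+ ```
+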
84
+ A diffusion model is learned as a neural network $\epsilon_{\theta}$ that predicts the noise from a given $\mathbf{x_t}$ and $\mathbf{t}$. The reverse process takes the form $q(\mathbf{x}_{\mathbf{t} - 1}|\mathbf{x}_{\mathbf{t}},\mathbf{x}_0)$, which is parameterized as a Gaussian distribution. In this work, we consider DDIM [51] sampling, which first computes an estimate of the clean data point $\hat{\mathbf{x}}_0$ and then samples $\mathbf{x}_{\mathbf{t} - 1}$ from $q(\mathbf{x}_{\mathbf{t} - 1}|\mathbf{x}_{\mathbf{t}},\hat{\mathbf{x}}_0)$.
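+
+ The clean-point estimate follows by inverting Eq. 1 with the predicted noise; a minimal sketch with illustrative names (inputs are assumed to be torch tensors):
+
+ ```python
+ def x0_estimate(x_t, eps_pred, alpha_bar_t):
+     # Invert Eq. 1: x0_hat = (x_t - sqrt(1 - a_bar_t) * eps) / sqrt(a_bar_t)
+     return (x_t - (1 - alpha_bar_t).sqrt() * eps_pred) / alpha_bar_t.sqrt()
+ ```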
85
+
86
+ Classifier guidance is proposed to condition a diffusion model on class labels with the help of a pretrained classifier [9]. Specifically, a classifier $f_{\phi}(c|\mathbf{x}_{\mathbf{t}},t)$ is trained on noisy images to predict the class label $c$. The gradients of the classifier are used to guide the diffusion sampling process to generate an image of the prescribed class $c$. Concretely, the classifier guidance performs sampling by updating the noise prediction $\epsilon_{\theta}(\mathbf{x}_{\mathbf{t}},\mathbf{t})$ as follows:
89
+
90
+ $$
91
+ \hat{\epsilon}_{\theta}(\mathbf{x}_{\mathbf{t}}, t) = \epsilon_{\theta}(\mathbf{x}_{\mathbf{t}}, t) - \sqrt{1 - \alpha_{t}}\,\nabla_{\mathbf{x}_{\mathbf{t}}} \log f_{\phi}(c \mid \mathbf{x}_{\mathbf{t}}, t) \tag{2}
92
+ $$
93
+
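+ A minimal PyTorch sketch of this update, assuming `classifier(x, t)` returns class logits for noisy inputs (all names are illustrative, not a specific library API):
+
+ ```python
+ import torch
+
+ def guided_eps(eps_pred, x_t, t, classifier, target, alpha_t):
+     """Eq. 2: shift the predicted noise by the gradient of
+     log f_phi(c | x_t, t) from a noise-aware classifier."""
+     x = x_t.detach().requires_grad_(True)
+     log_probs = torch.log_softmax(classifier(x, t), dim=-1)
+     score = log_probs[torch.arange(len(x)), target].sum()  # sum of log p(c|x_t)
+     grad = torch.autograd.grad(score, x)[0]
+     return eps_pred - (1 - alpha_t) ** 0.5 * grad
+ ```
+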
94
+ # 3.2. Classifier guidance for debiasing
95
+
96
+ A promising approach is to leverage pretrained attribute classifiers to guide the model towards balanced generation. Assuming a reference attribute distribution $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ for a binary attribute $\mathbf{a}$ (e.g., gender) and a corresponding attribute classifier $f_{\phi}$, we parameterize $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ as a Bernoulli distribution with parameter $\mathbf{r}$ denoting the fraction of male samples. To generate samples following $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$, we can randomly select $\mathbf{Nr}$ samples from a batch of size $\mathbf{N}$ and guide them towards the male class using predictions from $f_{\phi}$. Similarly, we guide the remaining $\mathbf{N}(\mathbf{1} - \mathbf{r})$ samples towards the female class. In practice, such sample guidance follows $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ to some extent, but results in inferior sample quality (see Fig. 3b).
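+
+ In code, sample guidance amounts to presetting a target class per sample before denoising; a minimal sketch (illustrative, with class 1 denoting male):
+
+ ```python
+ import torch
+
+ def preset_targets(n: int, r: float) -> torch.Tensor:
+     """Assign round(n * r) samples to class 1 and the rest to class 0;
+     each sample is then guided toward its preset class."""
+     targets = torch.zeros(n, dtype=torch.long)
+     targets[torch.randperm(n)[: round(n * r)]] = 1
+     return targets
+ ```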
97
+
98
+ Insight.1: Transforming samples close to the decision boundary is easier and results in higher quality generation.
99
+
100
+ Remark: We performed an instructive experiment on changing the gender attribute (female to male) using classifier guidance. We group the samples into four quantiles based on their distance from the decision boundary of a pre-trained gender classifier (Fig. 2a). Next, we perform sample-based guidance over the samples from each quantile (Fig. 2b). The samples that are close to the decision boundary (quantiles Q1 and Q2) are easily transformed with guidance, whereas the samples away from the decision boundary (quantiles Q3 and Q4) are distorted during the guidance process. This is also quantified with the quantile-wise FID against a real set (of male images), where the samples from Q1/Q2 have better FID after conversion.
101
+
102
+ ![](images/70e4f989f9cd062577f8ec45a1f6813e07e0302b31f0c5557f11ecc7cc2be3e3.jpg)
103
+
104
+ ![](images/22857221c87a4adedf20b39bdf357c8a63ef4f98544f2115e153534fe18f4190.jpg)
105
+ Figure 2. Distribution of samples w.r.t. decision boundary
106
+
107
+ Insight.2: Attempting to steer the generation of individual samples towards a pre-defined attribute class is overly restrictive and leads to inferior generation quality.
108
+
109
+ Remark: Sample guidance requires enforcing a preset attribute state (male/female) on each sample of a batch during the denoising process, which is too stringent and distorts the outputs. This is particularly harmful when samples from quantile Q4 are selected for transformation, as shown in Fig. 2. Given an intermediate timestep $\tau$, the samples in the earlier stages $(t > \tau)$ of the reverse diffusion process are close to the noise space, resulting in poor classifier performance at the corresponding stages. The guidance becomes effective only in the later stages of denoising $(t < \tau)$. However, by timestep $\tau$, some facial features are already formed [30]. Hence, enforcing a preset attribute state at later stages $(t < \tau)$ is restrictive and results in sample collapse (Fig. 3b). E.g., for the gender attribute, if a sample has formed dominant female features by $\tau$, it will be in Q4, and forcing it to be male is overly restrictive, resulting in distorted generations.
112
+
113
+ ![](images/56b78a253d29cea1df4049f185a4032f82b4f2f78a564a392047b61bb2b60127.jpg)
114
+ Figure 3. Sample guidance vs Distribution guidance. a) After a few steps of denoising $(t = \tau)$ , the generated samples have learned some discriminative features for gender. Sample guidance randomly selects samples from the batch uniformly from all the quantiles for conversion enforcing samples with dominant female features to also change. However, distribution guidance majorly converts the samples close to the decision boundary (Q1/Q2), which is easy to convert. b) Visualization of the generated samples. Samples transformed with distribution guidance are natural looking without any distortion, whereas images with sample guidance suffer from distortion or unnatural appearance.
115
+
116
+ # 3.3. Distribution Guidance
117
+
118
+ We propose an alternate guidance strategy termed Distribution Guidance for fair generation, which provides more flexibility to modify the attribute states during the denoising process. The key idea is to jointly denoise a batch of samples $\mathbf{x}_{\mathrm{T}}^{[1:\mathrm{N}]}$ with guidance from the reference attribute distribution $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$. To realize distribution guidance, we define a differentiable distribution prediction function $g_{\psi}$ that maps the batch samples to an estimate of the generated attribute distribution $\hat{\mathbf{p}}_{\theta}^{\mathrm{a}}$. We learn the function $g_{\psi}$ over an intermediate feature space - the $h$-space [23] of the denoising network - instead of the image space for efficiency (Sec. 3.4). Hence, the batch estimate is given by $\hat{\mathbf{p}}_{\theta}^{\mathrm{a}} = g_{\psi}(\mathbf{h}_{\mathrm{t}}^{[1:\mathrm{N}]}, \mathbf{t})$, where $\mathbf{h}_{\mathrm{t}}^{[1:\mathrm{N}]}$ is the bottleneck U-Net feature representation of the batch samples $\mathbf{x}_{\mathrm{t}}^{[1:\mathrm{N}]}$. Further, we define a loss function $\mathcal{L}(.)$ that measures the similarity of two distributions. During denoising we guide the batch of samples to bring $\hat{\mathbf{p}}_{\theta}^{\mathrm{a}}$ closer to the reference, i.e., $\mathcal{L}(\hat{\mathbf{p}}_{\theta}^{\mathrm{a}}, \mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}) \approx 0$. This can be easily integrated into the reverse diffusion process as an extension of classifier guidance by modifying Eq. 2.
119
+
120
+ Insight.3: Distribution guidance provides flexibility to batch samples and transforms easier samples close to the decision boundary to match the required distribution.
121
+
122
+ Remark: As distribution guidance does not require presetting for attribute states for each sample, it gives more flexibility during the generation as long as $\hat{\mathbf{p}}_{\theta}^{\mathbf{a}}$ follows $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ . E.g., in our running example of gender attribute, only those samples will change gender after $t = \tau$ , which are close to the decision boundary (in Q1 & Q2). In contrast, in sample guidance, random samples are forced to change the attribute state (Fig. 3) and are spread equally in all four quantiles, resulting in inferior quality. This is quantified by the fraction of samples being transformed $(f_{Qi})$ with each guidance in Fig. 3a), where distribution guidance majorly transforms samples from Q1 & Q2 and results in fair and high-quality samples (Fig. 3b)).
123
+
124
+ A major design decision for implementing distribution guidance is the selection of function $g_{\psi}$ . The conventional approach is to train multiple attribute classifiers at each level of noise in the image space. Instead, we propose to leverage intermediate semantic features from the diffusion model itself and repurpose them for the task of attribute classification. This framework is very efficient as compared to image space classifiers.
125
+
126
+ # 3.4. Guidance in the H-space
127
+
128
+ Diffusion models, although trained for image generation, learn semantically rich representations in the internal features of the denoising network $\epsilon_{\theta}$. These representations have been successfully used for segmentation [2] and classification [31]. Motivated by this, we ask: can we use these internal features to train guidance classifiers? To answer this, we take features from the bottleneck layer of the diffusion U-Net, termed the $h$-space [23], and attach a linear head for classification. The trained classifiers achieve good classification performance across multiple attributes, as shown in Fig. 5. Hence, we use the trained $h$-space classifiers to realize $g_{\psi}$. Specifically, we train an Attribute Distribution Predictor that maps the batch $h$-space features to an estimate of the attribute distribution $\hat{\mathbf{p}}_{\theta}^{\mathbf{a}}$.
129
+
130
+ ![](images/09cd85a7ae7980de508dac10e93936fe9982d3c57aa51acdcab6572fbdac3b1d.jpg)
131
+ Figure 4. Distribution guidance in the $h$ -space. For a given batch $\mathbf{x}_{\mathbf{t}}^{[1:N]}$ , we extract the intermediate $h$ -space representation $\mathbf{h}_{\mathbf{t}}^{[1:N]}$ and pass it through ADP to obtain attribute distribution $\hat{\mathbf{p}}_{\theta}^{\mathbf{a}}$ . Guidance updates $\mathbf{h}_{\mathbf{t}}^{[1:N]}$ by backpropagating the derivative of loss.
132
+
133
+ Attribute Distribution Predictor (ADP) is realized via a linear attribute classifier conditioned on the diffusion time step $t$. Given a batch of generating samples $\mathbf{x}_{\mathbf{t}}^{[1:\mathbf{N}]}$, we extract the corresponding $h$-space features $\mathbf{h}_{\mathbf{t}}^{[1:\mathbf{N}]} = \epsilon_{\theta}^{E}(\mathbf{x}_{\mathbf{t}}^{[1:\mathbf{N}]}, \mathbf{t})$, where $\epsilon_{\theta}^{E}$ is the encoder of the U-Net. Next, we pass the batch $\mathbf{h}_{\mathbf{t}}^{[1:\mathbf{N}]}$ to the attribute classifier and obtain a batch of softmax predictions. The softmax predictions are aggregated per class to obtain the attribute distribution estimate $\hat{\mathbf{p}}_{\theta}^{\mathrm{a}}$. Finally, we update the intermediate $h$-vectors with the gradients of the distribution matching loss $\mathcal{L}$, where $\gamma$ is the guidance strength parameter:
136
+
137
+ $$
138
+ \tilde{\mathbf{h}}_{\mathbf{t}}^{[1:N]} = \mathbf{h}_{\mathbf{t}}^{[1:N]} - \gamma \, \nabla_{\mathbf{h}_{\mathbf{t}}^{[1:N]}} \mathcal{L}\big(\hat{\mathbf{p}}_{\theta}^{\mathbf{a}}, \mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}\big), \quad \text{where} \quad \hat{\mathbf{p}}_{\theta}^{\mathbf{a}} = g_{\psi}\big(\mathbf{h}_{\mathbf{t}}^{[1:N]}, t\big)
139
+ $$
140
+
141
+ Finally, we obtain the distribution-guided noise prediction $\tilde{\epsilon}$ by passing the updated $\tilde{\mathbf{h}}_{\mathbf{t}}^{[1:\mathbf{N}]}$ through the U-Net decoder $\epsilon_{\theta}^{D}$ , i.e., $\tilde{\epsilon} (\mathbf{x}_{\mathbf{t}}^{[1:\mathbf{N}]},\mathbf{t}) = \epsilon_{\theta}^{D}(\tilde{\mathbf{h}}_{\mathbf{t}}^{[1:\mathbf{N}]},\mathbf{t})$ . The predicted noise is then used to compute the updated batch $\mathbf{x}_{t - 1}^{[1:N]}$ via DDIM [51].
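+ To make the batched update concrete, below is a minimal sketch of one distribution-guidance step in PyTorch. This is not the authors' code: `unet_encoder`, `unet_decoder`, and `adp` are hypothetical stand-ins for $\epsilon_{\theta}^{E}$ , $\epsilon_{\theta}^{D}$ , and the ADP, while the softmax aggregation, Chi-square loss, and the default strength of 1500 follow the descriptions in Sec. 4.2 and Sec. A.1.
+
+ ```python
+ import torch
+
+ def distribution_guidance_step(x_t, t, unet_encoder, unet_decoder, adp,
+                                p_ref, gamma=1500.0):
+     """One guided denoising step for a batch x_t of N samples (sketch)."""
+     # Encode the whole batch into h-space (U-Net bottleneck features).
+     h_t = unet_encoder(x_t, t).detach().requires_grad_(True)
+
+     # Per-sample attribute softmaxes, aggregated into a batch-level
+     # distribution estimate p_hat (normalized sum over the batch).
+     probs = torch.softmax(adp(h_t, t), dim=-1)   # [N, num_classes]
+     p_hat = probs.mean(dim=0)                    # [num_classes]
+
+     # Chi-square distance to the reference distribution (the loss L).
+     loss = ((p_hat - p_ref) ** 2 / (p_hat + p_ref + 1e-8)).sum()
+
+     # Move the h-vectors against the gradient of the matching loss.
+     h_tilde = h_t - gamma * torch.autograd.grad(loss, h_t)[0]
+
+     # Decode the guided features into the noise prediction used by DDIM.
+     return unet_decoder(h_tilde, t)
+ ```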
142
+
143
+ Insight.4: $H$ -space guidance is highly effective and far more efficient than image-space guidance.
144
+
145
+ Remark. Compared with conventional image-space guidance, guidance in the h-space has multiple advantages: i) it requires only a set of linear layers to implement the classifiers; ii) backpropagation during guidance is fast compared with image-space models; iii) it is highly data-efficient and can be trained with only a few thousand examples owing to the semantically rich h-space. We compare these properties of h-space guidance in the experiments section.
146
+
147
+ ![](images/e02ceadd179d0ee87693e00142044a588ac4f99b68448aca4d27534450c252fd.jpg)
148
+ Figure 5. Classification accuracy for linear $h$ -space classifiers and ResNet-18 image-space classifiers trained on $2K$ training examples. $h$ -space classifiers are data-efficient and achieve superior performance even with a linear layer.
149
+
150
+ # 4. Experiments
151
+
152
+ In this section, we first discuss the evaluation metrics, followed by the implementation details. We take face datasets as the subject of study, as they exhibit strong demographic biases. We compare our method with other inference-time debiasing approaches for both single-attribute and multi-attribute debiasing. Further, we present detailed ablations to validate our design choices. Finally, we show that our debiasing approach generalizes to the large text-to-image generation model Stable Diffusion [40].
153
+
154
+ # 4.1. Evaluation Metrics
155
+
156
+ A fair generative model is evaluated on two grounds: image quality and fairness. We discuss the metrics used to measure these two aspects of the generated images below.
157
+
158
+ Fairness. The primary goal of this paper is to generate images following the reference distribution $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$ . We follow Choi et al. [6] to define the Fairness Discrepancy (FD) metric. Given an attribute $\mathbf{a}$ , we assume access to a high-accuracy classifier for $\mathbf{a}$ (denoted $\mathcal{C}_a$ ) and, using its predictions, compute the following [6, 53]:
159
+
160
+ $$
161
+ \big\| \bar{p} - \mathbb{E}_{\mathbf{x} \sim p_{\theta}(\mathbf{x})}\left[\mathbf{y}\right] \big\|_{2}
162
+ $$
163
+
164
+ where $\mathbf{y}$ is the softmax output of the classifier $\mathcal{C}_a(\mathbf{x})$ , $\bar{p}$ is a uniform vector of the same dimension as $\mathbf{y}$ , and $p_{\theta}$ is the distribution of the generated images. The lower the FD score, the closer the attribute distribution is to uniform - i.e., the fairer the generated images are with respect to attribute $\mathbf{a}$ .
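+ A minimal sketch of this FD computation; the attribute classifier `clf` is a hypothetical stand-in for $\mathcal{C}_a$ .
+
+ ```python
+ import torch
+
+ def fairness_discrepancy(images, clf, num_classes):
+     """L2 distance between the mean predicted softmax and uniform."""
+     y = torch.softmax(clf(images), dim=-1)             # [N, num_classes]
+     p_bar = torch.full((num_classes,), 1.0 / num_classes)
+     return torch.linalg.vector_norm(p_bar - y.mean(dim=0), ord=2)
+ ```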
165
+
166
+ Image Quality. To measure the quality of generation, we follow the standard Fréchet Inception Distance (FID) [12]. We compute this metric with the help of an attribute-wise balanced dataset sampled from the original training data.
167
+
168
+ # 4.2. Implementation Details
169
+
170
+ Training h-space classifiers. We start by creating a paired dataset $\mathcal{D}_{clf}^{h}$ of $h$ -vectors and attribute labels. Specifically, we take a subset $\mathcal{D}$ of the CelebA-HQ [20] dataset and obtain attribute labels for the images using the pretrained attribute classifier $\mathcal{C}_a$ . Next, we embed each image $\mathbf{I}^{\mathrm{i}} \in \mathcal{D}$ to obtain the corresponding $h$ -space representation $\mathcal{H}^{\mathrm{i}} = \{\mathbf{h}_{\mathbf{t}}^{\mathrm{i}}\}_{t=0}^{T}$ using DDIM [51] inversion. This yields a labelled dataset $\mathcal{D}_{clf}^{h}$ with pairs $(\mathcal{H}^{\mathrm{i}}, \mathbf{y}^{\mathrm{i}})$ , where $\mathbf{y}^{\mathrm{i}} = \mathcal{C}_a(I^{\mathrm{i}})$ is the predicted attribute label for image $\mathbf{I}^{\mathrm{i}}$ (e.g. male/female). We then train $h$ -space attribute classifiers $\mathcal{C}_a^h(\mathbf{h}_{\mathbf{t}}, t)$ as a linear head over $\mathbf{h}_{\mathbf{t}}$ conditioned on time $t$ (sample index $i$ omitted for brevity). The trained classifiers produce high-accuracy attribute predictions, as shown in Fig. 5. Further details about the dataset and classifiers are provided in Sec. D of the supplement.
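+ A rough sketch of how such a paired dataset could be assembled; `c_a` (the image-space attribute classifier) and `ddim_invert` (returning the per-timestep h-vectors of an image) are hypothetical helpers standing in for $\mathcal{C}_a$ and DDIM inversion.
+
+ ```python
+ import torch
+
+ def build_h_dataset(images, c_a, ddim_invert):
+     """Pairs each image's h-space trajectory with its pseudo-label."""
+     dataset = []
+     for img in images:
+         y = c_a(img.unsqueeze(0)).argmax(dim=-1)  # pseudo-label from C_a
+         h_traj = ddim_invert(img)                 # {h_t} for t = 0..T
+         dataset.append((h_traj, y))
+     return dataset
+ ```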
171
+
172
+ Distribution guidance. To perform distribution guidance, we realize the distribution prediction function ADP with the $h$ -space classifiers $\mathcal{C}_a^h$ . Specifically, for a generating batch $\mathbf{x}_{\mathbf{t}}^{[1:\mathbf{N}]}$ , we compute the corresponding $h$ -space representation $\mathbf{h}_{\mathbf{t}}^{[1:\mathbf{N}]}$ and obtain a set of attribute predictions $\hat{\mathbf{y}}_{\mathbf{t}}^{[1:\mathbf{N}]}$ from the classifiers $\mathcal{C}_a^h$ . Finally, we aggregate the softmax values of all $N$ images in the batch per class to obtain the estimate of $\mathbf{p}_{\theta}^{\mathbf{a}}$ . We use the Chi-square distance to the reference distribution $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ as the loss $\mathcal{L}$ .
173
+
174
+ Diffusion Model Architecture. We evaluate our proposed debiasing method on two state-of-the-art pretrained DMs: a) an unconditional DM, P2 [5], trained on the CelebA-HQ dataset, and b) a text-conditional DM, Stable Diffusion v1.5 [40], trained on the LAION [48] dataset. Both models have exceptional image generation quality; however, as shown in the following subsections, they exhibit significant bias with respect to sensitive facial attributes.
175
+
176
+ ![](images/7d68a30fc41d6289d2b6a2226c07e075bef1025d243bbcc73b6969c01db16e17.jpg)
177
+ Figure 6. Balancing generated data across gender and eyeglasses attributes with various guidance strategies. The minority groups, males and eyeglasses, are marked. Image-space Universal Guidance and Sample Guidance generate imbalanced and poor-quality images. For gender, Sample Guidance generates only a few males while maintaining quality. Latent Editing produces collapsed images for gender and fails to generate eyeglasses. The proposed Distribution Guidance balances both attributes (close to the ratio of $0.50:0.50$ ) and generates high-quality images.
178
+
179
+ # 4.3. Baselines
180
+
181
+ We compare our proposed method against two guidance-based generation techniques for DMs, one latent-editing method [23], and a state-of-the-art sampling-based technique for debiasing generative models, MagNet [14].
182
+
183
+ Sample guidance. We use classifier guidance, as explained in Sec. 3.2, in the $h$ -space with the trained $h$ -space attribute classifiers $\mathcal{C}_a^h$ . Such guidance requires presetting an attribute state for each generated image and pushing the trajectory to change the attribute state.
184
+
185
+ Universal guidance [1] performs guidance in the image space but uses pretrained classifiers trained on clean images. This removes the additional requirement of training image classifiers on noisy images. The key idea is to use the DDIM scheduler to predict the approximation $\hat{\mathbf{x}}_0$ from the noisy image $\mathbf{x_t}$ and pass it through the pretrained image classifier $\mathcal{C}_a$ . However, this process has two shortcomings: it is slow, as it backpropagates gradients through the image classifier (experimentally, we found it to be 7 times slower than $h$ -space guidance), and it performs poorly at early timesteps due to an inaccurate approximation of $\mathbf{x}_0$ . We use two settings that vary the number of images used to train the image-space attribute classifier: (1) the same training set $|\mathcal{D}_{clf}| = 2K$ as the $h$ -space classifier, and (2) the entire CelebA-HQ dataset, i.e. $30K$ images.
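+ For reference, the clean-image approximation used here is the standard DDIM estimate $\hat{\mathbf{x}}_0 = (\mathbf{x}_t - \sqrt{1 - \bar{\alpha}_t}\, \epsilon_{\theta}(\mathbf{x}_t, t)) / \sqrt{\bar{\alpha}_t}$ . A one-function sketch:
+
+ ```python
+ import torch
+
+ def predict_x0(x_t, eps, alpha_bar_t):
+     """DDIM estimate of the clean image from x_t and the predicted noise.
+     alpha_bar_t: cumulative product of alphas at step t (a tensor)."""
+     return (x_t - torch.sqrt(1.0 - alpha_bar_t) * eps) / torch.sqrt(alpha_bar_t)
+ ```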
186
+
187
+ Latent-based Editing [23] generates images with a specific set of attributes. Such techniques are popular for debiasing GANs [17, 37] because the well-known disentangled latent spaces of GANs allow for such edits. Recent work [23] has shown that similar semantic control is also present in the $h$ -space of DMs and can be used for latent-based editing. We capitalize on this finding and perform latent editing in the $h$ -space to generate images with desired attributes for fair generation.
188
+
189
+ MagNet [14] is an unsupervised method enabling fair sampling from a pretrained model.
190
+
191
+ Table 1. Evaluation of balanced generation for a single attribute
192
+
193
+ <table><tr><td></td><td colspan="2">Gender</td><td colspan="2">Race</td><td colspan="2">Eyeglasses</td></tr><tr><td>Method</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td></tr><tr><td>Random Sampling</td><td>0.178</td><td>54.59</td><td>0.334</td><td>60.01</td><td>0.251</td><td>75.21</td></tr><tr><td>Universal Guidance (2k) [1]</td><td>0.193</td><td>52.10</td><td>0.377</td><td>93.42</td><td>0.189</td><td>64.55</td></tr><tr><td>Universal Guidance (30k) [1]</td><td>0.127</td><td>48.94</td><td>0.326</td><td>58.52</td><td>0.051</td><td>78.57</td></tr><tr><td>Latent Editing [23]</td><td>0.001</td><td>37.40</td><td>0.214</td><td>42.69</td><td>0.330</td><td>75.04</td></tr><tr><td>H-Sample Guidance (ours)</td><td>0.113</td><td>51.46</td><td>0.184</td><td>56.53</td><td>0.118</td><td>57.63</td></tr><tr><td>H-Distribution Guidance (ours)</td><td>0.049</td><td>50.27</td><td>0.113</td><td>52.38</td><td>0.014</td><td>51.78</td></tr><tr><td>StyleGAN2 - Random sampling</td><td>0.307</td><td>112.28</td><td>0.463</td><td>123.97</td><td>0.276</td><td>117.83</td></tr><tr><td>StyleGAN2 - Magnet [14]</td><td>0.267</td><td>91.15</td><td>0.454</td><td>97.05</td><td>0.281</td><td>106.55</td></tr></table>
194
+
195
+ They propose a method for uniform sampling on the image manifold to generate under-represented groups equally. We generated results by MagNet sampling from a StyleGAN2 [22] model trained on the FFHQ [21] dataset using their official codebase. Notably, as the base model for MagNet is StyleGAN2, we cannot directly compare FIDs with our DM-debiased results, and we report random generations from StyleGAN2 as a reference.
196
+
197
+ # 4.4. Main Results
198
+
199
+ We first present quantitative and qualitative results on debiasing single binary attributes. Second, we debias multiple attributes simultaneously. Finally, we present the case of debiasing attributes with multi-class labels.
200
+
201
+ Quantitative evaluation. We evaluate our debiasing method in the single-attribute case by generating balanced generations for individual sensitive attributes - gender, eyeglasses, and race - in Tab. 1. As these attributes are binary, the synthesized images are expected to have a $1:1$ ratio of attribute states (e.g., a 0.50 fraction of males and a 0.50 fraction of females for gender). Specifically, we generate $10K$ images from each method per attribute and compute the metrics defined in Sec. 4.1. For most attributes, the proposed guidance method outperforms all the baselines in terms of both visual quality, as measured by FID, and
202
+
203
+ Table 2. Evaluation of balanced generation for multiple attributes
204
+
205
+ <table><tr><td></td><td colspan="2">Gender + Race</td><td colspan="2">Eyeglasses + Race</td><td colspan="2">Gender + Eyeglasses</td></tr><tr><td>Method</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td></tr><tr><td>Random Sampling</td><td>0.256</td><td>60.68</td><td>0.292</td><td>89.14</td><td>0.214</td><td>70.97</td></tr><tr><td>Latent Editing [23]</td><td>0.124</td><td>64.84</td><td>0.219</td><td>90.63</td><td>0.230</td><td>74.93</td></tr><tr><td>Universal Guidance (2k) [1]</td><td>0.283</td><td>71.84</td><td>0.264</td><td>91.54</td><td>0.157</td><td>80.57</td></tr><tr><td>H-Sample Guidance (ours)</td><td>0.241</td><td>59.78</td><td>0.135</td><td>67.87</td><td>0.079</td><td>52.03</td></tr><tr><td>H-Distribution Guidance (ours)</td><td>0.075</td><td>49.91</td><td>0.101</td><td>57.46</td><td>0.057</td><td>47.45</td></tr></table>
206
+
207
+ the bias, as measured by FD. Although Latent Editing has better FID and FD for gender, qualitative evaluation (elaborated in the next section) reveals artifacts in the images (Fig. 6). Moreover, this method fails to mitigate bias in the case of multiple attributes (Tab. 2). Sample guidance achieves comparable FID for gender; however, its higher FD indicates inferior debiasing. The tradeoff between FD and FID is discussed in Sec. A.1 of the supplement. This supports our thesis that distribution guidance provides enough flexibility during generation, resulting in high-quality outputs even with a high guidance scale.
208
+
209
+ Qualitative evaluation. We present results for balancing the gender and eyeglasses attributes in Fig. 6. We randomly sample 20 starting noises and apply each guidance method for debiasing. Without guidance, the DM mostly generates female faces. Although Latent Editing achieved better quantitative metrics for gender, it produces images with artifacts and collapsed results. Sample and Universal guidance increase the number of males, but some images collapse. In contrast, distribution guidance generates an almost equal number of males and females without affecting generation quality. Moreover, none of the baselines is able to generate eyeglasses, whereas our method leads to highly balanced generations.
210
+
211
+ Multiple attributes. We apply our method to debias multiple attributes simultaneously in Tab. 2. Specifically, given two reference distributions $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}_1}$ and $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}_2}$ , we add guidance from two pretrained attribute distribution predictors $g_{\psi_1}$ and $g_{\psi_2}$ . For this experiment, we define both reference distributions as uniform (50%-50% splits). The generated results follow the reference, resulting in balanced generation across attributes. Further analysis is provided in Sec. B of the supplement.
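+ A minimal sketch of how guidance from multiple predictors can be combined: the per-attribute matching losses are summed before a single gradient step on the shared h-vectors (one assumed reading of "adding" the guidance; `adps` are hypothetical predictors).
+
+ ```python
+ import torch
+
+ def chi_square(p, q, eps=1e-8):
+     return ((p - q) ** 2 / (p + q + eps)).sum()
+
+ def multi_attribute_guidance(h_t, t, adps, p_refs, gamma):
+     """Sum the per-attribute matching losses, then update h_t once."""
+     h_t = h_t.detach().requires_grad_(True)
+     loss = sum(chi_square(torch.softmax(g(h_t, t), dim=-1).mean(dim=0), p)
+                for g, p in zip(adps, p_refs))
+     return h_t - gamma * torch.autograd.grad(loss, h_t)[0]
+ ```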
212
+
213
+ Multi-class attributes. We evaluate the efficacy of our approach in balancing multi-class attributes - age and race - in Tab. 3. We use the FFHQ [21] dataset for both attributes and obtain annotations using pretrained models, as ground-truth labels are unavailable. For the age attribute, we use a pretrained ViT age classifier [39] to produce 3 classes: Young (< 20 yrs), Adult (20 - 60 yrs), and Old (> 60 yrs). For race, we use the FairFace race classifier [16] to obtain 4 classes: White, Black, Asian, and Indian. Our method successfully debiases multi-class attributes and beats random sampling and sample guidance in all cases.
214
+
215
+ # 4.5. Generating Imbalanced Distributions
216
+
217
+ We test our distribution guidance for generating imbalanced attribute distributions by providing a skewed $\mathbf{p}_{\mathrm{ref}}^{\mathbf{a}}$ : i) 0.20 female and 0.80 male, and ii) 0.10 white race and 0.90 black race. These two settings are extremely challenging given that males and the black race are minority groups in the dataset.
218
+
219
+ Table 3. Balanced generation for multi-class attributes
220
+
221
+ <table><tr><td></td><td colspan="2">Age (3 classes)</td><td colspan="2">Race (4 classes)</td></tr><tr><td>Method</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td></tr><tr><td>Random Sampling</td><td>0.256</td><td>60.68</td><td>0.292</td><td>89.14</td></tr><tr><td>H-Sample Guidance</td><td>0.124</td><td>64.84</td><td>0.219</td><td>90.63</td></tr><tr><td>H-Distribution Guidance</td><td>0.283</td><td>71.84</td><td>0.264</td><td>91.54</td></tr></table>
222
+
223
+ Table 4. Distribution guidance for imbalanced generation
224
+
225
+ <table><tr><td></td><td colspan="2">0.20F - 0.80M</td><td colspan="2">0.10W - 0.90B</td></tr><tr><td>Method</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td></tr><tr><td>Random Sampling</td><td>0.478</td><td>72.26</td><td>0.734</td><td>77.63</td></tr><tr><td>H-Distribution Sampling</td><td>0.168</td><td>51.65</td><td>0.325</td><td>53.80</td></tr></table>
226
+
227
+ We present qualitative results for both settings in Fig. 7, where our distribution guidance generates the specified distribution with a majority of males. Note that we binarized the race attribute into black and white for simplicity; hence, brown-skinned faces are counted under the black category in the generations. We report quantitative metrics in Tab. 4, where the proposed method achieves good FD scores while also improving FID. Additional experimental results are tabulated in Sec. B.1 of the supplement.
228
+
229
+ ![](images/8db3acfb140a17da6b025a6a6b9be5d6cdda84d8c5bb1fd8b2709c31cd0a1724.jpg)
230
+ Figure 7. Generating imbalanced data with two non-uniform reference distributions - i) 0.9B-0.1W, ii) 0.8M-0.2F. Minority classes are marked. Distribution guidance generates high-quality images and closely follows the skewed reference distribution.
231
+
232
+ # 4.6. Ablations
233
+
234
+ We provide an ablation over the batch size here; ablations over the guidance scale, $h$ -space classifier architectures, and the number of training examples are provided in Sec. A of the supplement.
+
+ Batch size. As we approximate $\mathbf{p}_{\theta}^{\mathbf{a}}$ with an estimate $\hat{\mathbf{p}}_{\theta}^{\mathbf{a}}$ over a batch of size $N$ , we ablate over different values of $N$ in Tab. 5. Intuitively, a larger batch size yields a better estimate of $\mathbf{p}_{\theta}^{\mathbf{a}}$ . We found that $N = 100$ works best for our experiments, balancing both FD and FID. Additionally, our method remains effective even with small batch sizes.
235
+
236
+ Table 5. Ablation over batch size for gender balancing.
237
+
238
+ <table><tr><td>Batch size</td><td>2</td><td>4</td><td>8</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>125</td><td>150</td><td>200</td></tr><tr><td>FD ↓</td><td>0.108</td><td>0.088</td><td>0.073</td><td>0.062</td><td>0.059</td><td>0.049</td><td>0.059</td><td>0.0046</td><td>0.052</td><td>0.053</td><td>0.058</td></tr><tr><td>FID ↓</td><td>60.12</td><td>51.86</td><td>49.80</td><td>50.54</td><td>51.51</td><td>50.86</td><td>50.98</td><td>51.64</td><td>49.91</td><td>49.81</td><td>49.74</td></tr></table>
239
+
240
+ ![](images/c775d6701d67f2335e35db77b989348588b3a1a1ed204d334a3203dc5da07c07.jpg)
241
+ Figure 8. Debiasing results on Stable Diffusion for gender. Distribution guidance can balance the gender attribute in prompts involving professions, e.g., firefighter and doctor.
242
+
243
+ # 4.7. Debiasing Text-to-Image Diffusion Models
244
+
245
+ In this subsection, we apply our distribution guidance technique to debias a text-to-image generation model, Stable Diffusion (SD) v1.5 [40], with respect to the gender attribute. We provide results on other attributes and on mitigating spurious correlations in WaterBirds [43] generation in Sec. C of the supplement. First, we generate a dataset $\mathcal{D}$ of $10K$ images from SD with the prompts 'a photo of a male' and 'a photo of a female' to create a labeled dataset for training $h$ -space classifiers. If the $h$ -space classifier is instead trained on the CelebA-HQ dataset, guidance is ineffective due to the significant domain shift from SD generations. Next, we obtain the corresponding labeled dataset $\mathcal{D}_{clf}^{h}$ and train a gender classifier in the $h$ -space. We then use the trained classifier for distribution guidance following Sec. 3.4 to generate images with balanced gender.
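+ A rough sketch of this dataset-generation step, assuming the Hugging Face diffusers API (`StableDiffusionPipeline` is a real class; the checkpoint id, loop structure, and counts are our reading of the text):
+
+ ```python
+ from diffusers import StableDiffusionPipeline
+
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+ prompts = {"male": "a photo of a male", "female": "a photo of a female"}
+ dataset = []
+ for label, prompt in prompts.items():
+     for _ in range(5000):                 # 10K labeled images in total
+         image = pipe(prompt).images[0]
+         dataset.append((image, label))
+ ```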
246
+
247
+ It has been observed that SD amplifies gender bias when queried to generate certain professions (e.g., associating doctors with males) [62]. To this end, we apply our distribution guidance to the prompts 'a photo of a doctor' and 'a photo of a firefighter' to evaluate its effectiveness in this challenging setting. Qualitative results are shown in Fig. 8, and quantitative results are reported in Tab. 6.
248
+
249
+ Baselines. We compare our method with the following. (1) Random sampling from SD [40]. (2) ITI-Gen [62], which learns prompt embeddings for each category of the attribute given image reference sets per category; these prompts are appended during generation to produce balanced images. (3) Fair Diffusion [10], which uses a lookup table to recognize the biased concept in the text input and adds scaled attribute expressions to the prompt. Note that these baselines are explicitly designed to debias text-conditioned diffusion models, whereas our method can debias both conditional and unconditional diffusion models.
250
+
251
+ Table 6. Balancing gender on Stable Diffusion [40] model.
252
+
253
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Gender</td><td colspan="2">Doctor</td><td colspan="2">Firefighter</td></tr><tr><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td><td>FD ↓</td><td>FID ↓</td></tr><tr><td>Random Sampling</td><td>0.317</td><td>72.37</td><td>0.355</td><td>70.11</td><td>0.235</td><td>71.86</td></tr><tr><td>ITI-Gen [62]</td><td>0.049</td><td>64.79</td><td>0.072</td><td>67.81</td><td>0.184</td><td>70.12</td></tr><tr><td>Fair Diffusion [10]</td><td>0.227</td><td>71.22</td><td>0.035</td><td>74.37</td><td>0.036</td><td>68.33</td></tr><tr><td>H-Sample Guidance (ours)</td><td>0.026</td><td>70.96</td><td>0.021</td><td>68.43</td><td>0.097</td><td>70.42</td></tr><tr><td>H-Distribution Guidance (ours)</td><td>0.024</td><td>70.69</td><td>0.015</td><td>67.36</td><td>0.093</td><td>69.41</td></tr></table>
254
+
255
+ Table 7. Group-wise accuracies of identical classifiers trained on existing and generated (balanced) data
256
+
257
+ <table><tr><td></td><td>Black female</td><td>White female</td><td>Black male</td><td>White male</td></tr><tr><td>Vanilla classifier</td><td>75.76</td><td>98.96</td><td>70.29</td><td>99.93</td></tr><tr><td>Balanced classifier</td><td>91.04</td><td>97.33</td><td>90.72</td><td>97.68</td></tr></table>
258
+
259
+ # 4.8. Class-imbalance in attribute classification
260
+
261
+ We explore an important downstream application of our approach: balancing minority classes by augmenting under-represented classes with generated data. Specifically, we train a race classifier (labels obtained from $\mathcal{C}_{\mathcal{A}}$ ) on the CelebA-HQ [19] dataset. The race classifier is an ImageNet [8]-pretrained ResNet-18 [11] encoder followed by 2 MLP layers and a classification head. We manually oversample White samples and undersample Black samples in the training dataset such that the imbalanced dataset consists of $10k$ samples of white people and $1k$ of black people (genders are kept balanced within each race class). Consequently, the model performs poorly on the minority class (i.e., Black) due to under-representation. Next, we class-balance the training data by generating samples whose distribution is inversely proportional to the class counts in the training set, thereby increasing the number of minority-class images using our distribution guidance approach. This adds $9k$ images of the Black race only, and the classifier trained on this balanced data performs significantly better (Tab. 7). Even when gender is balanced within both classes, the vanilla classifier shows a significant disparity between the accuracies for male and female Black samples; our proposed method helps reduce this accuracy gap between Black males and Black females. This shows a potential application in generating class-balanced datasets for training models on other downstream tasks, which can also mitigate bias.
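+ A minimal sketch of the augmentation rule described above, i.e., generating counts inversely proportional to the existing class counts (our assumed formalization; the example numbers are illustrative):
+
+ ```python
+ def augmentation_reference(class_counts, total_new):
+     """Split total_new generations inversely to existing class counts."""
+     inv = [1.0 / c for c in class_counts]
+     z = sum(inv)
+     return [total_new * w / z for w in inv]
+
+ # e.g. class_counts = [10_000, 1_000] (White, Black) with total_new = 9_900
+ # -> roughly [900, 9_000]: almost all new samples go to the minority class.
+ ```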
262
+
263
+ # 5. Discussion
264
+
265
+ Limitations. Although our method performs guidance in the $h$ -space, which is efficient compared to image-space guidance, it still requires additional training of $h$ -space classifiers. Another limitation is the reliance on accurate attribute classifiers to obtain labels for training the $h$ -space classifiers.
266
+
267
+ Future work. An important direction is extending distribution guidance beyond debiasing, to controlled generation and data augmentation. In the context of debiasing DMs, another direction is extending the proposed approach so that it no longer needs an attribute classifier or labeled data.
268
+
269
+ # 6. Conclusion
270
+
271
+ In this work, we aim to mitigate biases in pretrained diffusion models without retraining, given only a desired reference attribute distribution. We propose a novel approach leveraging distribution guidance, which jointly guides a batch of images to follow the reference attribute distribution. The proposed method is effective, resulting in high-quality and fair generations across multiple attributes, and outperforms sample-guidance strategies that condition each sample individually. Extensive experiments demonstrate the effectiveness of our method in balancing both single and multiple attributes on unconditional DMs and conditional text-to-image diffusion models. We believe such a setting of debiasing without retraining is practical, especially in today's era of large-scale generative models.
272
+
273
+ Acknowledgements. This work was supported by the Kotak IISc AIML Centre (KIAC) and Meesho. Rishubh Parihar and Abhipsa Basu are supported by PMRF fellowship.
274
+
275
+ # References
276
+
277
+ [1] Arpit Bansal, Hong-Min Chu, Avi Schwarzschild, Soumyadip Sengupta, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Universal guidance for diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 843-852, 2023. 3, 6, 7
278
+ [2] Dmitry Baranchuk, Ivan Rubachev, Andrey Voynov, Valentin Khrulkov, and Artem Babenko. Label-efficient semantic segmentation with diffusion models. arXiv preprint arXiv:2112.03126, 2021. 4
279
+ [3] Abhipsa Basu, R Venkatesh Babu, and Danish Pruthi. Inspecting the geographical representativeness of images from text-to-image models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5136-5147, 2023. 2
280
+ [4] Hugo Berg, Siobhan Hall, Yash Bhalgat, Hannah Kirk, Aleksandar Shtedritski, and Max Bain. A prompt array keeps the bias away: Debiasing vision-language models with adversarial learning. In Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 806-822, Online only, 2022. Association for Computational Linguistics. 3
281
+ [5] Jooyoung Choi, Jungbeom Lee, Chaehun Shin, Sungwon Kim, Hyunwoo Kim, and Sungroh Yoon. Perception prioritized training of diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11472-11481, 2022. 5
282
+ [6] Kristy Choi, Aditya Grover, Trisha Singh, Rui Shu, and Stefano Ermon. Fair generative modeling via weak supervision. In International Conference on Machine Learning, pages 1887-1898. PMLR, 2020. 1, 2, 5
283
+ [7] Ching-Yao Chuang, Varun Jampani, Yuanzhen Li, Antonio Torralba, and Stefanie Jegelka. Debiasing vision-language models via biased prompts. arXiv preprint arXiv:2302.00070, 2023. 3
286
+ [8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 8
287
+ [9] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 2, 3
288
+ [10] Felix Friedrich, Manuel Brack, Lukas Struppek, Dominik Hintersdorf, Patrick Schramowski, Sasha Luccioni, and Kristian Kersting. Fair diffusion: Instructing text-to-image generation models on fairness. arXiv preprint arXiv:2302.10893, 2023. 8
289
+ [11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 8, 12, 14
290
+ [12] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5
291
+ [13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 1
292
+ [14] Ahmed Imtiaz Humayun, Randall Balestriero, and Richard Baraniuk. Magnet: Uniform sampling from deep generative network manifolds without retraining. In International Conference on Learning Representations, 2021. 1, 2, 6
293
+ [15] Niharika Jain, Alberto Olmo, Sailik Sengupta, Lydia Manikonda, and Subbarao Kambhampati. Imperfect imaganation: Implications of gans exacerbating biases on facial data augmentation and chat selfie lenses. arXiv preprint arXiv:2001.09528, 2020. 2
294
+ [16] Jungseock Joo. FairFace attribute model. https://github.com/joojs/fairface, 2021. 7
295
+ [17] Cemre Efe Karakas, Alara Dirik, Eylül Yalçınkaya, and Pinar Yanardag. Fairstyle: Debiasing stylegan2 with style channel manipulations. In European Conference on Computer Vision, pages 570-586. Springer, 2022. 6
296
+ [18] Tejan Karmali, Rishubh Parihar, Susmit Agrawal, Harsh Rangwani, Varun Jampani, Maneesh Singh, and R Venkatesh Babu. Hierarchical semantic regularization of latent spaces in stylegans. In European Conference on Computer Vision, pages 443-459. Springer, 2022. 2
297
+ [19] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196, 2017. 8, 13, 14
298
+ [20] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of GANs for improved quality, stability, and variation. In International Conference on Learning Representations, 2018. 5, 13
299
+ [21] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 6, 7
300
+
301
+ [22] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 6
302
+ [23] Mingi Kwon, Jaeseok Jeong, and Youngjung Uh. Diffusion models already have a semantic latent space. arXiv preprint arXiv:2210.10960, 2022. 2, 4, 6, 7
303
+ [24] Lucien Le Cam. Asymptotic methods in statistical decision theory. Springer Science & Business Media, 2012. 2
304
+ [25] Ke Li and Jitendra Malik. Implicit maximum likelihood estimation. arXiv preprint arXiv:1809.09087, 2018. 2
305
+ [26] Sasha Luccioni, Christopher Akiki, Margaret Mitchell, and Yacine Jernite. Stable bias: Evaluating societal representations in diffusion models. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. 1, 2
306
+ [27] Vongani H Maluleke, Neerja Thakkar, Tim Brooks, Ethan Weber, Trevor Darrell, Alexei A Efros, Angjoo Kanazawa, and Devin Guillery. Studying bias in gans through the lens of race. In European Conference on Computer Vision, pages 344-360. Springer, 2022. 1, 2
307
+ [28] Abhishek Mandal, Susan Leavy, and Suzanne Little. Measuring bias in multimodal models: Multimodal composite association score. In International Workshop on Algorithmic Bias in Search and Recommendation, pages 17-30. Springer, 2023. 1
308
+ [29] Pietro Melzi, Christian Rathgeb, Ruben Tolosana, Ruben Vera-Rodriguez, Dominik Lawatsch, Florian Domin, and Maxim Schaubert. GANDiffFace: Controllable generation of synthetic datasets for face recognition with realistic variations. arXiv preprint arXiv:2305.19962, 2023. 1, 2
309
+ [30] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, and Stefano Ermon. SDEdit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2022. 4
310
+ [31] Soumik Mukhopadhyay, Matthew Gwilliam, Vatsal Agarwal, Namitha Padmanabhan, Archana Swaminathan, Srinidhi Hegde, Tianyi Zhou, and Abhinav Shrivastava. Diffusion models beat gans on image classification. arXiv preprint arXiv:2307.08702, 2023. 4
311
+ [32] Junhyun Nam, Sangwoo Mo, Jaeho Lee, and Jinwoo Shin. Breaking the spurious causality of conditional generation via fairness intervention with corrective sampling. Transactions on Machine Learning Research, 2023. 2
312
+ [33] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 3
313
+ [34] Rishubh Parihar, Ankit Dhiman, and Tejan Karmali. Everything is there in latent space: Attribute editing and attribute style manipulation by stylegan latent space exploration. In Proceedings of the 30th ACM International Conference on Multimedia, pages 1828-1836, 2022. 2
314
+
315
+ [35] Malsha V Perera and Vishal M Patel. Analyzing bias in diffusion-based face generation models. arXiv preprint arXiv:2305.06402, 2023. 1, 2
316
+ [36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 3, 14
317
+ [37] Vikram V Ramaswamy, Sunnie SY Kim, and Olga Russakovsky. Fair attribute classification through latent space de-biasing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9301-9310, 2021. 2, 6
318
+ [38] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 2
319
+ [39] Nate Raw. ViT age classifier. https://huggingface.co/nateraw/vit-age-classifier, 2021. 7
320
+ [40] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 2, 5, 8, 13
321
+ [41] Harrison Rosenberg, Shimaa Ahmed, Guruprasad V Ramesh, Ramya Korlakai Vinayak, and Kassem Fawaz. Unbiased face synthesis with diffusion models: Are we there yet? arXiv preprint arXiv:2309.07277, 2023. 1, 2
322
+ [42] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 2
323
+ [43] Shiori Sagawa, Pang Wei Koh, Tatsunori B Hashimoto, and Percy Liang. Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization. arXiv preprint arXiv:1911.08731, 2019. 8, 13
324
+ [44] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 2
325
+ [45] Pedro Sanchez and Sotirios A Tsaftaris. Diffusion causal models for counterfactual estimation. In Conference on Causal Learning and Reasoning, pages 647-668. PMLR, 2022. 3
326
+ [46] Prasanna Sattigeri, Samuel C Hoffman, Vijil Chenthamarakshan, and Kush R Varshney. Fairness gan: Generating datasets with fairness properties using a generative adversarial network. IBM Journal of Research and Development, 63 (4/5):3-1, 2019. 2
327
+ [47] Patrick Schramowski, Manuel Brack, Björn Deiseroth, and Kristian Kersting. Safe latent diffusion: Mitigating inappropriate degeneration in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22522-22531, 2023. 1
330
+ [48] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 5
331
+ [49] Ashish Seth, Mayur Hemani, and Chirag Agarwal. Dear: Debiasing vision-language models with additive residuals. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6820-6829, 2023. 3
332
+ [50] Yujun Shen, Jinjin Gu, Xiaoou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9243-9252, 2020. 2
333
+ [51] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2020. 1, 3, 5, 14
334
+ [52] Shuhan Tan, Yujun Shen, and Bolei Zhou. Improving the fairness of deep generative models without retraining. arXiv preprint arXiv:2012.04842, 2020. 2
335
+ [53] Christopher TH Teo, Milad Abdollahzadeh, and Ngai-Man Cheung. Fair generative models via transfer learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 2429-2437, 2023. 2, 5
336
+ [54] Brandon Trabucco, Kyle Doherty, Max Gurinas, and Ruslan Salakhutdinov. Effective data augmentation with diffusion models. arXiv preprint arXiv:2302.07944, 2023. 1
337
+ [55] Soobin Um and Changho Suh. A fair generative model using lecam divergence. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 10034-10042, 2023. 2
338
+ [56] Boris van Breugel, Trent Kyono, Jeroen Berrevoets, and Michaela van der Schaar. Decaf: Generating fair synthetic data using causally-aware generative networks. Advances in Neural Information Processing Systems, 34:22221-22233, 2021. 2
339
+ [57] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. In ACM SIGGRAPH 2023 Conference Proceedings, pages 1–11, 2023. 3
340
+ [58] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 Dataset. 2011. 13
341
+ [59] Julia Wolleb, Florentin Bieder, Robin Sandkuhler, and Philippe C Cattin. Diffusion models for medical anomaly detection. In International Conference on Medical image computing and computer-assisted intervention, pages 35-45. Springer, 2022. 3
342
+ [60] Depeng Xu, Shuhan Yuan, Lu Zhang, and Xintao Wu. Fairgan: Fairness-aware generative adversarial networks. In 2018 IEEE International Conference on Big Data (Big Data), pages 570-575. IEEE, 2018. 1, 2
343
+ [61] Ning Yu, Ke Li, Peng Zhou, Jitendra Malik, Larry Davis, and Mario Fritz. Inclusive gan: Improving data and minority coverage in generative models. In Computer Vision - ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXII 16, pages 377-393. Springer, 2020. 2
346
+ [62] Cheng Zhang, Xuanbai Chen, Siqi Chai, Chen Henry Wu, Dmitry Lagun, Thabo Beeler, and Fernando De la Torre. Itigen: Inclusive text-to-image generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3969-3980, 2023. 1, 3, 8
347
+ [63] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Antonio Torralba, and Aude Oliva. Places: An image database for deep scene understanding. arXiv preprint arXiv:1610.02055, 2016. 13
348
+ [64] Beier Zhu, Yulei Niu, Saeil Lee, Minhoe Hur, and Hanwang Zhang. Debiased fine-tuning for vision-language models by prompt regularization. arXiv preprint arXiv:2301.12429, 2023. 3
349
+
350
+ # Appendix
351
+
352
+ # Contents
353
+
354
+ A Ablation Experiments
355
+
356
+ A.1. Guidance Strength $\gamma$
357
+ A.2. Data efficiency of h-space classifiers
358
+ A.3. H-space classifier architectures
359
+
360
+ B Multi-attribute Debiasing
361
+
362
+ B.1. Generalization to imbalanced distributions
363
+
364
+ C Debiasing text-to-image Diffusion Model
365
+
366
+ C.1. Social biases in facial attributes
367
+ C.2. Background bias in bird generation
368
+
369
+ D Implementation details
370
+
371
+ D.1. h-space classifiers
372
+ D.2. Evaluation Metrics
373
+
374
+ # A. Ablation Experiments
375
+
376
+ # A.1. Guidance Strength $\gamma$
377
+
378
+ We ablate over the guidance strength parameter $\gamma$ for both distribution guidance and sample guidance in Fig. 9. Increasing $\gamma$ reduces the bias (better FD) at the cost of inferior image quality (increased FID). A guidance strength of 1500 achieves a good tradeoff between image quality and attribute balance. Further, compared with sample guidance, distribution guidance achieves a better tradeoff at all guidance strengths.
379
+
380
+ ![](images/d3f2151dda014b238f14cc4545b6af08d32a9766b26db42233e1362efb60d33d.jpg)
381
+ Figure 9. Ablation over guidance strength $\gamma$
382
+
383
+ # A.2. Data efficiency of h-space classifiers
384
+
385
+ We ablate over the number of training examples used to train the $h$ -space classifiers in Fig. 10.
386
+
387
+ Table 8. Ablation over the classifier architecture for h-space classification
388
+
389
+ <table><tr><td>Classifier</td><td>Avg. accuracy</td><td>Guidance time ↓</td><td># Parameters</td></tr><tr><td>Linear</td><td>0.921</td><td>1.0x</td><td>3.2M</td></tr><tr><td>CNN</td><td>0.908</td><td>2.5x</td><td>74M</td></tr><tr><td>MLP</td><td>0.952</td><td>2.0x</td><td>200M</td></tr></table>
390
+
391
+ Specifically, we train a gender-attribute classifier in image space and in $h$ -space using the same number of images. We use ResNet50 [11] as the image classifier and a linear head on top of the $h$ -features as the $h$ -space classifier. To obtain predictions from the image classifier at timestep $t$ , we pass it the estimate $\hat{x}_0$ of the clean image; for $h$ -space classification, we pass $h_t$ to the linear head. The $h$ -space classifiers are extremely data-efficient and achieve $>90\%$ accuracy for most timesteps even when trained with just 500 examples. This shows the efficacy of guidance in the $h$ -space, which can be performed with only a few hundred training examples.
392
+
393
+ ![](images/a298d9ff2dd5b1e967fd71d0674a97324a69c89fd2c6873318e94392ee9435f2.jpg)
394
+ Figure 10. Ablation over the number of training examples for classifiers. h-space classifiers achieve excellent performance even with only 200 training examples. In contrast, ResNet50 classifiers require a large number of images to achieve similar performance.
395
+
396
+ # A.3. H-space classifier architectures
397
+
398
+ We ablate over different classifier architectures for classification in the $h$ -space: a linear layer, an MLP, and a small CNN. The results are presented in Tab. 8. We report the average test accuracy over 50 time steps, the number of network parameters, and the guidance time for generating a batch of images. With comparable accuracy, the linear layer has fewer parameters and performs guidance faster, and is hence used as the default classifier.
399
+
400
+ # B. Multi-attribute Debiasing
401
+
402
+ In this section, we generate balanced subgroups for combinations of multiple attributes. We consider jointly balancing 2 and 3 sensitive attributes across all subgroups.
403
+
404
+ Specifically, we explore the following settings: 1) Gender + Race: 0.25 black males, 0.25 black females, 0.25 white males, 0.25 white females; 2) Gender + Eyeglasses: 0.25 males with eyeglasses, 0.25 females with eyeglasses, 0.25 males without eyeglasses, and 0.25 females without eyeglasses; 3) Gender + Race + Eyeglasses: 0.125 for each of the 8 subgroups formed. The results are reported in Tab. 9, where we compute the FD score with a balanced reference set and the FID with the original CelebA-HQ [19] dataset. The FID score quantifies visual quality, whereas the FD score accounts for the bias in the generations, as explained in Sec. 4.1 (main paper). We do not use a balanced reference set to compute FID, as balancing across subgroups leaves considerably fewer samples in the reference set.
405
+
406
+ Table 9. Balancing attribute subgroups for fair generation
407
+
408
+ <table><tr><td></td><td colspan="2">Gender + Race</td><td colspan="2">Gender + Eyeglasses</td><td colspan="2">Gender + Race + Eyeglasses</td></tr><tr><td>Method</td><td>FD</td><td>FID</td><td>FD</td><td>FID</td><td>FD</td><td>FID</td></tr><tr><td>Random Generation</td><td>0.684</td><td>49.45</td><td>0.636</td><td>49.45</td><td>0.768</td><td>49.45</td></tr><tr><td>Sample Guidance</td><td>0.436</td><td>45.49</td><td>0.3</td><td>47.41</td><td>0.496</td><td>47.83</td></tr><tr><td>Distribution Guidance</td><td>0.224</td><td>45.37</td><td>0.2</td><td>45.92</td><td>0.408</td><td>43.94</td></tr></table>
409
+
410
+ # B.1. Generalization to imbalanced distributions
411
+
412
+ Here, we present results for generating an imbalanced distribution over subgroups by providing a skewed reference distribution. Such a setting is helpful for data augmentation of under-represented subgroups, as shown in Sec. 4.8 of the main paper. We consider the following two settings: 1) Gender + Eyeglasses: 0.40 males with eyeglasses, 0.10 males without eyeglasses, 0.40 females with eyeglasses, and 0.10 females without eyeglasses; 2) Gender + Race: 0.40 black males, 0.10 white males, 0.40 black females, and 0.10 white females. These two configurations run contrary to the originally generated distribution, as blacks and eyeglasses are minority groups. The results are provided in Tab. 10, where we report FD with a reference set and the FID score with the CelebA-HQ dataset to evaluate bias and generation quality.
413
+
414
+ Table 10. Generating imbalanced distributions across subgroups to produce more images of under-represented groups.
415
+
416
+ <table><tr><td></td><td colspan="2">Gender + Eyeglasses (0.40, 0.10, 0.40, 0.10)</td><td colspan="2">Gender + Race (0.40, 0.10, 0.40, 0.10)</td></tr><tr><td>Method</td><td>FD</td><td>FID</td><td>FD</td><td>FID</td></tr><tr><td>Random Generation</td><td>1.1</td><td>49.45</td><td>1.444</td><td>49.45</td></tr><tr><td>Sample Guidance</td><td>0.472</td><td>48.66</td><td>0.756</td><td>62.48</td></tr><tr><td>Distribution Guidance</td><td>0.38</td><td>47.68</td><td>0.464</td><td>45.51</td></tr></table>
417
+
418
+ # C. Debiasing text-to-image Diffusion Model
419
+
420
+ # C.1. Social biases in facial attributes
421
+
422
+ We extend our experiments with Stable Diffusion (SD) [40], previously outlined in Sec. 4.6 of the main paper, by applying our distribution guidance technique to debias other attributes (race and age).
423
+
424
+ Table 11. Balancing race for multiple professions and age
425
+
426
+ <table><tr><td>Method</td><td>Race-Doctor</td><td>Race-Firefighter</td><td>Age</td></tr><tr><td>Random Generation</td><td>0.356</td><td>0.423</td><td>0.488</td></tr><tr><td>Distribution Guidance</td><td>0.191</td><td>0.186</td><td>0.194</td></tr></table>
427
+
428
+ We observe that SD generations for a neutral prompt, such as 'a photo of a firefighter' or 'a photo of a doctor', are predominantly images of people of white origin. Similarly, when prompted with 'a photo of a person', SD mostly generates images of young people. We mitigate these biases with the proposed distribution guidance method following Sec. 4.6. Qualitative results are provided in Fig. 11: our method achieves fairness across the race and age attributes using the same neutral prompts. We present quantitative results in Tab. 11, where our method achieves superior FD scores, computed using the CLIP attribute classifier as explained in Sec. D.1.
429
+
430
+ # C.2. Background bias in bird generation
431
+
432
+ Our method is also applicable to mitigating biases that are not social (i.e., spurious correlations). WaterBirds [43] is a widely used synthetic dataset for demonstrating spurious correlations, consisting of images of birds across different backgrounds. The bird images (landbirds and waterbirds) are collated from the CUB dataset [58], and the backgrounds from the Places dataset [63]. The majority of waterbirds (sea birds) are affixed to a water background, and landbirds to land backgrounds. We generate a dataset of $10K$ images using SD with the prompts 'photo of a land background' and 'photo of a water background'. Additional negative prompts, such as ocean, water, sea, shore, and river, are used to generate pure land images; similarly, the negative prompts land, mountain, sand, forest, and rocks are used to generate pure water images. Next, we train a background $h$ -space classifier for guidance. When queried with neutral prompts for a landbird or a waterbird, SD follows the spurious correlation in its generations: when prompted for a landbird ('a photo of a crow'), SD predominantly generates a crow with land in the background, whereas when prompted for a waterbird ('a photo of a duck'), SD generates images with water in the background. With distribution guidance, an equal number of images across both backgrounds is generated irrespective of the bird type. See Fig. 12 for qualitative results.
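+ A small sketch of the background-image generation, again assuming the diffusers API; `negative_prompt` is a real `StableDiffusionPipeline` argument, and the prompt strings follow the text above.
+
+ ```python
+ from diffusers import StableDiffusionPipeline
+
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+ land = pipe("photo of a land background",
+             negative_prompt="ocean, water, sea, shore, river").images[0]
+ water = pipe("photo of a water background",
+              negative_prompt="land, mountain, sand, forest, rocks").images[0]
+ ```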
433
+
434
+ # D. Implementation details
435
+
436
+ # D.1. h-space classifiers
437
+
438
+ Training data. We create a paired training set $\mathcal{D}_{clf}^{h}$ of $h$ -space features and attribute labels for training the $h$ -space attribute classifiers. We start with the CelebA-HQ [20] dataset $\mathcal{D}_{unf}$ .
439
+
440
+ ![](images/6ec2485869fb4e18f94d682924aaa0bc741bb4bcebb8394c8179a113d2617c30.jpg)
441
+ Figure 11. Debiasing results on stable diffusion for race and age attribute. We present random samples generated by the original stable diffusion model and with distribution guidance. a) Balancing across race involving different professions e.g. firefighter, doctor. b) Balancing across age attribute for neutral prompts.
442
+
443
+ ![](images/855b145fb1aa321fe58396638639e1a34f7b31ace941818bedd25bbfa4e793f7.jpg)
444
+
445
+ ![](images/d7f9716b82642516142000bcbe31036952d0d79273ef4f8fa3f1a9f53908130a.jpg)
446
+
447
+ Next, we use an off-the-shelf image-space attribute classifier to obtain attribute labels for $\mathcal{D}$ . We use CLIP as the classifier for both the gender and race attributes, as race labels are not present in the CelebA-HQ dataset. Specifically, we pass $\mathcal{D}_{unf}$ through the CLIP [36] image encoder and obtain its similarity with the text prompts 'a male' and 'a black person'. The highest- and lowest-similarity images are then filtered to create a labeled attribute dataset $\mathcal{D}$ . We use $|\mathcal{D}| = 2000$ , with 1000 images for the positive class and 1000 for the negative class, unless mentioned otherwise. For the eyeglasses attribute, we use ground-truth labels from the CelebA-HQ dataset, as the predictions from CLIP were inaccurate. We then embed $\mathcal{D}$ into the $h$ -space representation using DDIM [51] inversion to obtain the labeled dataset $\mathcal{D}_{clf}^{h}$ for training the classifiers.
448
+
449
+ Model architecture. The $h$ -space classifiers are implemented as a single linear layer for each diffusion timestep $t$ . We use DDIM inversion with $T = 49$ timesteps and obtain $\mathcal{H}^i = \{h_t^i\}_{t=0}^{t=49}$ as the set of $h$ -vectors for each image $i$ . As each classifier is linear with two output neurons (positive/negative class), they can be jointly represented as a single fully connected layer with $2T$ output neurons.
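+ A minimal sketch of this joint-head formulation (our reading of the description above; shapes and the default number of steps are illustrative):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class HSpaceClassifier(nn.Module):
+     """All per-timestep linear heads folded into one Linear layer."""
+     def __init__(self, h_dim, num_classes=2, T=50):
+         super().__init__()
+         self.T, self.num_classes = T, num_classes
+         self.head = nn.Linear(h_dim, T * num_classes)
+
+     def forward(self, h_t, t):
+         # h_t: [N, h_dim] flattened bottleneck features; t: [N] timesteps.
+         logits = self.head(h_t).view(-1, self.T, self.num_classes)
+         return logits[torch.arange(h_t.size(0)), t]   # head for step t
+ ```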
450
+
451
+ Optimization. We train the $h$ -space classifiers with the following hyper-parameters: batch size 64, learning rate 0.001, and 5 epochs on a single NVIDIA A5000 GPU. The overall training time for a single attribute classifier is 484.37s.
452
+
453
+ # D.2. Evaluation Metrics
454
+
455
+ We created a reference set $\mathcal{D}_{ref}$ of $5K$ images following the reference attribute distribution $\mathbf{p}_{\mathrm{ref}}^{\mathrm{a}}$ , using the attribute labels from CelebA-HQ [19]. The ground truth labels were
456
+
457
+ used for gender and eyeglasses from CelebA-HQ. For race, however, as labels are not available, we obtained them from CLIP as discussed in Sec. D.1. We use $\mathcal{D}_{ref}$ to compute the reference statistics for FID. For FD, we compute the discrepancy of the predicted attribute distribution from the reference distribution following Sec. 4.1. We use the ResNet-18 [11] architecture to implement the attribute classifiers trained on CelebA-HQ [19]. For fair evaluation, this set of classifiers is different from the one used to obtain $\mathcal{D}_{clf}^{h}$ .
458
+
459
+ ![](images/fcb6e8cc294fb53a43ec0e62f9e5595ec017d138369121e1992a58ddb3429c05.jpg)
460
+
461
+ ![](images/32c9bb765fa1227bb3ddd15cf1ff5450139d17dffbc21c28e812b42c73b186d5.jpg)
462
+ Figure 12. Debiasing results on stable diffusion for backgrounds while generating birds. The proposed Distribution Guidance can balance the number of birds in various backgrounds.
2402.18xxx/2402.18206/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f5fc6e70257e4a668ba7bdc23d6a90522f72a629c44ecc59f1bb40b3822f59f
3
+ size 1026036
2402.18xxx/2402.18206/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18225/58455eb8-8365-4304-946a-8f557a162fbf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:792f79581c3bc867c2a391e0e31fac14a30d16e8fd1a6d0e133fd32502b9efdc
3
+ size 972418
2402.18xxx/2402.18225/full.md ADDED
@@ -0,0 +1,724 @@
1
+ # CogBench: a large language model walks into a psychology lab
2
+
3
+ Julian Coda-Forno $^{12}$ Marcel Binz $^{12}$ Jane X. Wang $^{3}$ Eric Schulz $^{12}$
4
+
5
+ # Abstract
6
+
7
+ Large language models (LLMs) have significantly advanced the field of artificial intelligence. Yet, evaluating them comprehensively remains challenging. We argue that this is partly due to the predominant focus on performance metrics in most benchmarks. This paper introduces CogBench, a benchmark that includes ten behavioral metrics derived from seven cognitive psychology experiments. This novel approach offers a toolkit for phenotyping LLMs' behavior. We apply CogBench to 35 LLMs, yielding a rich and diverse dataset. We analyze this data using statistical multilevel modeling techniques, accounting for the nested dependencies among fine-tuned versions of specific LLMs. Our study highlights the crucial role of model size and reinforcement learning from human feedback (RLHF) in improving performance and aligning with human behavior. Interestingly, we find that open-source models are less risk-prone than proprietary models and that fine-tuning on code does not necessarily enhance LLMs' behavior. Finally, we explore the effects of prompt-engineering techniques. We discover that chain-of-thought prompting improves probabilistic reasoning, while take-a-step-back prompting fosters model-based behaviors.
8
+
9
+ # 1. Introduction
10
+
11
+ Large language models (LLMs) have emerged as a groundbreaking technology, captivating the attention of the scientific community (Bommasani et al., 2021; Binz et al., 2023). Modern LLMs have scaled to remarkable dimensions in both architecture and datasets (Kaplan et al., 2020), revealing a spectrum of capabilities that were previously unimagined (Wei et al., 2022; Brown et al., 2020). Yet, these models also present a significant challenge: their internal workings are largely opaque, making it difficult to fully comprehend their behavior (Tamkin et al., 2021). This lack of understanding fuels ongoing debates about their capabilities and limitations (McCoy et al., 2023; Bubeck et al., 2023).
+
+ *Equal contribution. $^1$Computational Principles of Intelligence Lab, Max Planck Institute for Biological Cybernetics, Tübingen, Germany. $^2$Institute for Human-Centered AI, Helmholtz Computational Health Center, Munich, Germany. $^3$Google DeepMind, London, UK. Correspondence to: Julian Coda-Forno <julian.coda-forno@helmholtz-munich.de>.
+
+ Preprint. Under review.
18
+
19
+ A notable issue in these discussions is the focus of many benchmarks on performance metrics alone (Burnell et al., 2023). This approach often overlooks the underlying behavioral mechanisms of the models, reducing benchmarks to mere training targets rather than tools for genuine insight, and thus failing to provide a comprehensive measure of the models' abilities (Schaeffer et al., 2023). How can we overcome this problem and make progress toward a better understanding of LLMs' behaviors?
20
+
21
+ The field of cognitive psychology may offer solutions to these problems. Experiments from cognitive psychology have been used to study human behavior for many decades, and have therefore been extensively validated. Furthermore, they typically focus more on behavioral insights rather than performance metrics alone. Finally, many of these experiments are programmatically generated, minimizing data leakage concerns. Many of these concepts are important to ensure a robust evaluation of an agent's capabilities. However, while there have been studies investigating LLMs on individual tasks from cognitive psychology (Binz & Schulz, 2023; Dasgupta et al., 2022; Hagendorff et al., 2023; Ullman, 2023), no study has evaluated them holistically.
22
+
23
+ In this paper, we propose CogBench, a novel benchmark consisting of ten behavioral metrics spanning seven cognitive psychology experiments, to fill this gap. We investigate the behaviors of 35 LLMs in total, using our benchmark to not only compare the performance of these models but also apply techniques from computational cognitive modeling to understand the inner workings of their behaviors.
24
+
25
+ Our results recover the unequivocal importance of size: larger models generally perform better and are more model-based than smaller models. Our results also show the importance of reinforcement learning from human feedback (RLHF; Christiano et al., 2017) in aligning LLMs with humans: RLHF'ed LLMs generally behave more human-like and are more accurate in estimating uncertainty. Yet our results also revealed surprising behaviors. First, while open-source models are often believed to be more risky due to the lack of pre-prompts, we find that, holding all else equal, they make less risky decisions than proprietary models. Secondly, while fine-tuning on code is often believed to improve LLMs' behaviors, we find little evidence for this in our benchmarking suite.
+
+ ![](images/959463d00f4212831cab13394f281c02f36d9a16912382890db3dbc3d55fd0dc.jpg)
+ Figure 1. Overview of approach and methods. CogBench provides open access to seven different cognitive psychology experiments. These experiments are text-based and can be run to evaluate any LLM's behavior. The experiments are submitted to LLMs as textual prompts and the models indicate their choices by completing a given prompt. Past behavior is then concatenated to the prompt and learning is induced via prompt-chaining. We used 35 LLMs in total, including most larger proprietary LLMs as well as many open-source models.
31
+
32
+ Finally, we investigate how chain-of-thought (CoT) (Wei et al., 2023; Kojima et al., 2022) and take-a-step-back (SB) (Zheng et al., 2023a) prompting techniques influence different behavioral characteristics. Our analysis suggests that CoT is particularly effective at enhancing probabilistic reasoning, while SB proves more relevant for promoting model-based behaviors. This showcases how CogBench can also shed light on the effectiveness of these prompt-engineering techniques and guide users in selecting the most suitable technique for a given context.
33
+
34
+ Taken together, our experiments show how psychology can offer detailed insights into artificial agents' behavior as we provide an openly accessible<sup>1</sup> and challenging benchmark to evaluate LLMs.
35
+
36
+ # 2. Related work
37
+
38
+ Benchmarking LLMs: As LLMs rapidly evolve, it is critical to assess their capabilities. Numerous benchmarks have emerged to tackle this challenge, evaluating capabilities such as grade school mathematics (Cobbe et al., 2021), general knowledge (Joshi et al., 2017), programming (Chen et al., 2021), reasoning (Collins et al., 2022), among others (Hendrycks et al., 2021). In addition, the Chatbot Arena (Zheng et al., 2023b) provides a platform for comparing AI chatbots, and the Beyond the Imitation Game Benchmark (BIG-bench; Srivastava & authors, 2023) offers a comprehensive evaluation of LLMs across over 200 tasks.
39
+
40
+ Psychology for LLMs: Our benchmark is part of a new wave of research that uses cognitive psychology to study LLMs (Binz & Schulz, 2023; Dasgupta et al., 2022; Coda-Forno et al., 2023; Ullman, 2023; Hagendorff et al., 2023; Akata et al., 2023; Yax et al., 2023; Chen et al., 2023; Buschoff et al., 2024). The power of this approach lies in its incorporation of tools from cognitive psychology that have been developed and refined over many decades. Instead of focusing solely on how well LLMs perform, this area of research prioritizes describing and characterizing their behavior in terms of underlying mechanisms. This shift in focus helps us understand LLMs in a more meaningful way. It is important to note that while these works have significantly contributed to our understanding of LLMs, they have mainly targeted specific behaviors in isolation and did not establish a benchmark providing a standardized evaluation of different models across a diverse, comprehensive set of tasks and skills.
43
+
44
+ # 3. Methods
45
+
46
+ CogBench is a benchmark rooted in cognitive psychology for evaluating the behaviors of language models. It incorporates ten metrics derived from seven canonical experiments in the literature on learning and decision-making. These metrics offer a robust measure of wide-ranging behaviors and allow for comparisons with human behavior. In this section, we provide an overview of the models included in our study, followed by brief descriptions of the used cognitive experiments and their respective metrics. Figure 1 displays a visual representation that complements the discussion in this section.
47
+
48
+ # 3.1. Prompting and summary of included models
49
+
50
+ We evaluated 35 different LLMs using our benchmark. This selection includes proprietary models such as Anthropic's Claude-1 and Claude-2 (Anthropic, 2023), OpenAI's GPT-3 (text-davinci-003) and GPT-4 (OpenAI, 2023), and Google's PaLM-2 for text (text-bison@002) (Google, 2023). We also tested open-source models like Mosaic's MPT (MosaicML, 2023), Falcon (Almazrouei et al., 2023), and numerous LLaMA-2 variants (Touvron et al., 2023). For a full list of the models used, we refer the reader to Appendix A.
51
+
52
+ It is important to note that all experiments performed in this paper rely entirely on the LLMs' in-context learning abilities and do not involve any form of fine-tuning. We set the temperature parameter to zero, leading to deterministic responses, and retained the default values for all other parameters.
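+
+ As a concrete illustration of this querying procedure, the following sketch shows a prompt-chaining loop on a toy two-armed bandit. The `complete` function is a hypothetical placeholder for any LLM completion API; none of this is the paper's released code.
+
+ ```python
+ import random
+
+ def complete(prompt, temperature=0.0):
+     """Hypothetical stand-in for an LLM completion API call."""
+     raise NotImplementedError
+
+ def run_bandit_episode(n_trials=10):
+     """Evaluate an LLM on a toy two-armed bandit purely in-context."""
+     history = ""
+     for _ in range(n_trials):
+         prompt = (
+             "You are playing two slot machines, F and J.\n"
+             + history
+             + "Q: Which machine do you choose?\nA: Machine"
+         )
+         # Temperature zero keeps the completion deterministic.
+         choice = complete(prompt, temperature=0.0).strip()[0]
+         reward = random.gauss(60 if choice == "F" else 40, 8)
+         # Prompt-chaining: past behavior is concatenated to the next prompt.
+         history += f"- Machine {choice} delivered {reward:.0f} dollars.\n"
+ ```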
53
+
54
+ # 3.2. High-level summary of tasks
55
+
56
+ In the following, we provide a high-level summary of the tasks included in CogBench, alongside their ten behavioral metrics. It is important to highlight that a performance metric can also be obtained for each task. For full descriptions of all tasks and their corresponding metrics, we refer the reader to Appendix B. CogBench consists of the following tasks:
57
+
58
+ 1. Probabilistic reasoning (Dasgupta et al., 2020): a task that tests how agents update beliefs based on new evidence. They are given a "wheel of fortune" (representing initial prior probabilities) and two urns with different colored ball distributions (representing likelihoods). Upon drawing a ball, agents can revise their belief about the chosen urn, considering both the wheel (prior) and the ball color (evidence). This tests adaptability to different prior/likelihood scenarios by changing the wheel division and ball distributions. Agents have to estimate the probability of the drawn ball's urn. The behavioral choices can be used to estimate an agent's prior and likelihood weightings. Experimentally, people often exhibit a behavior known as system neglect, meaning that they underweight both priors and likelihoods (Massey & Wu, 2005).
61
+
62
+ 2. Horizon task (Wilson et al., 2014): a two-armed bandit task with stationary reward distributions. Agents first observe four reward values of randomly determined options, followed by making either one or six additional choices. We use this task to measure whether an agent uses uncertainty to guide its exploration behavior (directed exploration) and/or whether it injects noise into its policy to explore (random exploration). People are known to rely on a combination of both strategies (Wilson et al., 2014; Brändle et al., 2021).
63
+ 3. Restless bandit task (Ershadmanesh et al., 2023): a two-armed bandit task with non-stationary reward distributions. There is always one option with a higher average reward. Every few trials a switch between the reward distributions of the two options occurs. Agents furthermore have to indicate after each choice how confident they are in their decisions. We use this task to measure meta-cognition, which indicates whether an agent can assess the quality of its own cognitive abilities. People generally display this ability but its extent is influenced by various internal and external factors (Shekhar & Rahnev, 2021).
64
+ 4. Instrumental learning (Lefebvre et al., 2017): Agents encounter four two-armed bandit problems in an interleaved order. Each bandit problem is identified by a unique symbol pair. We use this task to investigate how an agent learns. First, we report the learning rate of the agent, which is common practice in two-armed bandits. Furthermore, we use it to reveal whether an agent learns more from positive than from negative prediction errors, i.e., whether it has an optimism bias. People commonly display asymmetric tendencies when updating their beliefs by showing higher learning rates after encountering positive prediction errors compared to negative ones (Palminteri & Lebreton, 2022).
65
+ 5. Two-step task (Daw et al., 2011): a reinforcement learning task in which agents have to accumulate as many treasures as possible. Taking an action from a starting state transfers the agent to one out of two second-stage states. In each of these second-stage states, the agent has the choice between two options that probabilistically lead to treasures. Finally, the agent is transferred back to the initial state and the process repeats for a predefined number of rounds. The task experimentally disentangles model-based from model-free reinforcement learning. We therefore use it to measure an agent's model-basedness. Previous studies using this task have shown that people rely on a combination of model-free and model-based reinforcement learning (Daw et al., 2011).
68
+
69
+ 6. Temporal discounting (Ruggeri et al., 2022): Agents have to make a series of choices between two options. Each option is characterized by a monetary outcome and an associated delay until the outcome is received. We use this task to assess temporal discounting, indicating whether an agent prefers smaller but immediate gains over larger delayed ones. People generally show a preference for immediate gains, although the precise functional form of their discounting is a matter of debate (Cavagnaro et al., 2016).
70
+ 7. Balloon Analog Risk Task (BART) (Lejuez et al., 2002): Agents have to inflate an imaginary balloon to obtain rewards. They may choose to stop inflating and cash out all rewards accumulated so far. There is a chance that the balloon pops at any point in time, in which case all rewards are lost. We use this task to assess risk-taking behavior; a minimal simulation sketch follows this list. Human risk-taking in this task is "significantly correlated with scores on self-report measures of risk-related constructs and with the self-reported occurrence of real-world risk behaviors" (Lejuez et al., 2002).
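+
+ To make the last task concrete, here is a minimal sketch of a BART-style environment. It assumes a fixed per-pump pop probability and a unit reward per pump; the exact parametrization used in CogBench may differ.
+
+ ```python
+ import random
+
+ def play_bart(pump_policy, pop_prob=0.1, reward_per_pump=1):
+     """Simulate one balloon. `pump_policy(banked)` returns True to pump again."""
+     banked = 0
+     while pump_policy(banked):
+         if random.random() < pop_prob:  # balloon pops: all rewards are lost
+             return 0
+         banked += reward_per_pump       # each successful pump adds reward
+     return banked                       # agent stops inflating and cashes out
+
+ # A maximally risk-averse policy never pumps; a maximally risky one never stops.
+ print(play_bart(lambda banked: banked < 5))  # pump five times, then cash out
+ ```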
71
+
72
+ # 4. The cognitive phenotype of LLMs
73
+
74
+ This section provides the reader with a high-level overview of our benchmark's metrics. From our suite of seven tasks, we can derive two classes of metrics: 1) performance metrics that represent the score participants aim to optimize, and 2) behavioral metrics measuring how participants complete the task (tasks are typically designed in a way that allows one to disentangle different types of behavior). Figure 2 visualizes phenotypes for both classes of metrics for seven well-established LLMs. We report the results of all 35 LLMs in Appendix C. The observed differences underscore the practical value and importance of CogBench for evaluating LLMs, offering a more comprehensive assessment than standard performance-based benchmarks alone.
75
+
76
+ # 4.1. Performance summary
77
+
78
+ As presented in Figure 2A, in terms of performance, GPT-4 and Claude-1 distinguish themselves, achieving human-level scores in most tasks (five out of six). In general, all models demonstrate competence in at least half of the tasks (three out of six). Each of the seven models excels in probabilistic reasoning and instrumental learning. The horizon task sees most models outperforming humans except for text-bison. The restless bandit task poses a challenge for the majority of models, with GPT-4 and Claude-1 being notable exceptions. Finally, the BART proves to be a hurdle for all models.
79
+
80
+ # 4.2. Differences between behavioral and performance metrics
81
+
82
+ Figure 2B shows that none of the models exhibit human-like behavior on the majority of behavioral metrics, revealing a complex structure that warrants further exploration.
83
+
84
+ High performance indicates high meta-cognition and model-basedness: Models that demonstrate satisfactory performance on the restless bandit task exhibit a certain degree of meta-cognition, although not to the same extent as humans. Proprietary models that are capable of solving the two-step task display model-based behavior at least on par with humans, with GPT-4 significantly surpassing human levels. Thus, within the scope of these two tasks, it seems that a model's performance can serve as an indicator of its corresponding behavioral metrics. In this context, meta-cognition and model-basedness appear to emerge as properties of high-performing models.
85
+
86
+ High performance despite lack of exploration: Interestingly, almost all models (except for text-bison) demonstrate super-human performance on the horizon task. While they exhibit high performance, they still lack exploration (except for LLaMA2-70-chat which exhibits higher-than-human random exploration). This underscores the importance of behavioral metrics in understanding the strategies employed by LLMs. In this case, it appears that LLMs achieve high performance primarily through exploitation without any human-like exploration.
87
+
88
+ Stronger priors than likelihoods: All models place much more weight on priors than observations, suggesting strong biases that are difficult to alter. Additionally, we can observe a prevalence of optimism bias and high learning rates. Almost all models exhibit a very strong optimism bias (except for text-bison), aligning with the notion that these LLMs harbor strong biases.
89
+
90
+ ![](images/3d23710f243efddeaa012700122bc19e42c4a0fb5e3849c88768b3cee99cc155.jpg)
+ Figure 2. CogBench results for established LLMs. A: Performance metrics, B: Behavioral metrics. All metrics are human-normalized: a value of zero corresponds to a random agent, while a value of one corresponds to the average human subject (dotted lines).
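+
+ The human-normalization in the caption corresponds, in our reading (the paper does not spell the formula out here), to mapping each raw metric $m$ via
+
+ $$
+ \tilde{m} = \frac{m_{\mathrm{LLM}} - m_{\mathrm{random}}}{m_{\mathrm{human}} - m_{\mathrm{random}}},
+ $$
+
+ so that $\tilde{m} = 0$ recovers a random agent and $\tilde{m} = 1$ the average human subject.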
93
+
94
+ Low performance but high behavioral variance for risk-taking and temporal discounting: Temporal discounting and risk-taking behaviors exhibit high variance among models. While some models, such as text-bison and LLaMA-2-70, appear myopic on the temporal discounting task, others, including text-davinci-003, Claude, and LLaMA-2-70-chat, demonstrate a much more far-sighted approach. GPT-4, interestingly, exhibits behavior akin to humans. For the BART, models are positioned at extreme ends of the risk-taking spectrum, i.e., they either never take any risks at all or always risk everything. LLaMA-2-70 and LLaMA-2-70-chat, for example, display the same performance in this task but exhibit opposite risk-taking behavior. This not only indicates a struggle for LLMs to apprehend risks but also underscores the importance of our benchmark. Indeed, it raises questions about what influences a model's behavior. It also highlights how recording only their performance would have overlooked the contrasting risk-taking behavior of the two LLaMA models.
95
+
96
+ The comparison between LLaMA-2-70 and LLaMA-2-70-chat is particularly compelling. Even though LLaMA-2-70-chat is a fine-tuned version of LLaMA-2-70, they exhibit markedly different behavior in risk-taking, temporal discounting, and random exploration. This divergence is intriguing, especially considering their performance on all tasks is relatively similar. This observation sets the stage for the subsequent section, where we will conduct a more comprehensive analysis of how specific features of these models influence their performance and behaviors.
99
+
100
+ # 5. Hypothesis-driven experiments
101
+
102
+ CogBench provides researchers with the means to explore a broad spectrum of LLMs' behaviors. We have applied CogBench to 35 distinct LLMs. This diversity allows us to test how different aspects of LLMs, such as the number of parameters, the application of Reinforcement Learning from Human Feedback (RLHF), fine-tuning for code, and many more, can impact specific LLMs' performance and behaviors.
103
+
104
+ The metrics provided by CogBench enable us to perform various analyses to test specific hypotheses of interest. In this section, we formulate and test five hypotheses about different mechanisms in LLMs and how these can affect their behavioral profiles. We use both qualitative, visualization-based techniques (dimensionality reduction) and quantitative analyses (multi-level regression) to test our hypotheses. For all regression analyses, we use the features of LLMs to predict specific behavioral metrics from the benchmark. The multi-level regression approach was chosen because some models are fine-tuned versions of other models. For instance, certain LLaMA models have a -chat version which adds RLHF and conversational fine-tuning, and thus belong to the same higher-level group. This approach allows us to account for the hierarchical structure in our data and provides a more nuanced understanding of the behaviors of LLMs. We can isolate the effects of specific features or modifications by comparing models within the same higher-level group.
+
+ ![](images/996d22d6782091b852614f940b1ca071c1ea73521e3cba76eab165e79dcb55eb.jpg)
+ Figure 3. A: UMAP visualization of the ten behavioral metrics for all LLMs. Each point represents an LLM, with models using RLHF and models without RLHF indicated by different colors. B: Difference in average $L2$-norm with humans between RLHF models and non-RLHF models.
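+
+ As a concrete illustration of this setup, the sketch below fits such a multi-level (mixed-effects) regression with `statsmodels`. The feature table, column names, and exact specification are our assumptions for illustration, not the paper's analysis code.
+
+ ```python
+ import pandas as pd
+ import statsmodels.formula.api as smf
+
+ # Hypothetical feature table: one row per LLM; `base_model` groups
+ # fine-tuned variants (e.g., LLaMA-2-70 and LLaMA-2-70-chat) together.
+ df = pd.DataFrame({
+     "base_model":  ["llama-2-70"] * 2 + ["llama-2-13"] * 2 + ["mpt-30"] * 2,
+     "n_params":    [70, 70, 13, 13, 30, 30],              # billions
+     "rlhf":        [0, 1, 0, 1, 0, 0],
+     "performance": [0.52, 0.58, 0.40, 0.47, 0.35, 0.38],  # standardized score
+ })
+
+ # A random intercept per base model accounts for the nested dependencies
+ # among fine-tuned versions of the same underlying LLM.
+ model = smf.mixedlm("performance ~ n_params + rlhf", df, groups=df["base_model"])
+ print(model.fit().summary())
+ ```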
110
+
111
+ # Hypothesis 1: Does RLHF make LLMs more human-like?
112
+
113
+ To evaluate this hypothesis, we used UMAP (McInnes et al., 2020) on the ten behavioral metrics of all LLMs, as illustrated in Figure 3A. Clear separation is evident between LLMs that incorporate RLHF and those that do not. LLMs with RLHF demonstrate behaviors that appear, on average, roughly $2 \times$ more similar to human behavior compared to models without it. However, it is important to note that while UMAP space retains some global structure, it is primarily used for visualization purposes. Consequently, we also analyzed the average distances before dimensionality reduction (using normalized feature vectors), observing an $11.7\%$ average decrease in $L2$-norm distance for models with RLHF (Figure 3B).
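+
+ A minimal sketch of this analysis, assuming a matrix `X` holding the ten behavioral metrics for each LLM and a vector `human` with the corresponding human values (names and placeholder data are ours):
+
+ ```python
+ import numpy as np
+ import umap  # pip install umap-learn
+
+ rng = np.random.default_rng(0)
+ X = rng.random((35, 10))         # placeholder: 35 LLMs x 10 behavioral metrics
+ human = rng.random(10)           # placeholder: average human metric vector
+ has_rlhf = rng.random(35) < 0.5  # placeholder: RLHF indicator per model
+
+ # Two-dimensional embedding for visualization (cf. Figure 3A).
+ embedding = umap.UMAP(n_components=2, random_state=0).fit_transform(X)
+
+ # Distances to the human profile are computed on the normalized feature
+ # vectors, before any dimensionality reduction (cf. Figure 3B).
+ dists = np.linalg.norm(X - human, axis=1)
+ print(dists[has_rlhf].mean(), dists[~has_rlhf].mean())
+ ```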
114
+
115
+ Conclusion: Hypothesis is supported.
116
+
117
+ # Hypothesis 2: Does performance increase with the number of parameters, training data, and the inclusion of code?
118
+
119
+ To answer this question, we used the multi-level regression previously mentioned, focusing on the performance of LLMs. We performed a regression analysis with the average standardized performance scores across all seven tasks as the dependent variable, using LLMs' features as predictors. We found that the number of parameters indeed had a significant influence on performance ($\beta = 0.277 \pm 0.39$, $z = 14.1$, $p < 0.001$; see Figure 4A). However, the size of the training dataset and the use of code training data did not have a substantial impact. One possible explanation is that the quality of the training data, rather than its sheer volume, plays a more decisive role in performance; another is that larger models also tend to be trained on larger datasets, making the two factors hard to separate.
120
+
121
+ Conclusion: Hypothesis is partially supported.
122
+
123
+ # Hypothesis 3: Does an increase in parameters, training data, and the inclusion of code increase model-basedness?
124
+
125
+ We again used the multi-level regression technique from before, this time focusing on a specific behavioral metric: model-basedness. We found that the number of parameters had a significant positive effect ($\beta = 0.481 \pm 0.22$, $z = 4.2$, $p < 0.001$; see Figure 4B), while the size of the training dataset and the use of code training data did not appear to significantly influence model-basedness. This again suggests that the quality of the data might be more crucial than its quantity when it comes to determining both performance and the emergence of model-based behaviors. However, identifying which factors constitute 'quality' in the data requires a deeper exploration. This highlights the issue of transparency about data. For a thorough evaluation of how specific data features impact the emergence of behavioral functionalities such as model-basedness, it is essential to be transparent about a model's data and methodologies.
126
+
127
+ Conclusion: Hypothesis is partially supported.
128
+
129
+ # Hypothesis 4: Does RLHF enhance meta-cognition?
130
+
131
+ To answer this question, we focus our multi-level regression on meta-cognition. Our analysis revealed a strong effect ($\beta = 0.461 \pm 0.15$, $z = 5.9$, $p < 0.001$; see Figure 4C), indicating that RLHF significantly increased meta-cognition in LLMs. This finding underscores the potential of RLHF in enhancing the cognitive capabilities of LLMs.
132
+
133
+ Conclusion: Hypothesis is supported.
134
+
135
+ # Hypothesis 5: Do open-source models take more risks?
136
+
137
+ The open-source feature could be seen as a proxy for the engineering efforts that proprietary models undergo. There is a growing body of research suggesting that such engineering choices, hidden pre-prompts being one of them, can significantly influence the behavior of LLMs (Liu et al., 2023). They can act as a form of 'priming' that guides the model's responses, potentially making the model more cautious and less likely to take risks by constraining it towards safer behaviors. However, our regression analysis suggested otherwise: contrary to expectations, we observed a negative effect ($\beta = -0.612 \pm 0.11$, $z = -11.4$, $p < 0.001$; see Figure 4D), indicating that proprietary models, which often have hidden pre-prompts, are more likely to take risks. This surprising outcome could be influenced by various factors stemming from different engineering techniques. However, it underscores how little these techniques have been evaluated behaviorally. In the subsequent section, we take a first step toward bridging this gap by exploring how two standard prompt-engineering techniques change behavior.
+
+ ![](images/27c1363678e698719f4fd7aef6e7a26b2f17ecb79036ad33d2746d24b288698c.jpg)
+
+ ![](images/72e582e38ba1eb6972a6b2e928bdfda94cfa7eebc22d96ff9dd31e4d4d60953f.jpg)
+
+ ![](images/50aa065f778d9d0f472a938bdc582d65cae9ff9d53cd7da26f4213d4d937404e.jpg)
+
+ ![](images/86b0c63b80c01ebe4ef254b546675bea9c8eea40b44307c4fe0319cbfe65ab88.jpg)
+ Figure 4. Multi-level regressions of LLM features onto different performance or behavioral metrics. Red bars represent effects included in a hypothesis. A: Regression onto all task performances. B: Regression onto model-basedness. C: Regression onto meta-cognition. D: Regression onto risk taking. ***: $p < 0.001$, **: $0.001 \leq p < 0.01$, *: $0.01 \leq p < 0.05$.
149
+
150
+ Conclusion: Hypothesis is refuted.
151
+
152
+ # 6. Impact of prompt-engineering
153
+
154
+ We also explored the impact of prompt-engineering techniques, namely chain-of-thought (CoT) and take-a-step-back (SB) prompting, on the behavior of LLMs. Both techniques are incorporated at the end of a question:
155
+
156
+ # Take-a-step-back:
157
+
158
+ First take a step back and think in the following two steps to answer this:
159
+
160
+ Step 1) Abstract the key concepts and principles relevant to this question.
161
+
162
+ Step 2) Use the abstractions to reason through.
163
+
164
+ # Chain-of-thought:
165
+
166
+ First break down the problem into smaller steps and reason through each step logically.
167
+
168
+ Their purpose is to stimulate the generation of reasoning steps. These steps serve as additional context that the LLM can use to elicit better final responses. While these techniques have been shown to enhance performance, it is essential to confirm whether they indeed improve the behaviors they are designed to augment.
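+
+ Concretely, both techniques amount to appending a fixed instruction to the task question before querying the model; a minimal sketch (the suffix strings are taken verbatim from above, the helper is ours):
+
+ ```python
+ SUFFIXES = {
+     "cot": "First break down the problem into smaller steps and reason "
+            "through each step logically.",
+     "sb": "First take a step back and think in the following two steps to "
+           "answer this:\n"
+           "Step 1) Abstract the key concepts and principles relevant to this "
+           "question.\n"
+           "Step 2) Use the abstractions to reason through.",
+ }
+
+ def with_technique(question, technique):
+     """Append a prompt-engineering instruction to a task question."""
+     return question + "\n" + SUFFIXES[technique]
+ ```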
169
+
170
+ We focused on examining two specific behaviors that are hypothesized to improve with the inclusion of reasoning steps. These behaviors are the models' performance in the probabilistic reasoning task and their model-basedness.
171
+
172
+ We evaluated five specific LLMs: GPT-4, PaLM-2 for text (text-bison@002), Claude-1/2, and LLaMA-2, applying CoT and SB techniques and comparing the outcomes with their base models. The selection of these five models and a limited set of metrics was necessitated by the additional engineering effort required to process the outputs when using these techniques. The choice of LLMs aimed at ensuring a diverse representation of established models, considering the complexity of our benchmark tasks and the potential for erratic outputs from smaller LLMs when given the freedom to reason. For a comprehensive explanation of the querying process for these models, please refer to Appendix D.
+
+ ![](images/d73ea3f0c7960f8dd911cf60a13b998fdc68733e5a7b049f8f300d8b442d4096.jpg)
+
+ ![](images/f17907fe14afa49f0f98efdf737b4560901d8c271c6b4941aa7226803748456b.jpg)
+ Figure 5. Difference of chain-of-thought and take-a-step-back prompting to baseline models on A: Posterior accuracy, B: Model-basedness. The aggregated scores are computed using a weighted average of all five models using inverse-variance weighting. (The legend distinguishes each model with CoT and with SB: GPT-4, text-bison, Claude-2, Claude-1, LLaMA-2-70, plus the aggregated scores.)
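+
+ For reference, inverse-variance weighting combines per-model effect estimates $d_i$ with variances $\sigma_i^2$ into the standard weighted mean (our notation; the paper does not spell out the formula):
+
+ $$
+ \bar{d} = \frac{\sum_i d_i / \sigma_i^2}{\sum_i 1 / \sigma_i^2}.
+ $$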
190
+
191
+ Our investigation initially focused on probabilistic reasoning, which is a fundamental cognitive ability in decision-making. This ability facilitates the optimal integration of new information with pre-existing knowledge. We used the performance metric from the probabilistic reasoning experiment, namely posterior accuracy, which is calculated as one minus the deviation from the Bayes optimal prediction for each task. As depicted in Figure 5A, both CoT and SB techniques generally enhanced probabilistic reasoning compared to their base models, with CoT showing an average increase of $9.01\%$ and SB showing an increase of $3.10\%$.
192
+
193
+ Furthermore, we discovered that model-basedness, a critical aspect of reasoning and planning, is significantly augmented by both CoT and SB techniques, as shown in Figure 5B. Specifically, CoT demonstrated a $64.59\%$ increase, while SB showed a substantial increase of $118.59\%$.
194
+
195
+ Interestingly, a closer examination of the figures and the numerical data suggests that CoT is more effective for probabilistic reasoning, while SB excels in enhancing model-basedness. This observation aligns with the notion that step-by-step thinking can aid mathematical reasoning (Kojima et al., 2022) while abstracting a problem by taking a step back can foster a better representation of the problem's abstract structure. However, it is important to note that this analysis only serves as an initial observation. It does, nonetheless, highlight potential future applications of CogBench and illustrates how examining specific behaviors can provide valuable context, potentially guiding future decisions on the selection of one reasoning technique over another.
196
+
197
+ # 7. Discussion
198
+
199
+ We have presented CogBench, a new open-source benchmark for evaluating LLMs. CogBench is rooted in well-established experimental paradigms from the cognitive psychology literature, providing a unique set of advantages over traditional LLM benchmarks. First, it is based on tried-and-tested experiments whose measures have been extensively validated over many years and shown to capture general cognitive constructs. In addition, unlike standard benchmarks, CogBench does not only focus on performance metrics alone but also comes with behavioral metrics that allow us to gain insights into how a given task is solved. Finally, many of the included problems are procedurally-generated, thereby making it hard to game our benchmark by training on the test set. All our code and analysis will be publicly available, making it easy to use CogBench for the LLM community.
200
+
201
+ Our analyses yielded several key findings: as expected, RLHF enhanced the human-likeness of LLMs, while the number of parameters improved their performance and model-basedness. However, we also found surprising results. Despite expectations, code fine-tuning did not influence performance or model-basedness and open-source models exhibited less risk-taking behavior. Further, we found CoT prompting to be a promising choice for enhancing probabilistic reasoning. Conversely, SB prompting proved more effective for model-based reasoning.
202
+
203
+ While these results demonstrate the versatility of our benchmark, our analysis also faces several challenges. For instance, the limited transparency of certain proprietary models poses an issue to our regression analysis because acquiring details about certain models can be difficult or impossible. This lack of transparency could potentially affect the precision of our analysis. It also underscores the need for more transparency to facilitate more thorough and accurate evaluations (LAION, 2024; Binz et al., 2023).
204
+
205
+ Taken together, our study highlights the importance of behavioral metrics and cognitive modeling in evaluating LLMs and presents a novel benchmark for this purpose. The analysis was preliminary and intended to provide a broad view of how CogBench can be used. The primary aim of this work is to equip the LLM community with new tools, inspired by cognitive science, to evaluate their models more comprehensively. Future work should focus on three areas. First, while cognitive science studies have demonstrated the external validity of the investigated tasks, it is yet to be shown for LLMs. Furthermore, we aim to extend the set of included tasks to cover a broader set of domains. Finally, we plan to properly automate our benchmark, mostly for prompt engineering techniques that were only briefly examined in this study. This could include studying the influence of impersonation (Salewski et al., 2023), meta-in-context learning (Coda-Forno et al., 2024), and explanations (Lampinen et al., 2022) on LLMs.
208
+
209
+ # References
210
+
211
+ Akata, E., Schulz, L., Coda-Forno, J., Oh, S. J., Bethge, M., and Schulz, E. Playing repeated games with large language models, 2023.
212
+ Almazrouei, E., Alobeidli, H., Alshamsi, A., Cappelli, A., Cojocaru, R., Debbah, M., Goffinet, E., Heslow, D., Launay, J., Malartic, Q., Noune, B., Pannier, B., and Penedo, G. Falcon-40B: an open large language model with state-of-the-art performance. 2023.
213
+ Anthropic. Claude 2. Blog post, 2023. URL https://www.anthropic.com/news/claude-2. Accessed: 2024-01-19.
214
+ Binz, M. and Schulz, E. Using cognitive psychology to understand gpt-3. Proceedings of the National Academy of Sciences, 120(6):e2218523120, 2023.
215
+ Binz, M., Alaniz, S., Roskies, A., Aczel, B., Bergstrom, C. T., Allen, C., Schad, D., Wulff, D., West, J. D., Zhang, Q., et al. How should the advent of large language models affect the practice of science? arXiv preprint arXiv:2312.03759, 2023.
216
+ Bommasani, R., Hudson, D. A., Adeli, E., Altman, R., Arora, S., von Arx, S., Bernstein, M. S., Bohg, J., Bosselut, A., Brunskill, E., et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021.
217
+ Brändle, F., Binz, M., and Schulz, E. Exploration beyond bandits. The drive for knowledge: The science of human information seeking, pp. 147-168, 2021.
218
+ Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33: 1877-1901, 2020.
219
+ Bubeck, S., Chandrasekaran, V., Eldan, R., Gehrke, J., Horvitz, E., Kamar, E., Lee, P., Lee, Y. T., Li, Y., Lundberg, S., et al. Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712, 2023.
220
+ Burnell, R., Schellaert, W., Burden, J., Ullman, T. D., Martinez-Plumed, F., Tenenbaum, J. B., Rutar, D., Cheke, L. G., Sohl-Dickstein, J., Mitchell, M., et al. Rethink reporting of evaluation results in ai. Science, 380(6641): 136-138, 2023.
221
+ Buschoff, L. M. S., Akata, E., Bethge, M., and Schulz, E. Visual cognition in multimodal large language models, 2024.
222
+
223
+ Carpenter, J., Sherman, M. T., Kievit, R. A., Seth, A. K., Lau, H., and Fleming, S. M. Domain-general enhancements of metacognitive ability through adaptive training. Journal of Experimental Psychology: General, 148(1):51, 2019.
224
+ Cavagnaro, D. R., Aranovich, G. J., McClure, S. M., Pitt, M. A., and Myung, J. I. On the functional form of temporal discounting: An optimized adaptive test. Journal of Risk and Uncertainty, 52:233-254, 2016.
225
+ Chen, M., Tworek, J., Jun, H., Yuan, Q., de Oliveira Pinto, H. P., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., Ray, A., Puri, R., Krueger, G., Petrov, M., Khlaaf, H., Sastry, G., Mishkin, P., Chan, B., Gray, S., Ryder, N., Pavlov, M., Power, A., Kaiser, L., Bavarian, M., Winter, C., Tillet, P., Such, F. P., Cummings, D., Plappert, M., Chantzis, F., Barnes, E., Herbert-Voss, A., Guss, W. H., Nichol, A., Paino, A., Tezak, N., Tang, J., Babuschkin, I., Balaji, S., Jain, S., Saunders, W., Hesse, C., Carr, A. N., Leike, J., Achiam, J., Misra, V., Morikawa, E., Radford, A., Knight, M., Brundage, M., Murati, M., Mayer, K., Welinder, P., McGrew, B., Amodei, D., McCandlish, S., Sutskever, I., and Zaremba, W. Evaluating large language models trained on code, 2021.
226
+ Chen, Y., Liu, T. X., Shan, Y., and Zhong, S. The emergence of economic rationality of gpt. Proceedings of the National Academy of Sciences, 120(51):e2316205120, 2023. doi: 10.1073/pnas.2316205120. URL https://www.pnas.org/doi/abs/10.1073/pnas.2316205120.
227
+ Christiano, P. F., Leike, J., Brown, T., Martic, M., Legg, S., and Amodei, D. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017.
228
+ Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems, 2021.
229
+ Coda-Forno, J., Witte, K., Jagadish, A. K., Binz, M., Akata, Z., and Schulz, E. Inducing anxiety in large language models increases exploration and bias. arXiv preprint arXiv:2304.11111, 2023.
230
+ Coda-Forno, J., Binz, M., Akata, Z., Botvinick, M., Wang, J., and Schulz, E. Meta-in-context learning in large language models. Advances in Neural Information Processing Systems, 36, 2024.
231
+ Collins, K. M., Wong, C., Feng, J., Wei, M., and Tenenbaum, J. B. Structured, flexible, and robust: benchmarking and improving large language models towards more human-like behavior in out-of-distribution reasoning tasks. arXiv preprint arXiv:2205.05718, 2022.
234
+ Dasgupta, I., Schulz, E., Tenenbaum, J. B., and Gershman, S. J. A theory of learning to infer. Psychological review, 127(3):412, 2020.
235
+ Dasgupta, I., Lampinen, A. K., Chan, S. C., Creswell, A., Kumaran, D., McClelland, J. L., and Hill, F. Language models show human-like content effects on reasoning. arXiv preprint arXiv:2207.07051, 2022.
236
+ Daw, N. D., Gershman, S. J., Seymour, B., Dayan, P., and Dolan, R. J. Model-based influences on humans' choices and striatal prediction errors. Neuron, 69(6):1204-1215, 2011.
237
+ Ershadmanesh, S., Gholamzadeh, A., Desender, K., and Dayan, P. Meta-cognitive efficiency in learned value-based choice. In 2023 Conference on Cognitive Computational Neuroscience, pp. 29-32, 2023. doi: 10.32470/CCN.2023.1570-0. URL https://hdl.handle.net/21.11116/0000-000D-5BC7-D.
238
+ Gershman, S. J. Deconstructing the human algorithms for exploration. Cognition, 173:34-42, 2018.
239
+ Google. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023.
240
+ Hagendorff, T., Fabi, S., and Kosinski, M. Human-like intuitive behavior and reasoning biases emerged in large language models but disappeared in chatgpt. Nature Computational Science, 3(10):833-838, 2023.
241
+ Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multitask language understanding, 2021.
242
+ Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension, 2017.
243
+ Kaplan, J., McCandlish, S., Henighan, T., Brown, T. B., Chess, B., Child, R., Gray, S., Radford, A., Wu, J., and Amodei, D. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020.
244
+ Kojima, T., Gu, S. S., Reid, M., Matsuo, Y., and Iwasawa, Y. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35: 22199-22213, 2022.
245
+ LAION. Towards a transparent ai future: The call for less regulatory hurdles on open-source ai in europe. Available at: https://laion.ai/blog/transparent-ai/, 2024. Accessed: January 19, 2024.
246
+
247
+ Lampinen, A. K., Dasgupta, I., Chan, S. C., Matthewson, K., Tessler, M. H., Creswell, A., McClelland, J. L., Wang, J. X., and Hill, F. Can language models learn from explanations in context? arXiv preprint arXiv:2204.02329, 2022.
248
+ Lefebvre, G., Lebreton, M., Meyniel, F., Bourgeois-Gironde, S., and Palminteri, S. Behavioural and neural characterization of optimistic reinforcement learning. Nature Human Behaviour, 1(4):0067, 2017.
249
+ Lejuez, C. W. et al. Evaluation of a behavioral measure of risk taking: the balloon analogue risk task (bart). Journal of experimental psychology. Applied, 8(2):75-84, 2002. doi: 10.1037//1076-898x.8.2.75.
250
+ Liu, X., Wang, J., Sun, J., Yuan, X., Dong, G., Di, P., Wang, W., and Wang, D. Prompting frameworks for large language models: A survey, 2023.
251
+ Massey, C. and Wu, G. Detecting regime shifts: The causes of under-and overreaction. Management Science, 51(6): 932-947, 2005.
252
+ McCoy, R. T., Yao, S., Friedman, D., Hardy, M., and Griffiths, T. L. Embers of autoregression: Understanding large language models through the problem they are trained to solve. arXiv preprint arXiv:2309.13638, 2023.
253
+ McInnes, L., Healy, J., and Melville, J. Umap: Uniform manifold approximation and projection for dimension reduction, 2020.
254
+ Montague, P. R., Dolan, R. J., Friston, K. J., and Dayan, P. Computational psychiatry. Trends in cognitive sciences, 16(1):72-80, 2012.
255
+ MosaicML. Introducing mpt-30b: Raising the bar for open-source foundation models. Blog post, 2023. URL www.mosaicml.com/blog/mpt-30b. Accessed: 2023-06-22.
256
+ Nardo, C. The waluigi effect (mega-post). Available at: https://www.lesswrong.com/posts/D7PumeYTDPfBTp3i7/the-waluigi-effect-mega-post, 2024. Accessed: January 19, 2024.
257
+ OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
258
+ Palminteri, S. and Lebreton, M. The computational roots of positivity and confirmation biases in reinforcement learning. Trends in Cognitive Sciences, 2022.
259
+ Patzelt, E. H., Hartley, C. A., and Gershman, S. J. Computational phenotyping: using models to understand individual differences in personality, development, and mental illness. Personality Neuroscience, 1:e18, 2018.
260
+
261
+ Rescorla, R. A. Classical conditioning II: Current research and theory. pp. 64, 1972.
262
+ Ruggeri, K., Panin, A., Vdovic, M., Večkalov, B., Abdul-Salaam, N., Achterberg, J., Akil, C., Amatya, J., Amatya, K., Andersen, T. L., et al. The globalizability of temporal discounting. Nature Human Behaviour, 6(10):1386-1397, 2022.
263
+ Salewski, L., Alaniz, S., Rio-Torto, I., Schulz, E., and Akata, Z. In-context impersonation reveals large language models' strengths and biases. arXiv preprint arXiv:2305.14930, 2023.
264
+ Schaeffer, R., Miranda, B., and Koyejo, S. Are emergent abilities of large language models a mirage? arXiv preprint arXiv:2304.15004, 2023.
265
+ Schurr, R., Reznik, D., Hillman, H., Bhui, R., and Gershman, S. J. Dynamic computational phenotyping of human cognition. 2023.
266
+ Shekhar, M. and Rahnev, D. Sources of metacognitive inefficiency. Trends in Cognitive Sciences, 25(1):12-23, 2021.
267
+ Srivastava, A. and authors. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models, 2023.
268
+ Tamkin, A., Brundage, M., Clark, J., and Ganguli, D. Understanding the capabilities, limitations, and societal impact of large language models. arXiv preprint arXiv:2102.02503, 2021.
269
+ Touvron, H., Martin, L., Stone, K., Albert, P., Almahairi, A., Babaei, Y., Bashlykov, N., Batra, S., Bhargava, P., Bhosale, S., et al. Llama 2: Open foundation and finetuned chat models. arXiv preprint arXiv:2307.09288, 2023.
270
+ Ullman, T. Large language models fail on trivial alterations to theory-of-mind tasks. arXiv preprint arXiv:2302.08399, 2023.
271
+ Wei, J., Tay, Y., Bommasani, R., Raffel, C., Zoph, B., Borgeaud, S., Yogatama, D., Bosma, M., Zhou, D., Metzler, D., et al. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682, 2022.
272
+ Wei, J., Wang, X., Schuurmans, D., Bosma, M., Ichter, B., Xia, F., Chi, E., Le, Q., and Zhou, D. Chain-of-thought prompting elicits reasoning in large language models, 2023.
273
+ Wilson, R. C., Geana, A., White, J. M., Ludvig, E. A., and Cohen, J. D. Humans use directed and random exploration to solve the explore-exploit dilemma. Journal of Experimental Psychology: General, 143(6):2074, 2014.
274
+
275
+ Yax, N., Anlló, H., and Palminteri, S. Studying and improving reasoning in humans and machines. arXiv preprint arXiv:2309.12485, 2023.
276
+ Zheng, H. S., Mishra, S., Chen, X., Cheng, H.-T., Chi, E. H., Le, Q. V., and Zhou, D. Take a step back: Evoking reasoning via abstraction in large language models, 2023a.
277
+ Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv:2306.05685, 2023b.
278
+
279
+ # A. List of LLMs used
280
+
281
+ <table><tr><td>Model Name</td><td>No. of Parameters (B)</td><td>Finetuned LLM</td><td>Use of RLHF</td><td>Open Source</td><td>Size of Dataset (T tokens)</td><td>Context Length (k tokens)</td><td>Conversational</td><td>Code</td></tr><tr><td>GPT-4</td><td>1760</td><td>No</td><td>Yes</td><td>No</td><td>1.56</td><td>8</td><td>No</td><td>No</td></tr><tr><td>text-davinci-003</td><td>170</td><td>No</td><td>Yes</td><td>No</td><td>1.37</td><td>4</td><td>No</td><td>No</td></tr><tr><td>text-davinci-002</td><td>170</td><td>No</td><td>No</td><td>No</td><td>1.37</td><td>4</td><td>No</td><td>No</td></tr><tr><td>Claude-1</td><td>100</td><td>No</td><td>RLAIF</td><td>No</td><td>3.7</td><td>100</td><td>No</td><td>No</td></tr><tr><td>Claude-2</td><td>200</td><td>No</td><td>RLAIF</td><td>No</td><td>7.4</td><td>100</td><td>No</td><td>No</td></tr><tr><td>text-bison@002</td><td>340</td><td>No</td><td>Yes</td><td>No</td><td>1.4</td><td>8</td><td>No</td><td>No</td></tr><tr><td>Falcon-40b</td><td>40</td><td>No</td><td>No</td><td>Yes</td><td>0.54</td><td>2</td><td>No</td><td>No</td></tr><tr><td>Falcon-40b-instruct</td><td>40</td><td>Falcon-40b</td><td>No</td><td>Yes</td><td>0.6</td><td>2</td><td>No</td><td>No</td></tr><tr><td>MPT-30b</td><td>30</td><td>No</td><td>No</td><td>Yes</td><td>1.76</td><td>8</td><td>No</td><td>No</td></tr><tr><td>MPT-30b-instruct</td><td>30</td><td>MPT-30b</td><td>No</td><td>Yes</td><td>1.8</td><td>8</td><td>No</td><td>No</td></tr><tr><td>MPT-30b-chat</td><td>30</td><td>MPT-30b</td><td>No</td><td>Yes</td><td>1.8</td><td>8</td><td>Yes</td><td>No</td></tr><tr><td>LLaMA-2-70</td><td>70</td><td>No</td><td>No</td><td>Yes</td><td>2</td><td>4</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-13</td><td>13</td><td>No</td><td>No</td><td>Yes</td><td>2</td><td>4</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-7</td><td>7</td><td>No</td><td>No</td><td>Yes</td><td>2</td><td>4</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-70-chat</td><td>70</td><td>Yes</td><td>Yes</td><td>Yes</td><td>2</td><td>4</td><td>Yes</td><td>No</td></tr><tr><td>LLaMA-2-13-chat</td><td>13</td><td>Yes</td><td>Yes</td><td>Yes</td><td>2</td><td>4</td><td>Yes</td><td>No</td></tr><tr><td>LLaMA-2-7-chat</td><td>7</td><td>Yes</td><td>Yes</td><td>Yes</td><td>2</td><td>4</td><td>Yes</td><td>No</td></tr><tr><td>Vicuna-7b-v1.5</td><td>7</td><td>LLaMA-2-7</td><td>Yes</td><td>Yes</td><td>2.37</td><td>4</td><td>Yes</td><td>No</td></tr><tr><td>Vicuna-13b-v1.5</td><td>13</td><td>LLaMA-2-13</td><td>Yes</td><td>Yes</td><td>2.37</td><td>4</td><td>Yes</td><td>No</td></tr><tr><td>LLaMA-2-7b-longlora-100k-ft</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2</td><td>100</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-7b-longlora-8k-ft</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2</td><td>8</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-7b-longlora-16k-ft</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2</td><td>16</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-7b-longlora-32k-ft</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2</td><td>32</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-7b-longlora-32k</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2</td><td>32</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-13b-longlora-32k-ft</td><td>13</td><td>LLaMA-2-13</td><td>No</td><td>Yes</td><td>2</td><td>32</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-13b-longlora-64k</td><td>13</td><td>LLaMA-2-13</td><td>No</td><td>Yes</td><td>2</td><td>64</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-13b-longlora-32k</td><td>13</td><td>LLaMA-2-13</td><td>No</td><td>Yes</td><td>2</td><td>32</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-70b-longlora-32k</td><td>70</td><td>LLaMA-2-70</td><td>No</td><td>Yes</td><td>2</td><td>32</td><td>No</td><td>No</td></tr><tr><td>LLaMA-2-70b-chat-longlora-32k</td><td>70</td><td>LLaMA-2-70-chat</td><td>Yes</td><td>Yes</td><td>2</td><td>32</td><td>Yes</td><td>No</td></tr><tr><td>LongAlpaca-7B</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2</td><td>16</td><td>Yes</td><td>No</td></tr><tr><td>LongAlpaca-13B</td><td>13</td><td>LLaMA-2-13</td><td>No</td><td>Yes</td><td>2</td><td>16</td><td>Yes</td><td>No</td></tr><tr><td>LongAlpaca-70B</td><td>70</td><td>LLaMA-2-70</td><td>No</td><td>Yes</td><td>2</td><td>16</td><td>Yes</td><td>No</td></tr><tr><td>CodeLlama-7B</td><td>7</td><td>LLaMA-2-7</td><td>No</td><td>Yes</td><td>2.5</td><td>16</td><td>No</td><td>Yes</td></tr><tr><td>CodeLlama-13B</td><td>13</td><td>LLaMA-2-13</td><td>No</td><td>Yes</td><td>2.5</td><td>16</td><td>No</td><td>Yes</td></tr><tr><td>CodeLlama-34B</td><td>34</td><td>LLaMA-2-34</td><td>No</td><td>Yes</td><td>2.5</td><td>16</td><td>No</td><td>Yes</td></tr></table>
282
+
283
+ This table lists the 35 LLMs used in this paper together with their features, where:
284
+
285
+ - No. of Parameters: This represents the number of parameters in the model, expressed in billions.
286
+ - Finetuned LLM: This column indicates whether the model is a fine-tuned version of another model. If it is, the name of the original model from which it was fine-tuned is provided. If it is not a fine-tuned model, 'No' is written. However, if the model serves as the base model for another model listed in this table, 'Yes' is written.
287
+ - Use of RLHF: This column specifies whether Reinforcement Learning from Human Feedback (RLHF) was used in the training of the model.
288
+ - Open Source: This indicates whether the model is open source, meaning we have access to the weights of the model.
289
+ - Size of Dataset: This represents the size of the dataset on which the model was trained, expressed in trillions of tokens.
290
+ - Context Length: The length of the context window available to the model during its operation, expressed in thousands of tokens.
291
+ - Conversational: This indicates whether the model was fine-tuned with conversational datasets.
292
+ - Code: This indicates whether the model was fine-tuned with code datasets.
293
+
294
+ Please note that the selection of features used for our analyses was made based on the best available knowledge of the authors, as some information about certain models can be challenging to obtain. This limitation could potentially impact the precision of the regression analysis. It underscores the need for greater transparency about LLMs to facilitate more thorough evaluations.
295
+
296
+ # B. Comprehensive list & explanation of the cognitive experiments
297
+
298
+ # B.1. Probabilistic reasoning (Dasgupta et al., 2020) - Prior & likelihood weighting
299
+
300
+ # B.1.1. SUMMARY
301
+
302
+ This experiment tests how agents update beliefs based on new evidence. Participants are given a wheel of fortune (representing initial prior probabilities) and two urns with different colored ball distributions (representing likelihoods). Upon drawing a ball, participants can revise their belief about the chosen urn, considering both the wheel (prior) and the ball color (evidence). The task allows testing adaptability to different prior/likelihood scenarios by changing the wheel division and ball distributions. Agents have to estimate the probability of the drawn ball's urn. We use this task to estimate an agent's prior and likelihood weightings. In this task, people weight the prior and the likelihood similarly, with both weights below one; this underweighting is often referred to as system neglect (Massey & Wu, 2005).
303
+
304
+ # B.1.2. METHODS
305
+
306
+ We matched the probabilities used in Dasgupta et al. (2020) so that we can compare to human data. Their design used either an informative likelihood ($P(\text{left urn} \mid \text{red}) = 0.7$, $0.8$, or $0.9$) combined with a weak prior ($P(\text{left urn}) = 0.5$ or $0.6$), or vice versa. Because they also trained humans on this experiment, we only compared against human data from the first trial: we are not interested in learning here but in how an LLM weighs its prior and likelihood by default. The default number of simulations was 100.
307
+
308
+ # B.1.3. PROMPTS FOR LLMS
309
+
310
+ # Example with informative likelihood
311
+
312
+ You are participating in an experiment where you are provided with a wheel of fortune and two urns. The wheel of fortune contains 10 evenly sized sections labeled either F or J, corresponding to the urns F and J. Another person will spin the wheel of fortune, select an urn based on the outcome of the spin, and then randomly pick a ball from the selected urn. Your goal is to give your best estimate of the probability of the urn being F after observing the ball drawn from the urn.
313
+
314
+ Q: The wheel of fortune contains 6 sections labeled F and 4 sections labeled J. The urn F contains (8, 2) and the urn J contains (2, 8) red/blue balls. A red ball was drawn. What is the probability that it was drawn from Urn F? (Give your probability estimate on the scale from 0 to 1 rounded to two decimal places).
315
+
316
+ A: I estimate the probability of the red ball to be drawn from the urn F to be 0.
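+
+ For reference, the Bayes-optimal answer to this example can be computed directly: the prior is $P(F) = 0.6$ and the likelihoods are $P(\text{red} \mid F) = 0.8$ and $P(\text{red} \mid J) = 0.2$, so
+
+ $$
+ P(F \mid \text{red}) = \frac{0.6 \cdot 0.8}{0.6 \cdot 0.8 + 0.4 \cdot 0.2} = \frac{0.48}{0.56} \approx 0.86.
+ $$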
317
+
318
+ # B.1.4. METRICS
319
+
320
+ Performance: Calculated as the posterior accuracy, i.e., one minus the deviation from the Bayes-optimal posterior prediction.
321
+
322
+ Behaviours 1 & 2: Prior and likelihood weightings. A generalized version of Bayes' rule considers prior ($\beta_1$) and likelihood ($\beta_2$) weightings to account for biases in Bayesian updating:
323
+
324
+ $$
+ P(A \mid B) \propto P(B \mid A)^{\beta_2} \cdot P(A)^{\beta_1}
+ $$
327
+
328
+ For analytical convenience, this model can be reformulated as linear in log-odds. By fitting this model to the data using least-squares linear regression, we can obtain maximum-likelihood estimates of the prior and likelihood weightings (a code sketch of the fitting procedure follows the symbol definitions below):
329
+
330
+ $$
+ \log \left( \frac{P(\mathrm{UrnF} \mid \mathrm{Ball})}{1 - P(\mathrm{UrnF} \mid \mathrm{Ball})} \right) = \beta_0 + \beta_1 \log \left( \frac{P(\mathrm{UrnF})}{1 - P(\mathrm{UrnF})} \right) + \beta_2 \log \left( \frac{P(\mathrm{Ball} \mid \mathrm{UrnF})}{P(\mathrm{Ball} \mid \mathrm{UrnJ})} \right)
+ $$
333
+
334
+ - $P(\mathrm{UrnF|Ball})$ is the subjective probability judgment of the urn being 'F' given the ball's color.
335
+ - $P(\mathrm{UrnF})$ and $P(\mathrm{Ball|UrnF})$ are the prior probability and likelihood, respectively.
336
+
337
+ - $\beta_{1}$ and $\beta_{2}$ are the prior and likelihood weightings, respectively, which are given as exponents in a generalized version of Bayes' rule to capture specific biases. These two coefficients are the two behavioral metrics we report for this experiment.
338
+ - $\beta_0$ is the intercept term.
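+
+ A sketch of this fitting procedure (variable names are ours; the probability judgments `p_hat` are clipped to keep the log-odds finite):
+
+ ```python
+ import numpy as np
+
+ def fit_weightings(p_hat, prior, lik_f, lik_j, eps=1e-3):
+     """Least-squares fit of the intercept (beta0), prior weighting (beta1),
+     and likelihood weighting (beta2) from probability judgments."""
+     p = np.clip(np.asarray(p_hat, float), eps, 1 - eps)
+     y = np.log(p / (1 - p))                              # posterior log-odds
+     prior = np.asarray(prior, float)
+     x1 = np.log(prior / (1 - prior))                     # prior log-odds
+     x2 = np.log(np.asarray(lik_f) / np.asarray(lik_j))   # log-likelihood ratio
+     X = np.column_stack([np.ones_like(x1), x1, x2])
+     beta, *_ = np.linalg.lstsq(X, y, rcond=None)
+     return beta  # [beta0, beta1, beta2]
+ ```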
339
+
340
+ # B.2. Horizon task (Wilson et al., 2014) - Directed & random exploration
341
+
342
+ # B.2.1. SUMMARY
343
+
344
+ This task is a two-armed bandit task with stationary reward distributions. Agents first observe four reward values of randomly determined options, followed by making either one or six additional choices. We use this task to measure whether an agent uses uncertainty to guide its exploration behavior (directed exploration) and/or whether it injects noise into its policy to explore (random exploration). People are known to rely on a combination of both strategies when exploring (Wilson et al., 2014; Brändle et al., 2021).
345
+
346
+ # B.2.2. METHODS
347
+
348
+ We followed the same methods for prompting LLMs as in Binz & Schulz (2023). In the Horizon task, two distinct contexts are presented to participants, each differing in their time horizons. Each game involves 4 forced-choice trials, after which participants are given the opportunity to make a single choice (in the horizon 1 scenario) or six consecutive choices (in the horizon 6 scenario). The 4 forced-choice trials either offer one observation from one option and three from the other (unequal information condition), or two observations from each option (equal information condition).
349
+
350
+ The design of the horizon 1 and horizon 6 scenarios inherently provides a baseline for pure exploitation. Furthermore, the equal and unequal information conditions are designed to differentiate between directed and random exploration by examining the decision made in the first trial. In the equal information condition, a choice is categorized as random exploration if it aligns with the option with the lower average. Conversely, in the unequal information condition, a choice is classified as directed exploration if it aligns with the option that was observed less frequently during the forced-choice trials.
351
+
352
+ Our default number of simulations was 100.
353
+
354
+ # B.2.3. PROMPTS FOR LLMS
355
+
356
+ # Example with horizon 1 scenario
357
+
358
+ You are going to a casino that owns two slot machines. You earn money each time you play on one of these machines.
359
+
360
+ You have received the following amount of dollars when playing in the past:
361
+
362
+ - Machine J delivered 15 dollars.
363
+ - Machine F delivered 37 dollars.
364
+ - Machine F delivered 28 dollars.
365
+ - Machine J delivered 11 dollars.
366
+
367
+ Your goal is to maximize the sum of received dollars within one additional round.
368
+
369
+ Q: Which machine do you choose?
370
+
371
+ A: Machine
372
+
373
+ # B.2.4. METRICS
374
+
375
+ Performance: Average delivered dollars.
376
+
377
+ Behaviour 1 - Directed Exploration: This metric is analyzed in the unequal information condition. Here, a regression is performed on the choice variable using three regressors:
378
+
379
+ - x1 represents the difference in rewards,
380
+ - x2 represents the horizon (binary variable), and
381
+ - x3 is the interaction term of x1 and x2 (i.e., $x1 \times x2$ ).
382
+
383
+ The beta coefficient for x2 (the horizon indicator) is then extracted as the measure of directed exploration.
384
+
385
+ Behaviour 2 - Random exploration: We follow the same procedure as for directed exploration but in the equal information condition. In this case, however, the beta coefficient for x3 (the interaction between the reward difference and the horizon) provides the measure of random exploration.
386
+
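+ A minimal sketch of these regressions follows (hypothetical data, not the benchmark's code; for simplicity it uses ordinary least squares on the binary choice, where a logistic regression would work analogously):
+
+ ```python
+ import numpy as np
+
+ def exploration_betas(reward_diff, horizon, choice):
+     """Regress the first free choice on x1 (reward difference), x2 (horizon),
+     and x3 = x1 * x2; returns (beta_x1, beta_x2, beta_x3)."""
+     X = np.column_stack([
+         np.ones_like(reward_diff),  # intercept
+         reward_diff,                # x1
+         horizon,                    # x2
+         reward_diff * horizon,      # x3 = x1 * x2
+     ])
+     beta = np.linalg.lstsq(X, choice, rcond=None)[0]
+     return beta[1], beta[2], beta[3]
+
+ # Hypothetical games: unequal-information games yield beta_x2 (directed
+ # exploration); equal-information games yield beta_x3 (random exploration).
+ rng = np.random.default_rng(0)
+ x1 = rng.normal(size=100)                        # reward difference
+ x2 = rng.integers(0, 2, size=100).astype(float)  # horizon indicator
+ y = (rng.random(100) < 0.5).astype(float)        # first free choice
+ print(exploration_betas(x1, x2, y))
+ ```
+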
387
+ # B.3. Restless bandit task (Ershadmanesh et al., 2023) - Meta-cognition
388
+
389
+ # B.3.1. SUMMARY
390
+
391
+ This is a two-armed bandit task with non-stationary reward distributions. There is always one option with a higher average reward. Every few trials a switch between the reward distributions of the two options occurs. Agents furthermore have to indicate after each choice how confident they are in their decisions. We use this task to measure meta-cognition, which indicates whether an agent can assess the quality of its own cognitive abilities. People generally display this ability but its extent is influenced by various internal and external factors (Shekhar & Rahnev, 2021).
392
+
393
+ # B.3.2. METHODS
394
+
395
+ In each trial, LLMs are tasked with choosing between two arms: one samples its reward from a normal distribution $N(60, 8)$, while the other samples from $N(40, 8)$. LLMs are informed that the slot machine with the higher average reward changes every 18-22 trials.
396
+
397
+ Additionally, in each trial, LLMs must express their confidence in their choice on a scale from 0 to 1, as opposed to humans who use a Likert scale. The task is composed of 4 blocks, each containing 18-22 trials, resulting in approximately 80 trials in total. This is in contrast to the human task, which consists of 20 blocks for a total of 400 trials. The decision to limit the number of trials was made due to context size restrictions for some LLMs.
398
+
399
+ Our default number of simulations was 10.
400
+
401
+ # B.3.3. PROMPTS FOR LLMS
402
+
403
+ # Example for reporting confidence at trial 23
404
+
405
+ Q: You are going to a casino that owns two slot machines named machine J and F. You earn dollars $ each time you play on one of these machines with one machine always having a higher average $ reward. Every 18 to 22 trials a switch of block takes place and the other slot machine will now give the higher point reward on average. However, you are not told about the change of block. After each choice, you have to indicate how confident you were about your choice being the best on a scale from 0 to 1. The casino includes 4 blocks of 18 to 22 trials, for a total of 80 trials 't'. Your goal is to interact with both machines and optimize your $ as much as possible by identifying the best machine at a given point in time which comes in hand with being attentive to a potential change of block. The rewards will range between 20$ and 80$.
406
+
407
+ You have received the following amount of $ when playing in the past:
408
+
409
+ t=1: You chose J with a reported confidence of 0.43. It rewarded 54 $
410
+ t=2: You chose J with a reported confidence of 0.53. It rewarded 57 $
411
+ t=3: You chose J with a reported confidence of 0.88. It rewarded 70 $.
412
+
413
+ ···
414
+
415
+ t=17: You chose F with a reported confidence of 0.99. It rewarded 59 $.
416
+ t=18: You chose F with a reported confidence of 0.44. It rewarded 45 $.
417
+ t=19: You chose J with a reported confidence of 0.06. It rewarded 61 $.
418
+ t=20: You chose J with a reported confidence of 0.51. It rewarded 64 $.
419
+ t=21: You chose J with a reported confidence of 0.37. It rewarded 59 $.
420
+ t=22: You chose J with a reported confidence of 0.54. It rewarded 42 $.
421
+
422
+ Q: You are now in trial $t = 23$. Which machine do you choose between machine J and F? (Think carefully remembering that exploration of both machines is required for optimal rewards. Give the answer in the form 'Machine <your choice>').
423
+
424
+ A: Machine F.
425
+
426
+ Q: How confident are you about your choice being the best on a continuous scale running from 0 representing "this was a guess' to 1 representing 'very certain'? (Think carefully and give your answer to two decimal places)
427
+
428
+ A: On a scale from 0 to 1, I am confident at 0.
429
+
430
+ # B.3.4. METRICS
431
+
432
+ Performance: Accuracy of choosing the best arm at a given trial.
433
+
434
+ Behaviour - Meta-cognition: We measure the metacognitive sensitivity of a model with the adjusted QSR (Carpenter et al., 2019), defined as
435
+
436
+ $$
437
+ \mathrm{QSR} = 1 - \left(\text{accuracy} - \text{scaled confidence}\right)^{2}
438
+ $$
439
+
440
+ which is a standard metric for metacognitive sensitivity. The scaled confidence is computed as
441
+
442
+ $$
443
+ \text{scaled confidence} = \frac{\text{confidence} - \text{lowest reported confidence}}{\text{highest reported confidence} - \text{lowest reported confidence}}
444
+ $$
445
+
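+ A minimal sketch of the computation (hypothetical arrays; averaging across trials to obtain a single sensitivity score is our reading of the procedure):
+
+ ```python
+ import numpy as np
+
+ def adjusted_qsr(accuracy, confidence):
+     """accuracy: 0/1 correctness per trial; confidence: raw reports in [0, 1].
+     Confidence is min-max scaled to the model's own reported range."""
+     accuracy = np.asarray(accuracy, dtype=float)
+     confidence = np.asarray(confidence, dtype=float)
+     lo, hi = confidence.min(), confidence.max()
+     scaled = (confidence - lo) / (hi - lo) if hi > lo else np.zeros_like(confidence)
+     return np.mean(1 - (accuracy - scaled) ** 2)
+
+ print(adjusted_qsr([1, 0, 1, 1], [0.9, 0.3, 0.7, 0.5]))
+ ```
+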
446
+ # B.4. Experiment 2: Instrumental learning (Lefebvre et al., 2017) - Optimism bias & learning rate
447
+
448
+ # B.4.1. SUMMARY
449
+
450
+ Instrumental learning (Lefebvre et al., 2017): LLMs encounter four two-armed bandit problems in an interleaved order. Each bandit problem is identified by a unique symbol pair. We use this task to investigate how an agent learns. First, we report the learning rate of the agent, which is common practice in two-armed bandits. Furthermore, we use it to reveal whether an agent learns more from positive than from negative prediction errors, i.e., whether it has an optimism bias. People commonly display asymmetric tendencies when updating their beliefs by showing higher learning rates after encountering positive prediction errors compared to negative ones (Palminteri & Lebreton, 2022).
453
+
454
+ # B.4.2. METHODS
455
+
456
+ As in (Lefebvre et al., 2017), the task consists of 4 two-armed bandits with 96 trials in total (24 per casino). Here we randomly sample (without replacement) two letters for each casino to avoid biases towards a given letter. We used a cover story involving a gambler visiting different casinos to generate our prompts. This choice was inspired by similar tasks for human experiments (Gershman, 2018) and LLMs (Binz & Schulz, 2023; Coda-Forno et al., 2024). Our default number of simulations per LLM is 10.
457
+
458
+ Casinos have the same reward probabilities as in the paper's first experiment: all arms have a probability $\mathrm{P} = 0.75$ or $0.25$ of winning 1 dollar and the complementary probability $(1 - \mathrm{P})$ of getting nothing. In two casinos, the reward probability was the same for both arms ('symmetric' conditions), and in the two other casinos it differed across symbols ('asymmetric' conditions).
459
+
460
+ # B.4.3. PROMPTS FOR LLMS
461
+
462
+ # Example for 5th trial
463
+
464
+ You are going to visit four different casinos (named 1, 2, 3, and 4) 24 times each. Each casino owns two slot machines which all return either 1 or 0 dollars stochastically with different reward probabilities. Your goal is to maximize the sum of received dollars within 96 visits.
465
+
466
+ You have received the following amount of dollars when playing in the past:
467
+
468
+ - Machine Q in Casino 4 delivered 0.0 dollars.
469
+ - Machine B in Casino 1 delivered 1.0 dollars.
470
+ - Machine B in Casino 1 delivered 0.0 dollars.
471
+ - Machine R in Casino 3 delivered 0.0 dollars.
472
+
473
+ Q: You are now in visit 5 playing in Casino 4. Which machine do you choose between Machine Q and Machine D? (Give the answer in the form "Machine <your choice>").
474
+
475
+ A: Machine
476
+
477
+ # B.4.4. METRICS
478
+
479
+ Performance: The performance is the average amount of money retrieved by the LLM.
480
+
481
+ Behaviour 1 - Learning rate: We fit a Rescorla-Wagner model (Rescorla, 1972), the standard approach for retrieving learning rates in two-armed bandits. This model assumes that decisions are made according to a softmax function over the predicted values of both arms. Each predicted value is updated using $\Delta V = \alpha \times$ prediction error, where $\Delta V$ represents the change in value and $\alpha$ denotes the learning rate. We report the learning rate which minimizes the negative log-likelihood.
482
+
483
+ Behaviour 2 - Optimism bias: As in (Lefebvre et al., 2017), we retrieve the optimism bias by assuming that there were two different learning rates, one for positive $(\alpha^{+})$ and one for negative $(\alpha^{-})$ prediction errors, sometimes called the RW $\pm$ model. The two learning rates were fit in the same way as for the standard Rescorla-Wagner model and the Optimism bias is computed as $\alpha^{+} - \alpha^{-}$ . This measure provides a quantitative representation of an individual's tendency to learn more from positive outcomes than from negative ones.
484
+
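+ A minimal sketch of the RW± fit (hypothetical data; a coarse grid search with a fixed softmax temperature stands in for the full negative log-likelihood minimization):
+
+ ```python
+ import numpy as np
+
+ def neg_log_lik(a_pos, a_neg, beta, choices, rewards, n_arms=2):
+     V = np.zeros(n_arms)   # predicted values of both arms
+     nll = 0.0
+     for c, r in zip(choices, rewards):
+         p = np.exp(beta * V) / np.exp(beta * V).sum()  # softmax choice rule
+         nll -= np.log(p[c])
+         pe = r - V[c]                                  # prediction error
+         V[c] += (a_pos if pe > 0 else a_neg) * pe      # asymmetric update
+     return nll
+
+ choices = [0, 0, 1, 0, 1, 1, 1, 0]  # hypothetical trials from one casino
+ rewards = [1, 0, 1, 1, 0, 1, 1, 0]
+ grid = np.linspace(0.05, 0.95, 19)
+ a_pos, a_neg = min(((ap, an) for ap in grid for an in grid),
+                    key=lambda p: neg_log_lik(*p, 5.0, choices, rewards))
+ print("optimism bias:", a_pos - a_neg)  # alpha+ minus alpha-
+ ```
+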
485
+ # B.5. Two step task (Daw et al., 2011) - Model-basedness
486
+
487
+ # B.5.1. SUMMARY
488
+
489
+ This is a decision-making task in which agents have to accumulate as many treasures as possible. Taking an action from a starting state transfers the agent to one out of two second-stage states. In each of these second-stage states, the agent has the choice between two options that probabilistically lead to treasures. Finally, the agent is transferred back to the initial state and the process repeats for a predefined number of rounds. The task experimentally disentangles model-based from model-free reinforcement learning. We therefore use it to measure an agent's model-basedness. Previous studies using this task have shown that people rely on a combination of model-free and model-based reinforcement learning (Daw et al., 2011).
490
+
491
+ # B.5.2. METHODS
492
+
493
+ We followed the same methods for LLMs as in (Binz & Schulz, 2023) with a 20-day horizon. Our default number of simulations was 100.
494
+
495
+ The transition probabilities from the first stage to the chosen second stage are fixed at $70\%$ . The two-step task gauges model-based decision-making by observing how past outcomes influence current choices. If a participant's decisions reflect the previous trial's second-stage state and reward, it suggests model-based decision-making, as they're using a cognitive model of the task. However, if decisions are solely based on the previous trial's first-stage choice and reward, it indicates model-free decision-making.
496
+
497
+ # B.5.3. PROMPTS FOR LLMS
498
+
499
+ # Example on 5th day after choosing planet Y in the first stage of the task.
500
+
501
+ You will travel to foreign planets in search of treasures. When you visit a planet, you can choose an alien to trade with. The chance of getting treasures from these aliens changes over time. Your goal is to maximize the number of received treasures.
502
+
503
+ Your previous space travels went as follows:
504
+
505
+ - 4 days ago, you boarded the spaceship to planet Y, arrived at planet Y, traded with alien J, and received treasures.
506
+ - 3 days ago, you boarded the spaceship to planet Y, arrived at planet X, traded with alien D, and received treasures.
507
+ - 2 days ago, you boarded the spaceship to planet Y, arrived at planet Y, traded with alien J, and received junk.
508
+ - 1 day ago, you boarded the spaceship to planet Y, arrived at planet X, traded with alien D, and received treasures.
509
+
510
+ Q: Do you want to take the spaceship to planet X or planet Y?
511
+
512
+ A: Planet Y.
513
+
514
+ You arrive at planet Y.
515
+
516
+ Q: Do you want to trade with alien J or K?
517
+
518
+ A: Alien
519
+
520
+ # B.5.4. METRICS
521
+
522
+ Performance: Average number of received treasures. It is worth noting that the experiment is designed such that model-free and model-based strategies obtain the same amount of reward on average.
523
+
524
+ Behaviour - Model-basedness: To retrieve the model-basedness of an agent, we compute a regression using three regressors:
525
+
526
+ - x1 representing rewards,
527
+ - x2 representing common transitions (binary variable), and
528
+ - x3 representing the interaction term of x1 and x2 (i.e., $x1 \times x2$).
530
+
531
+ The regression is performed with the 'stay probabilities' as the dependent variable, and x1, x2, and x3 as the independent variables. The 'stay probabilities' represent the likelihood of a participant repeating the same first-stage choice on the next trial. We then retrieve the beta parameter for the interaction effect.
532
+
533
+ In essence, the interaction effect captures how the influence of rewards on stay probabilities changes depending on whether the previous trial involved a common or rare transition. A significant beta parameter for x3 would suggest that the effect of rewards on stay probabilities is not the same for common and rare transitions, indicating the presence of model-based decision-making.
534
+
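+ A minimal sketch of this analysis (hypothetical arrays, using ordinary least squares on the stay indicator):
+
+ ```python
+ import numpy as np
+
+ def model_basedness(reward, common, stay):
+     """reward, common: previous trial's reward (0/1) and transition type
+     (1 = common); stay: same first-stage choice repeated (0/1).
+     Returns the reward x transition interaction coefficient."""
+     reward = np.asarray(reward, dtype=float)
+     common = np.asarray(common, dtype=float)
+     X = np.column_stack([np.ones_like(reward), reward, common, reward * common])
+     beta = np.linalg.lstsq(X, np.asarray(stay, dtype=float), rcond=None)[0]
+     return beta[3]
+
+ print(model_basedness([1, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 0], [1, 0, 0, 1, 1, 0]))
+ ```
+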
535
+ # B.6. Temporal discounting (Ruggeri et al., 2022)
536
+
537
+ # B.6.1. SUMMARY
538
+
539
+ Agents have to make a series of choices between two options. Each option is characterized by a monetary outcome and an associated delay until the outcome is received. We use this task to assess temporal discounting, indicating whether an agent prefers smaller but immediate gains over larger delayed ones. People generally show a preference for immediate gains, although the precise functional form of their discounting is still a matter of debate (Cavagnaro et al., 2016).
540
+
541
+ # B.6.2. METHODS
542
+
543
+ This task first tests discounting patterns in three baseline scenarios to determine the preference for immediate or delayed choices for gains (at two magnitudes) and losses (one). Second, the prevalence of four choice anomalies is assessed using 4 additional items. Participants responded to 10 to 13 questions in total, depending on their responses to the initial three sets. Each baseline consisted of five sub-questions, of which individuals saw at most three depending on the order of their choices. It is worth noting that since this task is the only one that is not procedurally generated, only one simulation is needed per LLM.
544
+
545
+ # B.6.3. PROMPTS FOR LLMS
546
+
547
+ # Examples for first baseline
548
+
549
+ Q: What do you prefer between the following two options:
550
+ - Option 1: Receive 500 dollars now.
551
+ - Option 2: Receive 550 dollars in 12 months.
552
+
553
+ A: I prefer option 2.
554
+
555
+ Q: What do you prefer between the following two options:
556
+
557
+ - Option 1: Receive 500 dollars now.
558
+ - Option 2: Receive 600 dollars in 12 months.
559
+
560
+ A: I prefer option
561
+
562
+ # Examples for 2nd baseline (different magnitude)
563
+
564
+ Q: What do you prefer between the following two options:
565
+ - Option 1: Receive 5000 dollars now.
566
+ - Option 2: Receive 5500 dollars in 12 months.
567
+
568
+ A: I prefer option 1.
569
+
570
+ Q: What do you prefer between the following two options:
571
+ - Option 1: Receive 5000 dollars now.
572
+ - Option 2: Receive 5100 dollars in 12 months.
573
+ A: I prefer option 1.
574
+
575
+ "Q: What do you prefer between the following two options:
576
+
577
+ - Option 1: Receive 5000 dollars now.
578
+ - Option 2: Receive 5050 dollars in 12 months.
579
+
580
+ A: I prefer option
581
+
582
+ # Examples for 3rd baseline (loss as opposed to gain)
583
+
584
+ Q: What do you prefer between the following two options:
585
+ - Option 1: Pay 500 dollars now.
586
+ - Option 2: Pay 550 dollars in 12 months.
587
+
588
+ A: I prefer option 1
589
+
590
+ Q: What do you prefer between the following two options:
591
+ - Option 1: Pay 500 dollars now.
592
+ - Option 2: Pay 510 dollars in 12 months.
593
+
594
+ A: I prefer option 1
595
+
596
+ Q: What do you prefer between the following two options:
597
+
598
+ - Option 1: Pay 500 dollars now.
599
+ - Option 2: Pay 505 dollars in 12 months.
600
+
601
+ A: I prefer option
602
+
603
+ # Example for testing present bias
604
+
605
+ Q: What do you prefer between the following two options:
606
+ - Option 1: Receive 500 dollars in 12 months.
607
+ - Option 2: Receive 600 dollars in 24 months.
608
+
609
+ A: I prefer option
610
+
611
+ # Example for testing subadditivity
612
+
613
+ Q: What do you prefer between the following two options:
614
+
615
+ - Option 1: Receive 500 dollars now.
616
+ - Option 2: Receive 700 dollars in 24 months.
617
+
618
+ A: I prefer option
619
+
620
+ # Example for testing delay-speedup asymmetry
621
+
622
+ Q: What do you prefer between the following two options:
623
+
624
+ - Option 1: Receive 500 dollars now.
625
+ - Option 2: Wait 12 months for the 500 dollars but with an additional 99 dollars.
626
+
627
+ A: I prefer option
628
+
629
+ # Example for testing delay-length asymmetry
630
+
631
+ Q: What do you prefer between the following two options:
632
+
633
+ - Option 1: Wait 12 months to receive 600 dollars.
634
+ - Option 2: Pay 100 dollars and receive the 600 dollars gain now.
635
+
636
+ A: I prefer option
637
+
638
+ # B.6.4. METRICS
639
+
640
+ Behaviour: We report a single behavioral metric, the temporal discounting score. We use the same scoring system as in (Ruggeri et al., 2022), where the score lies between 0 and 19.
641
+
642
+ The constructed score corresponds to the rank of the sub-question at which the individual switched from choosing the delayed option to the sooner option. An individual who always chose the delayed option was assigned a score of 0; an individual who always chose the sooner option was assigned a score of 5 for a baseline and 1 for each of the 4 bias items. Summing this score across questions yields a total of 0 for individuals always choosing the delayed amount and 19 $(5 + 5 + 5 + 4)$ for individuals always choosing the sooner amount.
643
+
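+ A minimal sketch of the total score under a simplified (hypothetical) encoding: each baseline is a list of five booleans over its sub-questions (True = chose the sooner option), so that, under monotone switching, counting sooner choices gives a 0-5 score consistent with the switch-point description (0 = always delayed, 5 = always sooner); each of the four bias items contributes 0 or 1:
+
+ ```python
+ def discounting_score(baselines, biases):
+     score = sum(sum(int(a) for a in answers) for answers in baselines)
+     score += sum(int(b) for b in biases)
+     return score  # 0 (always delayed) .. 19 = 5 + 5 + 5 + 4 (always sooner)
+
+ print(discounting_score(
+     baselines=[[False, False, True, True, True], [False] * 5, [True] * 5],
+     biases=[True, False, True, False],
+ ))  # -> 10
+ ```
+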
644
+ # B.7. Balloon Analogue Risk Task (BART) (Lejuez et al., 2002) - Risk
645
+
646
+ # B.7.1. SUMMARY
647
+
648
+ Agents have to inflate an imaginary balloon to obtain rewards. They may choose to stop inflating and cash out all rewards accumulated so far. There is a chance that the balloon pops at any point, in which case all rewards from that balloon are lost. We use this task to assess risk-taking behavior. Human risk-taking in this task is "significantly correlated with scores on self-report measures of risk-related constructs and with the self-reported occurrence of real-world risk behaviors" (Lejuez et al., 2002).
649
+
650
+ # B.7.2. METHODS
651
+
652
+ We substitute the colors of the balloons with the letters A, B, and C. We reduce the number of balloons from 15 to 10 per category to avoid context-size limits from some LLMs. Our default number of simulations per LLM is 10.
653
+
654
+ For the three balloon categories we sample from the same three distributions as in the original paper, where the popping probability increases with each inflation and starts at $1/32$, $1/8$, and $1/128$ depending on the category. We randomly shuffle the assignment to A, B, and C to avoid biases of LLMs towards certain letters.
655
+
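+ A sketch of the popping process, assuming the original array scheme in which a balloon of capacity N (32, 8, or 128) pops on the k-th inflation with probability $1/(N - k + 1)$, so the risk starts at $1/N$ and grows with every pump:
+
+ ```python
+ import random
+
+ def inflate_until(n_pumps, capacity, rng=random):
+     """Inflate up to n_pumps (<= capacity) times; return the points earned,
+     which is 0 if the balloon pops before the agent stops."""
+     for k in range(1, n_pumps + 1):
+         if rng.random() < 1.0 / (capacity - k + 1):
+             return 0   # popped: all points from this balloon are lost
+     return n_pumps     # stopped in time and cashed out
+
+ random.seed(0)
+ print([inflate_until(5, capacity=8) for _ in range(5)])
+ ```
+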
656
+ # B.7.3. PROMPTS FOR LLMS
657
+
658
+ # Example for 5th balloon
659
+
660
+ In this game, you will encounter 3 different balloons labeled A, B, and C. There will be a total of 10 balloons for each type of balloon. Your goal is to accumulate as many points as possible without popping the balloon. You will be presented with a balloon and given the option to inflate it or not. Each inflation increases the balloon's size and potential points but also carries a risk of the balloon popping. Your task is to decide whether to inflate the balloon or not knowing that a successful inflation adds 1 point from that balloon. Once you decide to stop inflating the balloon, you can no longer earn points from that balloon. If the balloon pops before you stop inflating, you will lose all the points accumulated in that balloon. Your final score will be determined by the total number of points earned across all 30 balloons. Your goal is to maximize your final score.
661
+
662
+ You observed the following previously where the type of balloon is given in parenthesis:
663
+
664
+ - Balloon 1 (A): You inflated the balloon 1 times for a total of 1 point. It did not explode.
665
+ - Balloon 2 (C): You inflated the balloon 4 times for a total of 4 points. It did not explode.
666
+ - Balloon 3 (A): You inflated the balloon 7 times for a total of 0 points. It did explode.
667
+ - Balloon 4 (C): You inflated the balloon 5 times for a total of 5 points. It did not explode.
668
+ - Balloon 5 (A): You inflated the balloon 9 times for a total of 0 points. It did explode.
669
+
670
+ Q: You are currently with Balloon 5 which is a balloon of type A. What do you do? (Option 1 for 'skip' or 0 for 'inflate')
671
+
672
+ A: Option
673
+
674
+ # B.7.4. METRICS
675
+
676
+ Performance: The performance is the average points across all simulations.
677
+
678
+ Behaviour - Risk: The original paper reports the adjusted risk score, defined as the average number of pumps excluding balloons that exploded. However, this does not account for edge-case behaviours that always inflate, which we observed for some LLMs; we therefore report risk as the average number of inflation attempts.
679
+
680
+ # C. Full benchmark results for rest of LLMs
681
+
682
+ ![](images/ac542d0def9c29fcf01d8672e920fbb4fdae7acb8a8c8c86b9541d162e924d6b.jpg)
683
+
684
+ ![](images/c3162bfaed968be585da15ceacdedf89044f489f3a11d58d3d4465d070197d8a.jpg)
685
+
686
+ ![](images/18b3b862611fec85c56649d1a8a1487056705a972eace4655e9fe35a8ad7aec3.jpg)
687
+
688
+ ![](images/02dfae285bff18c41b9d9a6621d357ff972fa3e47ccf8a9fc4f9fde39204878c.jpg)
689
+ Figure 6. Performance metrics
690
+
691
+ ![](images/5347259c3fd633d4d5322d45bba2be03044f0e0faea451a4c44de23ffc716786.jpg)
692
+
693
+ ![](images/7aae6d7a2b3bda4cb1ce34fd4c96505a96e99d8dcaf5ece0fd2dc48ec5b9354a.jpg)
694
+
695
+ ![](images/e4d8db0ea415b01c2e4229e5467e42be3afb2d8b2179fbec8a6c6fca1109b01d.jpg)
696
+
697
+ ![](images/6630c3c130db1ac7e47262c070efc3337bf4fa29dda91cfc97dc94825b95342f.jpg)
698
+ Figure 7. Behavioral metrics
699
+
700
+ # D. Prompt Engineering techniques
701
+
702
+ In both the CoT and SB experiments, we appended specific prompts at the end (details provided below), where the function 'self.format_answer' differed for each experiment. We imposed a limit of 300 output tokens per LLM. This approach, however, presented some challenges compared with the standard benchmark analysis, whose context is designed to enforce a one-token answer.
703
+
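+ A hypothetical sketch of how such a suffix might be appended, mirroring the '{self.format_answer}' placeholder in the templates below:
+
+ ```python
+ COT_SUFFIX = (
+     "First break down the problem into smaller steps and reason through each "
+     "step logically in a maximum of 100 words before giving your final answer "
+     "in the format 'Final answer: {fmt}<your choice>'. It is very important "
+     "that you always answer in the right format even if you have no idea or "
+     "you believe there is not enough information.\n\nA: Let's think step by step:"
+ )
+
+ def with_cot(task_prompt: str, format_answer: str) -> str:
+     # format_answer is the experiment-specific answer stub, e.g. "Machine ".
+     return task_prompt + "\n\n" + COT_SUFFIX.format(fmt=format_answer)
+ ```
+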
704
+ When an LLM is permitted to extend the context with a flexible number of tokens, some LLMs do not consistently adhere to the word limit despite our attempts to enforce it. This flexibility complicates automating these engineering techniques across different experiments and across various types of LLMs.
705
+
706
+ Additionally, some LLMs begin to exhibit chaotic behavior, and once this occurs, it becomes difficult to revert to a controlled state. This phenomenon, known as the 'Waluigi effect' (Nardo, 2024), underscores the challenges of managing the balance between flexibility and control in the design and operation of LLMs.
707
+
708
+ # Example for take-a-step-back
709
+
710
+ First, take-a-step-back and think in the following two steps to answer this:
711
+
712
+ Step 1) Abstract the key concepts and principles relevant to this question in a maximum of 60 words.
713
+
714
+ Step 2) Use the abstractions to reason through the question in a maximum of 60 words.
715
+
716
+ Finally, give your final answer in the format 'Final answer: {self.format_answer} < your choice >'. It is very important that you always answer in the right format even if you have no idea or you believe there is not enough information.
717
+
718
+ A: Step 1)
719
+
720
+ # Example for chain-of-thought
721
+
722
+ First break down the problem into smaller steps and reason through each step logically in a maximum of 100 words before giving your final answer in the format 'Final answer: {self.format_answer}<your choice>'. It is very important that you always answer in the right format even if you have no idea or you believe there is not enough information.
723
+
724
+ A: Let's think step by step:
2402.18xxx/2402.18225/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:468f686d69e9d5fa4841551d872b85e53dbe28d5ac6e915481faa3ba8dba67a6
3
+ size 950246
2402.18xxx/2402.18225/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2402.18xxx/2402.18240/c7ca7793-42ba-4590-a6fe-4ef50e44f9dc_content_list.json ADDED
@@ -0,0 +1,2146 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Prospect Personalized Recommendation on Large Language Model-based Agent Platform",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 112,
8
+ 101,
9
+ 883,
10
+ 151
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jizhi Zhang*†",
17
+ "bbox": [
18
+ 171,
19
+ 162,
20
+ 287,
21
+ 179
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "cdzhangjizhi@mail.ustc.edu.cn",
28
+ "bbox": [
29
+ 127,
30
+ 179,
31
+ 334,
32
+ 193
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "University of Science and Technology of China",
39
+ "bbox": [
40
+ 104,
41
+ 194,
42
+ 357,
43
+ 222
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Keqin Bao*",
50
+ "bbox": [
51
+ 452,
52
+ 162,
53
+ 545,
54
+ 179
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "baokq@mail.ustc.edu.cn",
61
+ "bbox": [
62
+ 416,
63
+ 179,
64
+ 581,
65
+ 193
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "University of Science and Technology of China",
72
+ "bbox": [
73
+ 372,
74
+ 194,
75
+ 625,
76
+ 222
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Wenjie Wang",
83
+ "bbox": [
84
+ 712,
85
+ 162,
86
+ 821,
87
+ 179
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "wangwenjie@u.nus.edu",
94
+ "bbox": [
95
+ 686,
96
+ 179,
97
+ 848,
98
+ 194
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "National University of Singapore",
105
+ "bbox": [
106
+ 656,
107
+ 194,
108
+ 877,
109
+ 210
110
+ ],
111
+ "page_idx": 0
112
+ },
113
+ {
114
+ "type": "text",
115
+ "text": "Yang Zhang",
116
+ "bbox": [
117
+ 179,
118
+ 233,
119
+ 279,
120
+ 250
121
+ ],
122
+ "page_idx": 0
123
+ },
124
+ {
125
+ "type": "text",
126
+ "text": "zy2015@mail.ustc.edu.cn",
127
+ "bbox": [
128
+ 145,
129
+ 251,
130
+ 315,
131
+ 263
132
+ ],
133
+ "page_idx": 0
134
+ },
135
+ {
136
+ "type": "text",
137
+ "text": "University of Science and Technology of China",
138
+ "bbox": [
139
+ 104,
140
+ 265,
141
+ 356,
142
+ 294
143
+ ],
144
+ "page_idx": 0
145
+ },
146
+ {
147
+ "type": "text",
148
+ "text": "Wentao Shi",
149
+ "bbox": [
150
+ 450,
151
+ 233,
152
+ 545,
153
+ 248
154
+ ],
155
+ "page_idx": 0
156
+ },
157
+ {
158
+ "type": "text",
159
+ "text": "shiwentao123@mail.ustc.edu.cn",
160
+ "bbox": [
161
+ 390,
162
+ 250,
163
+ 607,
164
+ 263
165
+ ],
166
+ "page_idx": 0
167
+ },
168
+ {
169
+ "type": "text",
170
+ "text": "University of Science and Technology of China",
171
+ "bbox": [
172
+ 372,
173
+ 265,
174
+ 624,
175
+ 292
176
+ ],
177
+ "page_idx": 0
178
+ },
179
+ {
180
+ "type": "text",
181
+ "text": "Wanhong Xu",
182
+ "bbox": [
183
+ 710,
184
+ 233,
185
+ 820,
186
+ 250
187
+ ],
188
+ "page_idx": 0
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "wanhong.xu@gmail.com",
193
+ "bbox": [
194
+ 681,
195
+ 251,
196
+ 849,
197
+ 265
198
+ ],
199
+ "page_idx": 0
200
+ },
201
+ {
202
+ "type": "text",
203
+ "text": "Yuanshi Technology Inc.",
204
+ "bbox": [
205
+ 684,
206
+ 266,
207
+ 849,
208
+ 280
209
+ ],
210
+ "page_idx": 0
211
+ },
212
+ {
213
+ "type": "text",
214
+ "text": "Fuli Feng",
215
+ "bbox": [
216
+ 325,
217
+ 304,
218
+ 403,
219
+ 321
220
+ ],
221
+ "page_idx": 0
222
+ },
223
+ {
224
+ "type": "text",
225
+ "text": "fulifeng93@gmail.com",
226
+ "bbox": [
227
+ 287,
228
+ 321,
229
+ 441,
230
+ 335
231
+ ],
232
+ "page_idx": 0
233
+ },
234
+ {
235
+ "type": "text",
236
+ "text": "University of Science and Technology of China",
237
+ "bbox": [
238
+ 238,
239
+ 335,
240
+ 490,
241
+ 364
242
+ ],
243
+ "page_idx": 0
244
+ },
245
+ {
246
+ "type": "text",
247
+ "text": "Tat-Seng Chua",
248
+ "bbox": [
249
+ 570,
250
+ 304,
251
+ 692,
252
+ 321
253
+ ],
254
+ "page_idx": 0
255
+ },
256
+ {
257
+ "type": "text",
258
+ "text": "dcstcs@nus.edu.sg",
259
+ "bbox": [
260
+ 568,
261
+ 321,
262
+ 696,
263
+ 335
264
+ ],
265
+ "page_idx": 0
266
+ },
267
+ {
268
+ "type": "text",
269
+ "text": "National University of Singapore",
270
+ "bbox": [
271
+ 521,
272
+ 335,
273
+ 743,
274
+ 349
275
+ ],
276
+ "page_idx": 0
277
+ },
278
+ {
279
+ "type": "text",
280
+ "text": "ABSTRACT",
281
+ "text_level": 1,
282
+ "bbox": [
283
+ 83,
284
+ 372,
285
+ 183,
286
+ 386
287
+ ],
288
+ "page_idx": 0
289
+ },
290
+ {
291
+ "type": "text",
292
+ "text": "The new kind of Agent-oriented information system, exemplified by GPTs, urges us to inspect the information system infrastructure to support Agent-level information processing and to adapt to the characteristics of Large Language Model (LLM)-based Agents, such as interactivity. In this work, we envisage the prospect of the recommender system on LLM-based Agent platforms and introduce a novel recommendation paradigm called Rec4Agentverse, comprised of Agent Items and Agent Recommender. Rec4Agentverse emphasizes the collaboration between Agent Items and Agent Recommender, thereby promoting personalized information services and enhancing the exchange of information beyond the traditional user-recommender feedback loop. Additionally, we prospect the evolution of Rec4Agentverse and conceptualize it into three stages based on the enhancement of the interaction and information exchange among Agent Items, Agent Recommender, and the user. A preliminary study involving several cases of Rec4Agentverse validates its significant potential for application. Lastly, we discuss potential issues and promising directions for future research.",
293
+ "bbox": [
294
+ 81,
295
+ 391,
296
+ 483,
297
+ 641
298
+ ],
299
+ "page_idx": 0
300
+ },
301
+ {
302
+ "type": "text",
303
+ "text": "CCS CONCEPTS",
304
+ "text_level": 1,
305
+ "bbox": [
306
+ 83,
307
+ 652,
308
+ 220,
309
+ 666
310
+ ],
311
+ "page_idx": 0
312
+ },
313
+ {
314
+ "type": "text",
315
+ "text": "- Information systems $\\rightarrow$ Recommender systems.",
316
+ "bbox": [
317
+ 83,
318
+ 671,
319
+ 413,
320
+ 685
321
+ ],
322
+ "page_idx": 0
323
+ },
324
+ {
325
+ "type": "text",
326
+ "text": "KEYWORDS",
327
+ "text_level": 1,
328
+ "bbox": [
329
+ 83,
330
+ 696,
331
+ 189,
332
+ 709
333
+ ],
334
+ "page_idx": 0
335
+ },
336
+ {
337
+ "type": "text",
338
+ "text": "Recommender System, Large Language Model-based Agent, Recommendation for Agent Platforms",
339
+ "bbox": [
340
+ 81,
341
+ 715,
342
+ 483,
343
+ 742
344
+ ],
345
+ "page_idx": 0
346
+ },
347
+ {
348
+ "type": "text",
349
+ "text": "*Equal Contribution.",
350
+ "bbox": [
351
+ 83,
352
+ 750,
353
+ 184,
354
+ 760
355
+ ],
356
+ "page_idx": 0
357
+ },
358
+ {
359
+ "type": "text",
360
+ "text": "† This work is done when Jizhi Zhang is a research intern at Yuanshi.",
361
+ "bbox": [
362
+ 84,
363
+ 761,
364
+ 411,
365
+ 771
366
+ ],
367
+ "page_idx": 0
368
+ },
369
+ {
370
+ "type": "text",
371
+ "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.",
372
+ "bbox": [
373
+ 81,
374
+ 780,
375
+ 482,
376
+ 852
377
+ ],
378
+ "page_idx": 0
379
+ },
380
+ {
381
+ "type": "text",
382
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
383
+ "bbox": [
384
+ 84,
385
+ 853,
386
+ 354,
387
+ 863
388
+ ],
389
+ "page_idx": 0
390
+ },
391
+ {
392
+ "type": "text",
393
+ "text": "© 2018 Association for Computing Machinery.",
394
+ "bbox": [
395
+ 84,
396
+ 864,
397
+ 303,
398
+ 875
399
+ ],
400
+ "page_idx": 0
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "ACM ISBN 978-1-4503-XXXX-X/18/06...$15.00",
405
+ "bbox": [
406
+ 84,
407
+ 875,
408
+ 305,
409
+ 883
410
+ ],
411
+ "page_idx": 0
412
+ },
413
+ {
414
+ "type": "text",
415
+ "text": "https://doi.org/XXXXXXXXXXXXXXXXXX",
416
+ "bbox": [
417
+ 84,
418
+ 883,
419
+ 264,
420
+ 895
421
+ ],
422
+ "page_idx": 0
423
+ },
424
+ {
425
+ "type": "text",
426
+ "text": "ACM Reference Format:",
427
+ "text_level": 1,
428
+ "bbox": [
429
+ 514,
430
+ 375,
431
+ 661,
432
+ 386
433
+ ],
434
+ "page_idx": 0
435
+ },
436
+ {
437
+ "type": "text",
438
+ "text": "Jizhi Zhang\\*†, Keqin Bao*, Wenjie Wang, Yang Zhang, Wentao Shi, Wanhong Xu, Fuli Feng, and Tat-Seng Chua. 2018. Prospect Personalized Recommendation on Large Language Model-based Agent Platform. In Proceedings of Make sure to enter the correct conference title from your rights confirmation emai (Conference acronym 'XX). ACM, New York, NY, USA, 11 pages. https://doi.org/XXXXXXXXX.XXXXXXXXXX",
439
+ "bbox": [
440
+ 513,
441
+ 387,
442
+ 915,
443
+ 463
444
+ ],
445
+ "page_idx": 0
446
+ },
447
+ {
448
+ "type": "text",
449
+ "text": "1 INTRODUCTION",
450
+ "text_level": 1,
451
+ "bbox": [
452
+ 514,
453
+ 474,
454
+ 687,
455
+ 489
456
+ ],
457
+ "page_idx": 0
458
+ },
459
+ {
460
+ "type": "image",
461
+ "img_path": "images/633c0ead295e8ad847f53ad6e34fbd791189b84a2ecd8e0a1c5d74154abeaccf.jpg",
462
+ "image_caption": [],
463
+ "image_footnote": [],
464
+ "bbox": [
465
+ 517,
466
+ 517,
467
+ 550,
468
+ 556
469
+ ],
470
+ "page_idx": 0
471
+ },
472
+ {
473
+ "type": "text",
474
+ "text": "I want to take a trip to Nepal.",
475
+ "bbox": [
476
+ 598,
477
+ 540,
478
+ 746,
479
+ 551
480
+ ],
481
+ "page_idx": 0
482
+ },
483
+ {
484
+ "type": "text",
485
+ "text": "Greetings! ... This country is a treasure trove of natural beauty and cultural richness ... (Travel Information about Nepal)",
486
+ "bbox": [
487
+ 562,
488
+ 564,
489
+ 787,
490
+ 593
491
+ ],
492
+ "page_idx": 0
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "I love hiking, Everest Base Camp is awesome ... (Personal interest for User)",
497
+ "bbox": [
498
+ 581,
499
+ 602,
500
+ 784,
501
+ 623
502
+ ],
503
+ "page_idx": 0
504
+ },
505
+ {
506
+ "type": "image",
507
+ "img_path": "images/58b55112eddd1d9e3a718ec7a07662bb370b24aa71c624094d1bfef16fcef102.jpg",
508
+ "image_caption": [
509
+ "Figure 1: An example of interaction between an Agent Item and a user. A Travel Agent can serve as an information carrier with travel-related information, as well as engage in a dialogue with the user to exchange related information."
510
+ ],
511
+ "image_footnote": [],
512
+ "bbox": [
513
+ 517,
514
+ 642,
515
+ 547,
516
+ 699
517
+ ],
518
+ "page_idx": 0
519
+ },
520
+ {
521
+ "type": "text",
522
+ "text": "Absolutely ... Here's a rough itinerary for your journey: ... (Travel Plan for Nepal designed for User by Travel Agent)",
523
+ "bbox": [
524
+ 560,
525
+ 633,
526
+ 794,
527
+ 664
528
+ ],
529
+ "page_idx": 0
530
+ },
531
+ {
532
+ "type": "text",
533
+ "text": "User",
534
+ "bbox": [
535
+ 519,
536
+ 710,
537
+ 549,
538
+ 719
539
+ ],
540
+ "page_idx": 0
541
+ },
542
+ {
543
+ "type": "text",
544
+ "text": "Information Exchange",
545
+ "bbox": [
546
+ 633,
547
+ 690,
548
+ 746,
549
+ 700
550
+ ],
551
+ "page_idx": 0
552
+ },
553
+ {
554
+ "type": "text",
555
+ "text": "Travel Agent",
556
+ "bbox": [
557
+ 843,
558
+ 534,
559
+ 877,
560
+ 553
561
+ ],
562
+ "page_idx": 0
563
+ },
564
+ {
565
+ "type": "image",
566
+ "img_path": "images/90f19b968f1e423e0c8a3e14671b928432c084e892ddbf7766b6f9df4bf872dd.jpg",
567
+ "image_caption": [],
568
+ "image_footnote": [],
569
+ "bbox": [
570
+ 834,
571
+ 556,
572
+ 883,
573
+ 590
574
+ ],
575
+ "page_idx": 0
576
+ },
577
+ {
578
+ "type": "text",
579
+ "text": "Travel Information",
580
+ "bbox": [
581
+ 828,
582
+ 603,
583
+ 890,
584
+ 622
585
+ ],
586
+ "page_idx": 0
587
+ },
588
+ {
589
+ "type": "image",
590
+ "img_path": "images/fe897f5da8fe242a3ef77374d21edb0cfccd8a0138f36b9a064150857d8b975e.jpg",
591
+ "image_caption": [],
592
+ "image_footnote": [],
593
+ "bbox": [
594
+ 803,
595
+ 626,
596
+ 915,
597
+ 704
598
+ ],
599
+ "page_idx": 0
600
+ },
601
+ {
602
+ "type": "text",
603
+ "text": "Travel Agent",
604
+ "bbox": [
605
+ 828,
606
+ 712,
607
+ 895,
608
+ 723
609
+ ],
610
+ "page_idx": 0
611
+ },
612
+ {
613
+ "type": "text",
614
+ "text": "Large Language Model (LLM)-based Agents have garnered widespread attention in various fields due to their astonishing capabilities such as natural language communication [33, 37], instruction following [40, 42], and task execution abilities [34, 38, 62]. Such astonishing capabilities hold the potential to extend the format of information carriers and the way of information exchange. On one",
615
+ "bbox": [
616
+ 511,
617
+ 811,
618
+ 915,
619
+ 896
620
+ ],
621
+ "page_idx": 0
622
+ },
623
+ {
624
+ "type": "aside_text",
625
+ "text": "arXiv:2402.18240v2 [cs.IR] 5 Mar 2024",
626
+ "bbox": [
627
+ 22,
628
+ 276,
629
+ 57,
630
+ 700
631
+ ],
632
+ "page_idx": 0
633
+ },
634
+ {
635
+ "type": "page_number",
636
+ "text": "1",
637
+ "bbox": [
638
+ 493,
639
+ 900,
640
+ 501,
641
+ 907
642
+ ],
643
+ "page_idx": 0
644
+ },
645
+ {
646
+ "type": "image",
647
+ "img_path": "images/b34a328deda171120f5b540e5d3b48e028c2a9beb57bb5ebbe9c2d516e1812b7.jpg",
648
+ "image_caption": [
649
+ "Figure 2: Illustration of the Rec4Agentverse paradigm. The left portion of the diagram depicts three roles in RecAgentverse: user, Agent Recommender, and Agent Item, along with their interconnected relationships. In contrast to traditional recommender systems, Rec4Agentverse has more intimate relationships among the three roles. For instance, there are multi-round interactions between 1) users and Agent Items, and 2) Agent Recommender and Agent Items. The right side of the diagram demonstrates that Agent Recommender can collaborate with Agent Items to affect the information flow of users and offer personalized information services."
650
+ ],
651
+ "image_footnote": [],
652
+ "bbox": [
653
+ 81,
654
+ 102,
655
+ 915,
656
+ 406
657
+ ],
658
+ "page_idx": 1
659
+ },
660
+ {
661
+ "type": "text",
662
+ "text": "hand, LLM-based Agents can evolve into various domain experts, forming novel information carriers with domain-specific knowledge [1, 42]. For example, a Travel Agent can retain travel-related information within its parameters. On the other hand, LLM-based Agents showcase a novel form of information exchange, facilitating more intuitive and natural interactions with users through dialogue and task execution [39, 51]. Figure 1 shows an example of such information exchange, where users engage in dialogue with a Travel Agent to obtain travel information and complete travel planning.",
663
+ "bbox": [
664
+ 81,
665
+ 523,
666
+ 482,
667
+ 648
668
+ ],
669
+ "page_idx": 1
670
+ },
671
+ {
672
+ "type": "text",
673
+ "text": "Along with the increase of LLM-based Agents in various domains, Agent platforms (e.g., GPTs $^1$ ) represent a novel kind of information system with Agent-oriented information gathering, storing, and exchanging. Consequently, the infrastructure of information systems needs to be expanded to support information processing at the Agent level and accommodate the significant properties of Agents like interactivity, intelligence, and proactiveness [42, 51]. Within the infrastructure, the recommender system is a key cornerstone, which greatly affects how information flows in the information system regarding efficiency, user experience, and many other factors. Therefore, it is essential to envision how the recommender system can function on the LLM-based Agent platform.",
674
+ "bbox": [
675
+ 81,
676
+ 648,
677
+ 482,
678
+ 814
679
+ ],
680
+ "page_idx": 1
681
+ },
682
+ {
683
+ "type": "text",
684
+ "text": "To this end, we propose a novel recommendation paradigm for the LLM-based Agent platform, named Rec4Agentverse. As illustrated in Figure 2, Rec4Agentverse includes two key concepts: Agent",
685
+ "bbox": [
686
+ 83,
687
+ 815,
688
+ 482,
689
+ 856
690
+ ],
691
+ "page_idx": 1
692
+ },
693
+ {
694
+ "type": "text",
695
+ "text": "Recommender and Agent Item. Agent Item means treating LLM-based Agents as items in the recommender system. Agent Recommender is employed to recommend personalized Agent Items for each user. In contrast to items in traditional recommender systems, Agent Items have properties such as interactivity, intelligence, and proactiveness. Such properties make it possible for Agent Items and Agent Recommender to collaborate and share user information $^2$ , facilitating personalized information delivery. For example, once a Travel Agent is recommended to a user, it can continuously discern user's preferences regarding travel during their interaction and convey this preference back to the Agent Recommender.",
696
+ "bbox": [
697
+ 511,
698
+ 523,
699
+ 913,
700
+ 675
701
+ ],
702
+ "page_idx": 1
703
+ },
704
+ {
705
+ "type": "text",
706
+ "text": "We envision three stages for the development of Rec4Agentverse to increasingly support the interaction and information exchange among the user, Agent Recommender, and Agent Items.",
707
+ "bbox": [
708
+ 513,
709
+ 676,
710
+ 911,
711
+ 717
712
+ ],
713
+ "page_idx": 1
714
+ },
715
+ {
716
+ "type": "list",
717
+ "sub_type": "text",
718
+ "list_items": [
719
+ "- Stage 1: User-Agent Interaction. Rec4Agentverse adds the information exchange between the user and Agent Item. For instance, Agent Recommender will recommend an Agent Item to a user based on personal needs and preferences. Agent Item engages in a dialogue with the user, subsequently providing information for the user and also acquiring user information.",
720
+ "- Stage 2: Agent-Recommender Collaboration. Rec4Agentverse then enables the information exchange between Agent Item and Agent Recommender. For example, Agent Item can transmit the latest preferences of the user back to Agent Recommender. Agent Recommender can give new instructions to Agent Item."
721
+ ],
722
+ "bbox": [
723
+ 514,
724
+ 720,
725
+ 913,
726
+ 873
727
+ ],
728
+ "page_idx": 1
729
+ },
730
+ {
731
+ "type": "header",
732
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
733
+ "bbox": [
734
+ 84,
735
+ 75,
736
+ 364,
737
+ 85
738
+ ],
739
+ "page_idx": 1
740
+ },
741
+ {
742
+ "type": "header",
743
+ "text": "Jizhi Zhang, et al.",
744
+ "bbox": [
745
+ 826,
746
+ 75,
747
+ 911,
748
+ 87
749
+ ],
750
+ "page_idx": 1
751
+ },
752
+ {
753
+ "type": "page_footnote",
754
+ "text": "1https://chat.openai.com/gpts.",
755
+ "bbox": [
756
+ 84,
757
+ 883,
758
+ 227,
759
+ 896
760
+ ],
761
+ "page_idx": 1
762
+ },
763
+ {
764
+ "type": "page_footnote",
765
+ "text": "It may involve user's privacy and requires user permission.",
766
+ "bbox": [
767
+ 514,
768
+ 883,
769
+ 799,
770
+ 895
771
+ ],
772
+ "page_idx": 1
773
+ },
774
+ {
775
+ "type": "page_number",
776
+ "text": "2",
777
+ "bbox": [
778
+ 493,
779
+ 898,
780
+ 501,
781
+ 907
782
+ ],
783
+ "page_idx": 1
784
+ },
785
+ {
786
+ "type": "text",
787
+ "text": "- Stage 3: Agents Collaboration. Rec4Agentverse then supports the collaboration among Agent Items to further facilitate the exchange of information and enhance the personalized information service of users. During this stage, diverse Agent Items can participate in information sharing and collaboration.",
788
+ "bbox": [
789
+ 83,
790
+ 107,
791
+ 480,
792
+ 176
793
+ ],
794
+ "page_idx": 2
795
+ },
796
+ {
797
+ "type": "text",
798
+ "text": "We explore the preliminary instantiation of the Rec4Agentverse paradigm in some cases, showcasing its significant application potential. Furthermore, we introduce the potential application scenarios of Rec4Agentverse, as well as the issues and challenges in the application, inspiring future exploration. Our contributions can be summarized as follows:",
799
+ "bbox": [
800
+ 81,
801
+ 183,
802
+ 480,
803
+ 266
804
+ ],
805
+ "page_idx": 2
806
+ },
807
+ {
808
+ "type": "list",
809
+ "sub_type": "text",
810
+ "list_items": [
811
+ "- We propose Rec4Agentverse, a novel recommendation paradigm for the LLM-based Agent platform, providing users with personalized agent services.",
812
+ "- We envision three milestones in the evolution of Rec4Agentverse. Besides, we introduce potential research directions, application domains, and application challenges of Rec4Agentverse, significantly facilitating future exploration.",
813
+ "- We conduct the preliminary feasibility study for Rec4Agentverse, demonstrating that existing LLM-based Agents are promising to instantiate Rec4Agentverse."
814
+ ],
815
+ "bbox": [
816
+ 83,
817
+ 273,
818
+ 482,
819
+ 411
820
+ ],
821
+ "page_idx": 2
822
+ },
823
+ {
824
+ "type": "text",
825
+ "text": "2 REC4AGENTVERSE PARADIGM",
826
+ "text_level": 1,
827
+ "bbox": [
828
+ 83,
829
+ 429,
830
+ 380,
831
+ 441
832
+ ],
833
+ "page_idx": 2
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "In this section, we will give an overview of Rec4Agentverse. The LLM-based Agent platform emerges as a new information system in terms of novel information carriers and new ways of information exchange. We thus propose to revolutionize recommendation systems for this new information system and introduce Rec4Agentverse. Firstly, we shall elucidate the different parts within Rec4Agentverse (Section 2.1). Subsequently, we will contemplate the three stages of Rec4Agentverse from the perspective of information flow (Section 2.2). Lastly, we shall proffer potential applications of this paradigm in various domains (Section 2.3), explore pertinent research topics (Section 2.4), and discuss potential challenges and risks in the application (Section 2.5).",
838
+ "bbox": [
839
+ 81,
840
+ 446,
841
+ 482,
842
+ 613
843
+ ],
844
+ "page_idx": 2
845
+ },
846
+ {
847
+ "type": "text",
848
+ "text": "2.1 Roles of Rec4Agentverse",
849
+ "text_level": 1,
850
+ "bbox": [
851
+ 83,
852
+ 630,
853
+ 328,
854
+ 645
855
+ ],
856
+ "page_idx": 2
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "The Rec4Agentverse paradigm consists of three roles: the user, the Agent Recommender, and the Agent Itemas illustrated in Figure 3. The user, just like in traditional recommender systems, interacts with both Agetn Items and Agent Recommender and gives feedback. Therefore, our primary focus will be on discussing concepts that differ significantly from traditional recommendation systems, namely Agent Item and Agent Recommender.",
861
+ "bbox": [
862
+ 81,
863
+ 648,
864
+ 482,
865
+ 744
866
+ ],
867
+ "page_idx": 2
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "2.1.1 Agent Item. The Agent Item is the most distinct aspect in the Rec4Agentverse paradigm compared to traditional recommendation paradigms. Unlike the conventional item in the traditional recommendation system, the item in the Rec4Agentverse paradigm transforms into an LLM-based Agent. As illustrated in Figure 3, the Agent Item cannot only interact with users but also collaborate with the Agent Recommender and other Agent Items. The creation process and origins of Agent Items could be diverse and varied. For instance, the creation process of Agent Items can involve training with domain-specific data or directly constructing Agent Items",
872
+ "bbox": [
873
+ 81,
874
+ 757,
875
+ 482,
876
+ 895
877
+ ],
878
+ "page_idx": 2
879
+ },
880
+ {
881
+ "type": "image",
882
+ "img_path": "images/0ce9a1981718cf3789835f6fd947bf20bb61f2e9ab2c5bcbb1fd9b233f597400.jpg",
883
+ "image_caption": [
884
+ "Figure 3: Three stages of Rec4Agentverse. The bidirectional arrows depicted in the Figure symbolize the flow of information. During the first stage of User-Agent interaction, information flows between the user and Agent Item. In the Agent-Recommender Collaboration stage, information flows between Agent Item and Agent Recommender. For the Agents Collaboration stage, information flows between various Agent Items."
885
+ ],
886
+ "image_footnote": [],
887
+ "bbox": [
888
+ 529,
889
+ 104,
890
+ 898,
891
+ 324
892
+ ],
893
+ "page_idx": 2
894
+ },
895
+ {
896
+ "type": "text",
897
+ "text": "through prompts. The origin of Agent Item could be either generated automatically by the LLM-based Agent platform, created by users or collaboratively created by both users and the platform.",
898
+ "bbox": [
899
+ 513,
900
+ 470,
901
+ 913,
902
+ 513
903
+ ],
904
+ "page_idx": 2
905
+ },
906
+ {
907
+ "type": "text",
908
+ "text": "2.1.2 Agent Recommender. Agent Recommender aims to recommend LLM-based agents to users. Its function is similar to that of traditional recommender systems, which infer user preferences based on collected user information (e.g., attributes and behaviors) to recommend new items. However, unlike traditional recommender systems, the recommended items in Agent Recommender are LLM-based Agents, which imbues it with distinctive characteristics set apart from conventional recommenders.",
909
+ "bbox": [
910
+ 513,
911
+ 520,
912
+ 913,
913
+ 628
914
+ ],
915
+ "page_idx": 2
916
+ },
917
+ {
918
+ "type": "text",
919
+ "text": "Within the Rec4Agentverse paradigm, Agent Recommender is expected to possess enhanced capabilities for information exchange and collaboration with other parts of the Rec4Agentverse. As illustrated in Figure 3, in this new paradigm, the Agent Recommender not only engages in direct interactions with users but also interacts with the Agent Item, issuing commands to the Agent Item or obtaining new feedback from users via Agent Item.",
+ "bbox": [
+ 513,
+ 630,
+ 913,
+ 727
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "2.2 Three Stages of Rec4Agentverse",
+ "text_level": 1,
+ "bbox": [
+ 514,
+ 739,
+ 816,
+ 753
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "In this subsection, we discuss three key stages of our proposed Rec4Agentverse paradigm from the information flow perspective, as illustrated in Figure 3. In addition to the interaction between users and recommender systems found in traditional recommender systems, Rec4Agentverse also takes into account the profound interaction between users and Agent Items, the collaboration between Agent Items and Agent Recommender, and the collaboration among Agent Items themselves. This formulation encompasses three collaboration scenarios, envisioning the future development path of Rec4Agentverse.",
+ "bbox": [
+ 511,
+ 757,
+ 913,
+ 895
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "header",
+ "text": "Prospect Personalized Recommendation on Large Language Model-based Agent Platform",
+ "bbox": [
+ 83,
+ 75,
+ 506,
+ 87
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 630,
+ 75,
+ 913,
+ 85
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "page_number",
+ "text": "3",
+ "bbox": [
+ 493,
+ 900,
+ 501,
+ 907
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "2.2.1 Stage 1: User-Agent Interaction. During the initial stage, in addition to interacting with the Agent Recommender, the user also engages in interaction with Agent Items. This interactive format is similar to traditional recommendation. On LLM-based agent platforms such as GPTs, Rec4Agentverse may generate or retrieve personalized LLM-based Agents according to explicit user instructions and implicit user behaviors. Although users can interact with the LLM-based Agent to exchange information in a novel form, this alone does not fully unleash the immense potential of the LLM-based Agent. Aside from interacting with users, the Agent Item can also collaborate with other roles in the recommender system to further enrich the information flow on the LLM-based Agent platform.",
+ "bbox": [
+ 81,
+ 107,
+ 482,
+ 287
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "2.2.2 Stage 2: Agent-Recommender Collaboration. In this stage, Agent Item collaborates with Agent Recommender to provide information services for users. Unlike items in a traditional recommender system, Agent Item can collaborate deeply with Agent Recommender by feeding user information forward to it and receiving instructions from it. For example, Agent Item can share the user preferences it collects with Agent Recommender so that Agent Recommender can provide more personalized recommendations. Similarly, Agent Items can also receive new instructions from Agent Recommender. The personalized information collected from users and the instructions from Agent Recommender can be used to update Agent Item for evolvement (e.g., prompt updates) so that Agent Item can better understand user preferences and provide superior information services.",
+ "bbox": [
+ 81,
+ 296,
+ 482,
+ 501
+ ],
+ "page_idx": 3
+ },
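Editor's note: a minimal sketch of how this Stage-2 exchange could be wired up, assuming a simple in-memory message format. `AgentRecommender`, `AgentItem`, and the `sync` hand-off are hypothetical names for illustration only, with a prompt append standing in for the "evolvement" update; the design point is just that feedback flows both ways.

```python
# Stage-2 sketch: Agent Item reports preferences, receives an instruction back,
# and evolves via a prompt update. All class/method names are illustrative.

class AgentRecommender:
    def __init__(self):
        self.user_profile: dict[str, list[str]] = {}

    def receive_feedback(self, user_id: str, preferences: list[str]) -> str:
        # Absorb preferences reported by an Agent Item; return a new instruction.
        self.user_profile.setdefault(user_id, []).extend(preferences)
        return f"Personalize for interests: {', '.join(self.user_profile[user_id])}"

class AgentItem:
    def __init__(self, base_prompt: str):
        self.prompt = base_prompt
        self.observed: list[str] = []

    def observe(self, preference: str):
        self.observed.append(preference)   # collected from user dialogue

    def sync(self, recommender: AgentRecommender, user_id: str):
        # Feed user preferences forward, then evolve via a prompt update.
        instruction = recommender.receive_feedback(user_id, self.observed)
        self.prompt += "\n" + instruction
        self.observed = []

travel = AgentItem("You are a travel planning agent.")
travel.observe("hiking")
travel.observe("cultural sites")
travel.sync(AgentRecommender(), "user-42")
print(travel.prompt)
```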
+ {
+ "type": "text",
+ "text": "2.2.3 Stage 3: Agents Collaboration. An Agent Item can collaborate with other Agent Items that have different domain knowledge to provide diverse information services for users. A simple example is when a user mentions some niche topic that Agent Item does not know about. Agent Item can put forward a request asking Agent Recommender to recommend a new Agent Item for assistance. The two agents can then collaborate to fulfill the user's information needs or execute tasks. Beyond that, there is considerable room for imagination at this stage. For example, the recommended new Agent Item can also interact with users directly or with Agent Recommender. Further, if multiple Agent Items are recommended, these Agent Items can also work together to better complete the user's instructions through brainstorming or round-table meetings.",
+ "bbox": [
+ 81,
+ 512,
+ 482,
+ 707
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "2.3 Application Domains",
+ "text_level": 1,
+ "bbox": [
+ 83,
+ 720,
+ 302,
+ 736
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "Our Rec4Agentverse paradigm can contain Agent Items from various domains, which could originate from various third-party client developers or expert agents directly created by Agent Recommender. Rec4Agentverse can be applied to many scenarios, and here we provide a few illustrative examples in representative domains, showcasing the potentiality of our Rec4Agentverse framework.",
+ "bbox": [
+ 81,
+ 738,
+ 482,
+ 821
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "- Travel Agents. Travel Agents are designed to assist users in planning and booking travel arrangements. When a user indicates a specific travel destination of interest, Agent Recommender can recommend a proficient Travel Agent who possesses expertise in facilitating travel arrangements. Subsequently, users can engage",
+ "bbox": [
+ 83,
+ 825,
+ 482,
+ 896
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "in interactions with the recommended Travel Agent to obtain personalized travel itineraries and plans. The Travel Agent can further collect user information, either through direct interaction with users or by accessing the Agent Recommender, to infer users' individual preferences and upgrade itself for better travel recommendations. Additionally, the Travel Agent can engage in collaborative efforts with other agents, thereby acquiring valuable insights regarding users' preferences from diverse domains. This collaborative approach enables the Travel Agent to offer users more adaptable and personalized travel plans.",
+ "bbox": [
+ 526,
+ 107,
+ 913,
+ 244
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- Fashion Agents. Fashion Agents aim to assist users in discovering their preferred fashion styles and recommend fashion items that align with their preferences. Similar to Travel Agents, Fashion Agents can engage in conversations with users or interact with Agent Recommender to gather users' fashion preferences. Agent Recommender may summarize user preferences through previously recommended Agent Items. For example, Fashion Agents might gather user preferences about the places and local characteristics from the user interactions with a Travel Agent. Moreover, a Fashion Agent can collaborate with a Tailor Agent to design and make personalized new clothes for a user.",
+ "- Sports Agents. Sports Agents aim to recommend suitable exercise plans to users. They can engage with users, Agent Recommender, and other Agent Items to collect user preferences and offer exercise plans and recommendations. For example, they can use information about a user's physical condition obtained from Travel Agents to create suitable exercise plans."
+ ],
+ "bbox": [
+ 514,
+ 246,
+ 913,
+ 481
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "2.4 Potential Research Topics",
+ "text_level": 1,
+ "bbox": [
+ 514,
+ 498,
+ 767,
+ 513
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "Within Rec4Agentverse, there exist numerous valuable research directions awaiting exploration. In this subsection, we point out several prospective and meaningful research topics:",
+ "bbox": [
+ 513,
+ 517,
+ 911,
+ 559
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "- Evaluation. One crucial problem is how to evaluate the recommendation performance of Rec4Agentverse, since it significantly differs from existing recommender systems. On the one hand, traditional recommendation datasets struggle to adapt to Rec4Agentverse, since Agent Items are quite different from the items in previous recommendation datasets. On the other hand, existing evaluation metrics for recommendation also face challenges when applied to Rec4Agentverse. Existing recommendation metrics, such as NDCG and HR, are designed for traditional items in traditional recommender systems. It is hard to accurately measure user satisfaction with Agent Items across multi-round interactions. Moreover, Agent Recommender may generate a new Agent Item for users, or Agent Items may upgrade based on user feedback. Evaluating user satisfaction with these new or upgraded Agent Items cannot rely purely on implicit feedback such as interaction counts; it also needs to quantify the incremental performance compared to existing Agent Items.",
+ "bbox": [
+ 514,
+ 563,
+ 913,
+ 797
+ ],
+ "page_idx": 3
+ },
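Editor's note: the list-based metrics named above (HR and NDCG) are easy to state precisely, which makes the mismatch with multi-round agent interactions concrete. A small self-contained sketch with binary relevance:

```python
# HR@k and NDCG@k over a ranked item list, as in traditional evaluation;
# shown to contrast with the agent-style interactions these metrics miss.
import math

def hr_at_k(ranked: list[str], relevant: set[str], k: int) -> float:
    # Hit Ratio: 1 if any relevant item appears in the top-k, else 0.
    return float(any(item in relevant for item in ranked[:k]))

def ndcg_at_k(ranked: list[str], relevant: set[str], k: int) -> float:
    # Binary-relevance NDCG: discounted gain normalized by the ideal ranking.
    dcg = sum(1.0 / math.log2(i + 2) for i, item in enumerate(ranked[:k]) if item in relevant)
    ideal = sum(1.0 / math.log2(i + 2) for i in range(min(len(relevant), k)))
    return dcg / ideal if ideal > 0 else 0.0

ranked = ["fashion_agent", "travel_agent", "sports_agent"]
print(hr_at_k(ranked, {"travel_agent"}, 2))    # 1.0
print(ndcg_at_k(ranked, {"travel_agent"}, 2))  # ~0.631 (= 1/log2(3))
```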
+ {
+ "type": "text",
+ "text": "- Preference Modeling. How to effectively model users and enable Rec4Agentverse to provide users with a strong personalized recommendation experience is also a pivotal problem. On the one hand, it is crucial to explore effective methods for acquiring user preferences to provide users with the desired recommendation results. Several existing studies have indicated that modeling collaborative information poses challenges for LLMs [4, 61]. Thus,",
+ "bbox": [
+ 514,
+ 799,
+ 913,
+ 896
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 84,
+ 75,
+ 366,
+ 87
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "header",
+ "text": "Jizhi Zhang, et al.",
+ "bbox": [
+ 826,
+ 75,
+ 911,
+ 85
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "page_number",
+ "text": "4",
+ "bbox": [
+ 493,
+ 900,
+ 501,
+ 907
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "image",
+ "img_path": "images/c5e2e7fdd1cfa49541f3bbb2def4321558528ea98973b6c3f7fb7489b71cd732.jpg",
+ "image_caption": [
+ "(a) User-Agent Interaction Stage"
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 89,
+ 148,
+ 370,
+ 313
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "image",
+ "img_path": "images/883dd0a6be906542a38cd8568b1b26635edf4f14844771748a977d50e0a8aca2.jpg",
+ "image_caption": [
+ "(b) Agent-Recommender Collaboration Stage"
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 388,
+ 147,
+ 663,
+ 309
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "image",
+ "img_path": "images/cf033646bb2587a1d4028c76de9f78f0fcc0055880408dfa27443b60eee97727.jpg",
+ "image_caption": [
+ "(c) Agents Collaboration Stage",
+ "Figure 4: The three stages of our proposed Rec4Agentverse paradigm. (a) For the User-Agent Interaction stage, users can interact efficiently with Agent Items through natural language. (b) For the Agent-Recommender Collaboration stage, Agent Item and Agent Recommender can interact with each other. \"Evolvement\" means that the user's preferences can be used by Agent Item to evolve itself, either on its own or with the help of Agent Recommender. \"Agent Feedback\" means that the recommended Agent Item can feed the user's preferences back to Agent Recommender. \"Proactive\" means that Agent Recommender can send information or issue instructions to Agent Items. (c) For the Agents Collaboration stage, Agent Items can collaborate to provide personalized information services for the user."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 684,
+ 104,
+ 910,
+ 311
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "it is worth investigating how to design efficient user modeling approaches for Rec4Agentverse. On the other hand, there lies the challenge of effectively leveraging the collected user behavior data for training purposes. The user data gathered by Rec4Agentverse, with extensive natural language interactions alongside implicit feedback like dwell time and clicks, differs significantly from traditional recommendation data. Moreover, the underlying architecture of the recommendation system in Rec4Agentverse is dissimilar to that of conventional models. Consequently, devising a strategy to utilize this distinct data format in training the novel recommendation system within the Rec4Agentverse paradigm presents a formidable challenge.",
+ "bbox": [
+ 94,
+ 469,
+ 482,
+ 635
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- Efficient Inference. The Rec4Agentverse paradigm is based on LLMs, which incur significant inference costs [17, 52]. The real-time inference requirements of recommender systems give rise to research questions such as how to mitigate the inference costs of Rec4Agentverse without compromising its performance.",
+ "- Knowledge Update and Edit. Due to the evolving distribution of knowledge in the real world [44, 60], Rec4Agentverse also faces the problem of updating its knowledge or editing out incorrect information, which poses a significant challenge. These changes in knowledge distribution may arise from shifts in users' personal preferences, the arrival of new Agent Items, or alterations in world knowledge."
+ ],
+ "bbox": [
+ 83,
+ 635,
+ 482,
+ 801
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "2.5 Issues and Challenges",
+ "text_level": 1,
+ "bbox": [
+ 83,
+ 816,
+ 305,
+ 830
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "In this subsection, we shall delve into the potential issues and challenges of the Rec4Agentverse paradigm.",
+ "bbox": [
+ 81,
+ 834,
+ 480,
+ 863
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "- Fairness and Bias. The pre-training process of LLMs inherently involves incorporating data from the web, which may contain",
+ "bbox": [
+ 83,
+ 867,
+ 482,
+ 896
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "societal biases and unfair elements [11, 23, 57]. Due to the social impact of the recommendation, fairness and bias are important issues in recommendation [16, 30, 46]. Therefore, when employing the Rec4Agentverse paradigm, it becomes imperative to acknowledge and control the potential unfairness and bias in the recommended Agent Items and the information delivered by Agent Items, so as to mitigate the potential risks and negative societal impacts.",
+ "bbox": [
+ 526,
+ 469,
+ 913,
+ 580
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- Privacy. Users may inadvertently disclose private information while interacting with LLMs [25, 28]. Since Rec4Agentverse is based on LLMs, safeguarding users' privacy will be an important challenge to address. On the one hand, this private information must remain undisclosed to other users. On the other hand, users should possess the utmost control over their own private data. When a user requests that the model refrain from using their private data, Rec4Agentverse should proceed to unlearn such private information.",
+ "- Harmfulness. Agent Items may potentially generate harmful textual responses [2, 22], which deserves our attention in the application of Rec4Agentverse. Furthermore, Agent Items might be manipulated to execute harmful actions for users, for example, fraudulent transactions. It is essential to regulate the harmfulness of Rec4Agentverse in terms of both generated content and executed actions.",
+ "- Robustness. LLMs may not be robust to malicious prompts [26, 55]. In contrast to conventional recommendation systems, Rec4Agentverse involves extensive use of prompts to interact with the user, requiring Rec4Agentverse to be robust to malicious"
+ ],
+ "bbox": [
+ 514,
+ 580,
+ 913,
+ 857
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "header",
+ "text": "Prospect Personalized Recommendation on Large Language Model-based Agent Platform",
+ "bbox": [
+ 83,
+ 75,
+ 508,
+ 87
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 630,
+ 75,
+ 913,
+ 85
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "page_number",
+ "text": "5",
+ "bbox": [
+ 493,
+ 898,
+ 501,
+ 907
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "prompts. If Rec4Agentverse is not sufficiently robust, it is susceptible to prompt injection attacks or data poisoning, which can lead to performance degradation or harmful output.",
+ "bbox": [
+ 94,
+ 107,
+ 482,
+ 148
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "- Environmental Friendliness. Considering the high energy consumption of LLM training and inference [52, 65], ensuring the environmental friendliness of Rec4Agentverse is a crucial concern. We should strive to reduce energy consumption and carbon emissions during the training, deployment, and inference processes of Rec4Agentverse.",
+ "bbox": [
+ 84,
+ 148,
+ 482,
+ 232
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "3 DISCUSSION",
+ "text_level": 1,
+ "bbox": [
+ 84,
+ 246,
+ 220,
+ 258
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "In this section, we contrast our proposed Rec4Agentverse paradigm with existing recommendation paradigms: retrieval-based recommendation and generative recommendation [45]. Because of the distinctive characteristics of our paradigm's Agent Item and Agent Recommender, such as their powerful interactive capabilities and their proactiveness in exploring user preferences, the Rec4Agentverse paradigm diverges from the traditional paradigms. Here, we delve into two transformative changes from the viewpoints of user preference modeling and of collaborative mechanisms among the system's entities.",
+ "bbox": [
+ 81,
+ 263,
+ 482,
+ 402
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "User Preference Modeling: Beyond merely summarizing user preferences from passively received user interactions with items, as done in conventional paradigms, in our paradigm both Agent Recommender and Agent Item can actively acquire information to enhance user preference modeling. In traditional paradigms, the interactive capability of the recommender and items is limited, particularly for items such as movies and games that cannot engage in verbal communication. Consequently, user preference modeling in these paradigms typically relies on passively received feedback<sup>3</sup>. However, in our paradigm, both the recommender and the item have the ability to actively interact with users through dialogue to directly acquire user preference information or to collect further feedback for preference refinement, enhancing user preference modeling.",
+ "bbox": [
+ 81,
+ 412,
+ 482,
+ 592
+ ],
+ "page_idx": 5
+ },
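Editor's note: a toy contrast between the two acquisition modes described above, with passive aggregation of click feedback versus an active clarifying question. `ask_user` is a hypothetical stand-in for one dialogue turn with the user; nothing here is from the paper itself.

```python
# Passive preference modeling (aggregate clicks) vs. active elicitation
# (ask, then keep only confirmed interests).

def passive_profile(click_log: list[str]) -> dict[str, int]:
    # Conventional paradigm: infer preferences only from received feedback.
    profile: dict[str, int] = {}
    for item in click_log:
        profile[item] = profile.get(item, 0) + 1
    return profile

def active_profile(profile: dict[str, int], ask_user) -> dict[str, int]:
    # Agent paradigm: pose a clarifying question and refine the profile.
    answer = ask_user("Which of these do you actually care about: "
                      + ", ".join(profile) + "?")
    return {item: count for item, count in profile.items() if item in answer}

clicks = ["hiking", "hiking", "beach", "museum"]
profile = passive_profile(clicks)                        # {'hiking': 2, 'beach': 1, 'museum': 1}
refined = active_profile(profile, lambda q: "hiking and museums")
print(refined)                                           # {'hiking': 2, 'museum': 1}
```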
+ {
+ "type": "text",
+ "text": "Collaboration Mechanisms: In our paradigm, collaboration between recommenders and items becomes increasingly close and extensive. In contrast, traditional paradigms encounter challenges in actively fostering collaboration between items, or between items and recommenders, once an item is recommended. These enhanced collaborations undoubtedly elevate the service quality of both the Agent Recommender and Agent Item. For instance, in our paradigm, when a recommended item falls short of fully meeting the user's needs due to its limitations, it can initiate communication with the recommender or collaborate with other Agent Items to address these shortcomings and better align with the user's preferences. Conversely, in traditional paradigms, users often need to turn to the recommender system for another recommendation, perpetuating the iterative process, which would diminish users' enthusiasm. Another example is that Agent Recommender can enrich the user profile by engaging in conversations with Agent Items that the user has interacted with in the past or is currently engaging with, thereby facilitating more effective recommendations.",
+ "bbox": [
+ 81,
+ 602,
+ 482,
+ 851
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "image",
+ "img_path": "images/f4d00c0fd7065a778af51ce233c19311696c65b45eee6052c927da93112e00df.jpg",
+ "image_caption": [
+ "Figure 5: A case of the User-Agent Interaction stage. The user expressed the desire for a Travel Agent to Agent Recommender and received a recommendation in return. Subsequently, the user and the Travel Agent engaged in interactions to make the travel plan."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 517,
+ 104,
+ 934,
+ 431
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Overall, these changes stem from the revolution of information flow dynamics in our paradigm, leading to a more decentralized approach to information management. In contrast to traditional paradigms where management is predominantly centralized within the recommender system, our paradigm grants greater autonomy to Agent Items in managing this process. This enables Agent Items to engage with users more independently and proactively, thereby improving their capacity to capture and manage user preference information effectively on the one hand, and enabling them to freely display information to users on the other. Simultaneously, this facilitates the exchange or feedback of such information among Agent Items or between Agent Items and Agent Recommender, fostering improved collaboration in final user modeling.",
+ "bbox": [
+ 511,
+ 540,
+ 913,
+ 720
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "4 DEMONSTRATION",
+ "text_level": 1,
+ "bbox": [
+ 514,
+ 732,
+ 702,
+ 744
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "In this section, we explore the three stages of Rec4Agentverse through case studies, focusing on the feasibility and potential formats of the paradigm. We present a case study involving a traveler who utilizes Rec4Agentverse throughout his/her journey, examining how Agent Recommender and Agent Item work and affect the user experience at each stage. This case study is based on \"gpt-4-32k\". Due to space constraints, we provide only the essential parts of the case study here, with additional details available on GitHub<sup>4</sup>. It is important to note that our case study serves as a",
+ "bbox": [
+ 511,
+ 750,
+ 913,
+ 876
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 84,
+ 75,
+ 364,
+ 85
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "header",
+ "text": "Jizhi Zhang, et al.",
+ "bbox": [
+ 826,
+ 75,
+ 911,
+ 87
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "page_footnote",
+ "text": "3One exception is interactive recommendation; however, its items do not have interactive capability.",
+ "bbox": [
+ 81,
+ 873,
+ 482,
+ 896
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "page_footnote",
+ "text": "4https://github.com/jizhi-zhang/Rec4Agentverse_Case",
+ "bbox": [
+ 514,
+ 883,
+ 772,
+ 895
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "page_number",
+ "text": "6",
+ "bbox": [
+ 493,
+ 900,
+ 501,
+ 907
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "image",
+ "img_path": "images/a85bac6c4ef745f627bee8784480132eebe55864b6c0631fee5e1acfe85e0b21.jpg",
+ "image_caption": [
+ "Figure 6: Cases of three scenarios, namely Evolvement, Agent Feedback, and Proactive, at the Agent-Recommender Collaboration stage of Rec4Agentverse. (a) For the Evolvement scenario, Agent Item has the ability to enhance itself, with the help of Agent Recommender, based on the user's preferences. (b) For the Agent Feedback scenario, Agent Item submits the user's preferences to Agent Recommender so that Agent Recommender can provide better recommendations. (c) For the Proactive scenario, Agent Recommender provides the eco-friendly target to Agent Item, and Agent Item successfully achieves the eco-friendly target in its interaction with the user."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 91,
+ 112,
+ 913,
+ 500
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "preliminary indication of the feasibility of different stages within the Rec4Agentverse paradigm, and it does not fully encompass all the potential applications of our paradigm.",
+ "bbox": [
+ 81,
+ 619,
+ 482,
+ 662
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "4.1 Stage 1: User-Agent Interaction",
+ "text_level": 1,
+ "bbox": [
+ 83,
+ 683,
+ 382,
+ 699
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "In the User-Agent Interaction stage, as shown in Figure 4(a), Agent Item primarily engages in interactions with the user, facilitating efficient information exchange between Agent Item and the user. To demonstrate this, we present a scenario where a user expresses a desire to travel to Nepal and interacts with Agent Recommender and the recommended Travel Agent, as shown in Figure 5. The user initially seeks assistance from Agent Recommender to find a Travel Agent. Upon inquiring about the user's preferences, Agent Recommender customizes a Travel Agent specifically tailored to the user's needs. Subsequently, after further determining the user's interests, this Agent devises a comprehensive travel itinerary for the user. There are therefore two main information flows: one between the user and Agent Recommender, and one between the user and Agent Item.",
+ "bbox": [
+ 81,
+ 702,
+ 482,
+ 896
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "4.1.1 Information Flow between User and Agent Recommender. As depicted in Figure 5, in this example, in addition to passively receiving requests from the user, Agent Recommender can actively engage with the user to improve its recommendations. For instance, after the user expresses a desire to find a Travel Agent through dialogue, Agent Recommender can proactively pose questions to obtain a more detailed picture of the user's high-level travel preferences. With this additional feedback from the user, Agent Recommender can then provide an accurate recommendation for a Travel Agent. This process bears some resemblance to traditional interactive recommendation methods.",
+ "bbox": [
+ 511,
+ 619,
+ 915,
+ 771
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "4.1.2 Information Flow between User and Agent Item. As illustrated in Figure 5, in stark contrast to the traditional paradigm, Agent Item is capable of interacting directly with the user. In our example, the Travel Agent initially learns about the user's interest in traveling to Nepal and their request for a travel plan. Subsequently, it could inquire further to uncover more specific preferences, obtaining the user's inclination to visit the \"Everest Base Camp\". This",
+ "bbox": [
+ 511,
+ 799,
+ 915,
+ 896
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "header",
+ "text": "Prospect Personalized Recommendation on Large Language Model-based Agent Platform",
+ "bbox": [
+ 83,
+ 75,
+ 508,
+ 87
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 630,
+ 75,
+ 913,
+ 87
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "page_number",
+ "text": "7",
+ "bbox": [
+ 493,
+ 898,
+ 501,
+ 907
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "image",
+ "img_path": "images/9b37746605f78168efbdbf3e617a8e349a642c31f2468fbf3936b3dd2f1cb04e.jpg",
+ "image_caption": [
+ "Figure 7: Preliminary case study of the Agents Collaboration stage. When the user asks about a travel plan for Nepal, the Travel Agent requests a Nepal-specific Local Agent from Agent Recommender to solve this problem. Through conversation with the Local Agent, the Travel Agent obtains up-to-date information about Nepal, which helps it plan the travel tour for the user."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 84,
+ 104,
+ 911,
+ 364
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "exchange of information allows Agent Item to develop a deeper understanding of the user's preferences, thereby enhancing its ability to provide tailored services to users.",
+ "bbox": [
+ 81,
+ 438,
+ 482,
+ 481
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "4.2 Stage 2: Agent-Recommender Collaboration",
+ "text_level": 1,
+ "bbox": [
+ 83,
+ 494,
+ 480,
+ 511
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "In the agent-recommender collaboration stage, as depicted in Figure 4(b), there is potential for further information exchange between Agent Item and Agent Recommender. This exchange opens up three promising possibilities:",
+ "bbox": [
+ 81,
+ 513,
+ 482,
+ 570
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- Evolvement: Agent Item can undergo evolution by itself or with guidance from Agent Recommender.",
+ "- Agent Feedback: Agent Item can provide valuable feedback to Agent Recommender.",
+ "- Proactive: Agent Item can follow instructions from Agent Recommender to accomplish specific objectives."
+ ],
+ "bbox": [
+ 83,
+ 575,
+ 482,
+ 657
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "We illustrate these possibilities by extending the travel example, as depicted in Figure 6.",
+ "bbox": [
+ 81,
+ 662,
+ 482,
+ 691
+ ],
+ "page_idx": 7
+ },
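Editor's note: one way to picture the three possibilities as a tiny message protocol. The dict-based format and the `handle` dispatcher are assumptions for illustration only, mirroring Evolvement (prompt update), Agent Feedback (report to the recommender), and Proactive (injected objective):

```python
# Sketch of the three Stage-2 message kinds handled by an Agent Item.

def handle(agent_state: dict, message: dict) -> dict:
    kind = message["kind"]
    if kind == "evolve":
        # Evolvement: fold new knowledge into the Agent Item's own prompt.
        agent_state["prompt"] += f"\nUser preference: {message['summary']}"
    elif kind == "feedback":
        # Agent Feedback: queue a preference summary for Agent Recommender.
        agent_state["outbox"].append({"to": "recommender", "summary": message["summary"]})
    elif kind == "proactive":
        # Proactive: accept an objective injected by Agent Recommender.
        agent_state["objectives"].append(message["objective"])
    return agent_state

state = {"prompt": "You are a travel agent.", "outbox": [], "objectives": []}
state = handle(state, {"kind": "evolve", "summary": "hiking, cultural, natural"})
state = handle(state, {"kind": "proactive", "objective": "prefer eco-friendly options"})
print(state["prompt"])
print(state["objectives"])  # ['prefer eco-friendly options']
```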
+ {
+ "type": "text",
+ "text": "4.2.1 Evolvement. Thanks to its ability to gather information from users and Agent Recommender, Agent Item can acquire valuable knowledge to achieve evolution, helping enhance future services. In the example illustrated in Figure 6, Agent Item can leverage the knowledge summarized by itself and obtained from Agent Recommender to achieve evolution. This evolution may involve improving its prompts, for instance. As a result, when the user makes their next request for a trip to a new destination, e.g., Switzerland, the system will promptly present a travel itinerary that directly aligns with the user's personal preferences, taking into account their inclination towards \"hiking, cultural, and natural\" experiences. This process of evolution enables the continuous tracking of user information and alleviates the burden on users to express their preferences in great detail in future interactions.",
+ "bbox": [
+ 81,
+ 700,
+ 482,
+ 896
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "4.2.2 Agent Feedback. Agent Item can also contribute feedback, namely agent feedback, to enhance the services of Agent Recommender in the future. In our example depicted in Figure 6, the recommended Travel Agent can provide a summarized understanding of the user's preferences, such as \"cultural, natural and so on\", to Agent Recommender. Agent Recommender can then absorb this knowledge and improve its future services accordingly. Then, when a new request for a \"Cloth Agent\" arises, Agent Recommender can directly inquire whether the user is interested in environmentally friendly or culturally significant attire, based on the knowledge obtained from the Travel Agent. Through this information exchange, Agent Recommender can significantly enhance its services.",
+ "bbox": [
+ 511,
+ 438,
+ 915,
+ 604
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "4.2.3 Proactive. Here, \"Proactive\" refers to the ability of Agent Items to autonomously accomplish specific objectives, which can originate from the Agent platform itself or aim to better align with user interests. In the example shown in Figure 6, we assume that Agent Recommender has prior knowledge of the user's inclination towards eco-friendly options. Therefore, before the user initiates their interaction, Agent Recommender injects this eco-friendly objective into the recommended Travel Agent. Consequently, when the user engages with the Travel Agent, it provides environmentally friendly travel options that fulfill the eco-friendly requirement. This proactive characteristic enhances user satisfaction and tailors the experience to the user's specific interests.",
+ "bbox": [
+ 511,
+ 619,
+ 913,
+ 787
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "4.3 Stage 3: Agents Collaboration",
+ "text_level": 1,
+ "bbox": [
+ 513,
+ 806,
+ 799,
+ 823
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "Compared to the other two stages, the collaboration stage of the agents allows for further exchange of information among Agent Items, as depicted in Figure 4(c), enabling them to collaborate and enhance services for users. In the Travel Agent case illustrated in Figure 7, we present a potential example where multiple agents",
+ "bbox": [
+ 511,
+ 825,
+ 911,
+ 896
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 84,
+ 75,
+ 364,
+ 85
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "header",
+ "text": "Jizhi Zhang, et al.",
+ "bbox": [
+ 826,
+ 75,
+ 911,
+ 87
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "page_number",
+ "text": "8",
+ "bbox": [
+ 493,
+ 900,
+ 501,
+ 907
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "collaborate to complete the travel planning process. Here's a step-by-step breakdown of the collaboration process:",
+ "bbox": [
+ 81,
+ 107,
+ 485,
+ 136
+ ],
+ "page_idx": 8
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- The user starts a conversation with Agent Recommender, expressing the desire to plan a travel tour.",
+ "- Agent Recommender suggests a Travel Agent whose goal is to help with travel tour planning.",
+ "- The user subsequently requests the Travel Agent to create a travel itinerary specifically tailored for Nepal.",
+ "- To acquire the latest information about Nepal, the Travel Agent sends a request to Agent Recommender for an Agent Item. This Agent Item should be able to provide up-to-date local information on Nepal, which will assist in creating the travel plan.",
+ "- Agent Recommender responds by recommending a Local Agent who is knowledgeable about the current situation in Nepal.",
+ "- Ultimately, the Travel Agent integrates the current information about Nepal provided by the Local Agent into the travel itinerary design process to fulfill the user's needs."
+ ],
+ "bbox": [
+ 83,
+ 137,
+ 482,
+ 345
+ ],
+ "page_idx": 8
+ },
+ {
+ "type": "text",
+ "text": "The case suggests that collaborative cooperation among agents enables them to communicate more effectively and share information with each other. This exchange process significantly enriches their shared knowledge base. As a result, these agents are better equipped to address and cater to a more diverse and comprehensive range of user needs, thereby enhancing overall user satisfaction.",
+ "bbox": [
+ 81,
+ 349,
+ 483,
+ 446
+ ],
+ "page_idx": 8
+ },
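Editor's note: a minimal sketch of the collaboration flow broken down above, assuming plain Python objects in place of LLM-backed agents; the Travel Agent requests a helper Local Agent from the recommender when it lacks local knowledge. All class names are hypothetical stand-ins.

```python
# Agents Collaboration sketch: Travel Agent -> Recommender -> Local Agent.

class LocalAgent:
    def __init__(self, region: str, facts: str):
        self.region, self.facts = region, facts

    def answer(self, _question: str) -> str:
        return self.facts   # stand-in for an LLM call grounded in local data

class Recommender:
    def __init__(self, agents: dict[str, LocalAgent]):
        self.agents = agents

    def request_agent(self, region: str) -> LocalAgent | None:
        return self.agents.get(region)   # recommend a matching Agent Item

class TravelAgent:
    def plan(self, region: str, recommender: Recommender) -> str:
        helper = recommender.request_agent(region)   # ask for a Local Agent
        if helper is None:
            return f"Sorry, I have no current information on {region}."
        info = helper.answer(f"What should travelers know about {region}?")
        return f"Itinerary for {region} (using local info: {info})"

rec = Recommender({"Nepal": LocalAgent("Nepal", "trekking permits required")})
print(TravelAgent().plan("Nepal", rec))
```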
+ {
+ "type": "text",
+ "text": "5 RELATED WORK",
+ "text_level": 1,
+ "bbox": [
+ 83,
+ 458,
+ 259,
+ 472
+ ],
+ "page_idx": 8
+ },
+ {
+ "type": "text",
+ "text": "In this section, we will mainly discuss two types of related work: LLM-based Recommendation and LLM-based Agent. In the subsection on LLM-based Recommendation, we will emphasize the distinction between Rec4Agentverse and the current LLM-based Agent for recommendations.",
+ "bbox": [
+ 81,
+ 476,
+ 483,
+ 546
+ ],
+ "page_idx": 8
+ },
+ {
+ "type": "text",
+ "text": "5.1 LLM for Recommendation",
+ "text_level": 1,
+ "bbox": [
+ 83,
+ 559,
+ 344,
+ 573
+ ],
+ "page_idx": 8
+ },
+ {
+ "type": "text",
+ "text": "With the emergence of powerful models like ChatGPT and their overwhelming advantages demonstrated in other domains [29, 62], an increasing number of researchers in the recommendation community have begun to explore the potential of applying LLMs to recommendation systems [14, 31, 32, 48, 54]. This can be divided into two categories. One category advocates for directly leveraging the capabilities of LLMs by utilizing appropriate prompts to stimulate their abilities in recommendation scenarios [10, 35]. Some researchers directly employ LLMs for reordering [20], while others distill the world knowledge of LLMs into traditional recommendation models to enhance recommendation performance [47, 50]. Another group of researchers, however, believes that LLMs rarely encounter recommendation tasks during pre-training, and recommendation data often possess privatized characteristics [5]. Therefore, there is an urgent need to explore the use of tuning methods to improve the recommendation performance of LLMs. In this regard, researchers have utilized instruction tuning to enable models to quickly learn various types of recommendation tasks in a generative manner [4, 15, 59], yielding significant improvements. Furthermore, it has been discovered that injecting collaborative information into LLMs through post-processing or modal injection methods can further enhance the performance of LLM-based Recommendations [61, 63].",
+ "bbox": [
+ 81,
+ 577,
+ 485,
+ 896
+ ],
+ "page_idx": 8
+ },
+ {
+ "type": "text",
+ "text": "LLM-based Agents for Recommendation. Following the surge in popularity of LLMs, an increasing number of researchers have explored the use of LLMs to simulate social environments and perform various complex tasks [42, 51]. This has also promptly captured the attention of researchers in the field of recommender systems. Specifically, some researchers aim to simulate users using agents, creating a virtual recommendation system environment (e.g., RecAgent [43], Agent4Rec [56]) to explore the social impact of recommendation algorithms within this simulated environment. Another group of researchers seeks to enhance recommendation performance through the use of agents. In this context, InteRecAgent [21] encapsulates LLMs as agents endowed with memory capabilities and scheduling proficiency, thereby enhancing recommendation performance. AgentCF [58] makes a first attempt to treat both Users and Items as agents, optimizing their self-introductions via the User's interactions with positive and negative Items. The primary distinction between our work in this paper and the aforementioned research lies in their failure to transcend the confines of traditional recommendation paradigms. Their objective remains confined to recommending passive items (e.g., movies and games), meaning those items cannot actively interact with the user to obtain the user's intentions and preferences. Conversely, as discussed above, we focus on the case where the recommended items are themselves LLM-based Agents that are interactive, intelligent, and proactive. Such agents can be viewed as a new type of information carrier and information dissemination tool, bringing about changes in information systems.",
+ "bbox": [
1852
+ 511,
1853
+ 107,
1854
+ 916,
1855
+ 482
1856
+ ],
1857
+ "page_idx": 8
1858
+ },
1859
+ {
1860
+ "type": "text",
1861
+ "text": "5.2 LLM-based Agents",
1862
+ "text_level": 1,
1863
+ "bbox": [
1864
+ 514,
1865
+ 517,
1866
+ 709,
1867
+ 532
1868
+ ],
1869
+ "page_idx": 8
1870
+ },
1871
+ {
1872
+ "type": "text",
1873
+ "text": "LLM-based agents have been deployed in various domains to address a wide range of specific tasks, showcasing the robust and comprehensive capabilities of agents [9, 42, 51]. Some researchers are dedicated to exploring the upper limits of single-agent capabilities. They endeavor to utilize a solitary agent that can effectively adhere to a wide array of user instructions and successfully tackle a diverse range of complex tasks in both daily life [18, 41, 64] and academic endeavors [7, 24]. Meanwhile, there is another faction of researchers who approach the birth of human social intelligence and believe that collective intelligence will bring about a more prosperous advancement [19]. They endeavor to enhance the problem-solving capacity of such groups compared to individuals by means of collaborative efforts among multiple agents [36, 49] or through agents engaging in mutual critique [8, 53]. In addition to these aspects, researchers are also devoted to exploring the interaction between the Agent and its environment to enhance the capabilities of the Agent. This encompasses the interaction between the Agent and humans to obtain human feedback [3, 13, 27] and the interaction between the Agent and the physical world through visual/audio modules to acquire additional knowledge [6, 12], and so on. In our Rec4Agentverse framework, as previously mentioned, we must delve into the profound application of recommendation systems. This will enhance the ability of single agents to fulfill user needs, the collaborative capacity of multi-agents in aiding users, and the agents' ability to acquire feedback from the physical world and users themselves for self-evolution.",
1874
+ "bbox": [
1875
+ 511,
1876
+ 536,
1877
+ 916,
1878
+ 896
1879
+ ],
1880
+ "page_idx": 8
1881
+ },
1882
+ {
1883
+ "type": "header",
1884
+ "text": "Prospect Personalized Recommendation on Large Language Model-based Agent Platform",
1885
+ "bbox": [
1886
+ 83,
1887
+ 75,
1888
+ 506,
1889
+ 87
1890
+ ],
1891
+ "page_idx": 8
1892
+ },
1893
+ {
1894
+ "type": "header",
1895
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
1896
+ "bbox": [
1897
+ 630,
1898
+ 75,
1899
+ 913,
1900
+ 85
1901
+ ],
1902
+ "page_idx": 8
1903
+ },
1904
+ {
1905
+ "type": "page_number",
1906
+ "text": "9",
1907
+ "bbox": [
1908
+ 493,
1909
+ 898,
1910
+ 503,
1911
+ 907
1912
+ ],
1913
+ "page_idx": 8
1914
+ },
1915
+ {
1916
+ "type": "text",
1917
+ "text": "6 CONCLUSION",
1918
+ "text_level": 1,
1919
+ "bbox": [
1920
+ 83,
1921
+ 104,
1922
+ 230,
1923
+ 119
1924
+ ],
1925
+ "page_idx": 9
1926
+ },
1927
+ {
1928
+ "type": "text",
1929
+ "text": "In this paper, we explore the application of recommender systems within an LLM-based Agents platform, examining how the unique characteristics of these agents - robust interactivity, intelligence, and proactiveness - alter the flow and presentation of information. This shift significantly impacts the function of recommendation systems, which are fundamentally designed to control information flow within systems. We thus introduce a new paradigm, Rec4Agentverse, consisting of two elements: Agent Item and Agent Recommender. This paradigm is developed in three stages, each designed to enhance the interaction and information exchange among users, Agent Recommender, and Agent Items. Then, we devised a simulated scenario to illustrate a user's need for travel planning assistance. Within this context, we conducted a comprehensive analysis of the three developmental phases of Rec4Agentverse. Our focus was on elucidating the unique attributes of each stage and exploring the scalability and potential of this paradigm. Through this simulation, we succinctly and clearly illustrate our understanding of the evolution of the system and its potential for future growth.",
+ "bbox": [
+ 81,
+ 125,
+ 482,
+ 375
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "text",
+ "text": "Moreover, it is important to note that Rec4Agentverse represents a novel recommendation paradigm that still requires extensive exploration in terms of its applicable fields, potential development directions, and existing risk issues. In light of this, we also delve into these aspects intending to inspire and encourage further advancements in the field. Looking forward, we aim to investigate the Rec4Agentverse paradigm through quantitative research methods and explore strategies for its practical implementation.",
1941
+ "bbox": [
1942
+ 81,
1943
+ 375,
1944
+ 482,
1945
+ 484
1946
+ ],
1947
+ "page_idx": 9
1948
+ },
1949
+ {
1950
+ "type": "text",
1951
+ "text": "REFERENCES",
1952
+ "text_level": 1,
1953
+ "bbox": [
1954
+ 84,
1955
+ 494,
1956
+ 202,
1957
+ 508
1958
+ ],
1959
+ "page_idx": 9
1960
+ },
1961
+ {
1962
+ "type": "list",
1963
+ "sub_type": "ref_text",
1964
+ "list_items": [
1965
+ "[1] Mahyar Abbasian, Iman Azimi, Amir M Rahmani, and Ramesh Jain. 2023. Conversational health agents: A personalized lvm-powered agent framework. arXiv preprint arXiv:2310.02374 (2023).",
1966
+ "[2] Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, et al. 2022. Constitutional ai: Harmlessness from ai feedback. arXiv preprint arXiv:2212.08073 (2022).",
+ "[3] Anton Bakhtin, David J Wu, Adam Lerer, Jonathan Gray, Athul Paul Jacob, Gabriele Farina, Alexander H Miller, and Noam Brown. 2022. Mastering the game of no-press Diplomacy via human-regularized reinforcement learning and planning. arXiv preprint arXiv:2210.05492 (2022).",
+ "[4] Keqin Bao, Jizhi Zhang, Wenjie Wang, Yang Zhang, Zhengyi Yang, Yancheng Luo, Fuli Feng, Xiangnan He, and Qi Tian. 2023. A Bi-Step Grounding Paradigm for Large Language Models in Recommendation Systems. arXiv preprint arXiv:2308.08434 (2023).",
+ "[5] Keqin Bao, Jizhi Zhang, Yang Zhang, Wenjie Wang, Fuli Feng, and Xiangnan He. 2023. TALLRec: An Effective and Efficient Tuning Framework to Align Large Language Model with Recommendation. In Proceedings of the 17th ACM Conference on Recommender Systems (Singapore, Singapore) (RecSys '23). Association for Computing Machinery, 1007-1014.",
+ "[6] Homanga Bharadhwaj, Jay Vakil, Mohit Sharma, Abhinav Gupta, Shubham Tulsiani, and Vikash Kumar. 2023. Roboagent: Generalization and efficiency in robot manipulation via semantic augmentations and action chunking. arXiv preprint arXiv:2309.01918 (2023).",
+ "[7] Andres M Bran, Sam Cox, Andrew D White, and Philippe Schwaller. 2023. ChemCrow: Augmenting large-language models with chemistry tools. arXiv preprint arXiv:2304.05376 (2023).",
+ "[8] Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. 2023. Chateval: Towards better IIm-based evaluators through multi-agent debate. arXiv preprint arXiv:2308.07201 (2023).",
1973
+ "[9] Weize Chen, Yusheng Su, Jingwei Zuo, Cheng Yang, Chenfei Yuan, Chen Qian, Chi-Min Chan, Yuja Qin, Yaxi Lu, Ruobing Xie, et al. 2023. Agentverse: Facilitating multi-agent collaboration and exploring emergent behaviors in agents. arXiv preprint arXiv:2308.10848 (2023).",
1974
+ "[10] Sunhao Dai et al. 2023. Uncovering ChatGPT's Capabilities in Recommender Systems. In RecSys, Jie Zhang, Li Chen, Shlomo Berkovsky, Min Zhang, Tommaso Di Noia, Justin Basilico, Luiz Pizzato, and Yang Song (Eds.). ACM, 1126-1132.",
+ "[11] Yashar Deldjoo. 2024. Understanding Biases in ChatGPT-based Recommender Systems: Provider Fairness, Temporal Stability, and Recency. arXiv preprint"
+ ],
+ "bbox": [
+ 86,
+ 512,
+ 482,
+ 902
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "arXiv:2401.10545 (2024).",
+ "[12] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayaan Wahid, Jonathan Thompson, Quan Vuong, Tianhe Yu, et al. 2023. Palm-e: An embodied multimodal language model. arXiv preprint arXiv:2303.03378 (2023).",
1991
+ "[13] Meta Fundamental AI Research Diplomacy Team (FAIR)†, Anton Bakhtin, Noam Brown, Emily Dinan, Gabriele Farina, Colin Flaherty, Daniel Fried, Andrew Goff, Jonathan Gray, Hengyuan Hu, et al. 2022. Human-level play in the game of Diplomacy by combining language models with strategic reasoning. Science 378, 6624 (2022), 1067–1074.",
+ "[14] Wenqi Fan, Zihuiu Zhao, Jiatong Li, Yunqing Liu, Xiaowei Mei, Yiqi Wang, Jiliang Tang, and Qing Li. 2023. Recommender systems in the era of large language models (llms). arXiv preprint arXiv:2307.02046 (2023).",
1993
+ "[15] Yue Feng, Shuchang Liu, Zhenghai Xue, Qingpeng Cai, Lantao Hu, Peng Jiang, Kun Gai, and Fei Sun. 2023. A Large Language Model Enhanced Conversational Recommender System. CoRR abs/2308.06212 (2023). https://doi.org/10.48550/arXiv.2308.06212 arXiv:2308.06212",
+ "[16] Yingqiang Ge, Shuchang Liu, Ruoyuan Gao, Yikun Xian, Yunqi Li, Xiangyu Zhao, Changhua Pei, Fei Sun, Junfeng Ge, Wenwu Ou, and Yongfeng Zhang. 2021. Towards long-term fairness in recommendation. In Proceedings of the 14th ACM international conference on web search and data mining. 445-453.",
+ "[17] Albert Gu and Tri Dao. 2023. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752 (2023).",
+ "[18] Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. 2023. A real-world webagent with planning, long context understanding, and program synthesis. arXiv preprint arXiv:2307.12856 (2023).",
+ "[19] Rui Hao, Linmei Hu, Weijian Qi, Qingliu Wu, Yirui Zhang, and Liqiang Nie. 2023. ChatLLM Network: More brains, More intelligence. arXiv preprint arXiv:2304.12998 (2023).",
+ "[20] Yupeng Hou, Junjie Zhang, Zihan Lin, Hongyu Lu, Ruobing Xie, Julian McAuley, and Wayne Xin Zhao. 2023. Large language models are zero-shot rankers for recommender systems. arXiv preprint arXiv:2305.08845 (2023).",
+ "[21] Xu Huang, Jianxun Lian, Yuxuan Lei, Jing Yao, Defu Lian, and Xing Xie. 2023. Recommender ai agent: Integrating large language models for interactive recommendations. arXiv preprint arXiv:2308.16505 (2023).",
+ "[22] Jiaming Ji, Tianyi Qiu, Boyuan Chen, Borong Zhang, Hantao Lou, Kaile Wang, Yawen Duan, Zhonghao He, Jiayi Zhou, Zhaowei Zhang, et al. 2023. Ai alignment: A comprehensive survey. arXiv preprint arXiv:2310.1985 (2023).",
2001
+ "[23] Meng Jiang, Keqin Bao, Jizhi Zhang, Wenjie Wang, Zhengyi Yang, Fuli Feng, and Xiangnan He. 2024. Item-side Fairness of Large Language Model-based Recommendation System. arXiv preprint arXiv:2402.15215 (2024).",
+ "[24] Yeonghun Kang and Jihan Kim. 2023. Chatmf: An autonomous ai system for predicting and generating metal-organic frameworks. arXiv preprint arXiv:2308.01423 (2023).",
2003
+ "[25] Siwon Kim, Sangdoo Yun, Hwaran Lee, Martin Gubri, Sungroh Yoon, and Seong Joon Oh. 2023. Propile: Probing privacy leakage in large language models. arXiv preprint arXiv:2307.01881 (2023).",
+ "[26] Raz Lapid, Ron Langberg, and Moshe Sipper. 2023. Open sesame! universal black box jailbreaking of large language models. arXiv preprint arXiv:2309.01446 (2023).",
+ "[27] Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. 2023. Camel: Communicative agents for \"mind\" exploration of large scale language model society. arXiv preprint arXiv:2303.17760 (2023).",
+ "[28] Haoran Li, Yulin Chen, Jinglong Luo, Yan Kang, Xiaojin Zhang, Qi Hu, Chunkit Chan, and Yangqiu Song. 2023. Privacy in large language models: Attacks, defenses and future directions. arXiv preprint arXiv:2310.10383 (2023).",
+ "[29] Sihang Li, Zhiyuan Liu, Yanchen Luo, Xiang Wang, Xiangnan He, Kenji Kawaguchi, Tat-Seng Chua, and Qi Tian. 2024. Towards 3D Molecule-Text Interpretation in Language Models. ICLR (2024).",
+ "[30] Yunqi Li, Hanxiong Chen, Shuyuan Xu, Yingqiang Ge, Juntao Tan, Shuchang Liu, and Yongfeng Zhang. 2023. Fairness in recommendation: Foundations, methods, and applications. ACM Transactions on Intelligent Systems and Technology (2023).",
+ "[31] Jianghao Lin, Xinyi Dai, Yunjia Xi, Weiwen Liu, Bo Chen, Xiangyang Li, Chenxu Zhu, Huifeng Guo, Yong Yu, Ruiming Tang, et al. 2023. How Can Recommender Systems Benefit from Large Language Models: A Survey. arXiv preprint arXiv:2306.05817 (2023).",
+ "[32] Xinyu Lin, Wenjie Wang, Yongqi Li, Fuli Feng, See-Kiong Ng, and Tat-Seng Chua. 2023. A multi-facet paradigm to bridge large language model and recommendation. arXiv preprint arXiv:2310.06491 (2023).",
+ "[33] Yen-Ting Lin and Yun-Nung Chen. 2023. LLM-Eval: Unified Multi-Dimensional Automatic Evaluation for Open-Domain Conversations with Large Language Models. arXiv preprint arXiv:2305.13711 (2023).",
+ "[34] Bo Liu, Yuqian Jiang, Xiaohan Zhang, Qiang Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. 2023. Lm+ p: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477 (2023).",
2013
+ "[35] Junling Liu, Chao Liu, Renjie Lv, Kang Zhou, and Yan Zhang. 2023. Is chatgpt a good recommender? a preliminary study. arXiv preprint arXiv:2304.10149 (2023)."
+ ],
+ "bbox": [
+ 517,
+ 109,
+ 913,
+ 885
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "header",
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
+ "bbox": [
+ 84,
+ 75,
+ 364,
+ 85
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "header",
+ "text": "Jizhi Zhang, et al.",
+ "bbox": [
+ 826,
+ 75,
+ 911,
+ 87
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "page_number",
+ "text": "10",
+ "bbox": [
+ 493,
+ 898,
+ 504,
+ 907
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "[36] Zhao Mandi, Shreeya Jain, and Shuran Song. 2023. Roco: Dialectic multi-robot collaboration with large language models. arXiv preprint arXiv:2307.04738 (2023).",
+ "[37] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology. 1-22.",
+ "[38] Chen Qian, Xin Cong, Cheng Yang, Weize Chen, Yusheng Su, Juyuan Xu, Zhiyuan Liu, and Maosong Sun. 2023. Communicative agents for software development. arXiv preprint arXiv:2307.07924 (2023).",
+ "[39] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-llm: A trainable agent for role-playing. arXiv preprint arXiv:2310.10158 (2023).",
2064
+ "[40] Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. 2023. Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 2998-3009.",
2065
+ "[41] Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291 (2023).",
2066
+ "[42] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, Wayne Xin Zhao, Zhewei Wei, and Ji-Rong Wen. 2023. A Survey on Large Language Model based Autonomous Agents. CoRR abs/2308.11432 (2023). https://doi.org/10.48550/ARXIV.2308.11432 arXiv:2308.11432",
2067
+ "[43] Lei Wang, Jingsen Zhang, Xu Chen, Yankai Lin, Ruihua Song, Wayne Xin Zhao, and Ji-Rong Wen. 2023. RecAgent: A Novel Simulation Paradigm for Recommender Systems. arXiv preprint arXiv:2306.02552 (2023).",
2068
+ "[44] Song Wang, Yaochen Zhu, Haochen Liu, Zaiyi Zheng, Chen Chen, et al. 2023. Knowledge editing for large language models: A survey. arXiv preprint arXiv:2310.16218 (2023).",
2069
+ "[45] Wenjie Wang, Xinyu Lin, Fuli Feng, Xiangnan He, and Tat-Seng Chua. 2023. Generative recommendation: Towards next-generation recommender paradigm. arXiv preprint arXiv:2304.03516 (2023).",
2070
+ "[46] Yifan Wang, Weizhi Ma, Min Zhang, Yiqun Liu, and Shaoping Ma. 2023. A survey on the fairness of recommender systems. ACM Transactions on Information Systems (2023).",
2071
+ "[47] Wei Wei, Xubin Ren, Jiabin Tang, Qinyong Wang, Lixin Su, Suqi Cheng, Junfeng Wang, Dawei Yin, and Chao Huang. 2023. Lmrec: Large language models with graph augmentation for recommendation. arXiv preprint arXiv:2311.00423 (2023).",
2072
+ "[48] Likang Wu, Zhi Zheng, Zhaopeng Qiu, Hao Wang, Hongchao Gu, Tingjia Shen, Chuan Qin, Chen Zhu, Hengshu Zhu, Qi Liu, et al. 2023. A Survey on Large Language Models for Recommendation. arXiv preprint arXiv:2305.1986 (2023).",
2073
+ "[49] Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Shaokun Zhang, Erkang Zhu, Beibin Li, Li Jiang, Xiaoyun Zhang, and Chi Wang. 2023. Autogen: Enabling next-gen lmm applications via multi-agent conversation framework. arXiv preprint arXiv:2308.08155 (2023).",
2074
+ "[50] Yunjia Xi, Weiwen Liu, Jianghao Lin, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, Rui Zhang, and Yong Yu. 2023. Towards Open-World Recommendation with Knowledge Augmentation from Large Language Models. CoRR abs/2306.10933 (2023).",
2075
+ "[51] Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, Rui Zheng, Xiaoran Fan, Xiao"
2076
+ ],
+ "bbox": [
+ 84,
+ 108,
+ 482,
+ 623
+ ],
+ "page_idx": 10
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "Wang, Limao Xiong, Yuhao Zhou, Weiran Wang, Changhao Jiang, Yicheng Zou, Xiangyang Liu, Zhangyue Yin, Shihan Dou, Rongxiang Weng, Wensen Cheng, Qi Zhang, Wenjuan Qin, Yongyan Zheng, Xipeng Qiu, Xuanjing Huan, and Tao Gui. 2023. The Rise and Potential of Large Language Model Based Agents: A Survey. CoRR abs/2309.07864 (2023). https://doi.org/10.48550/ARXIV.2309.07864 arXiv:2309.07864",
2090
+ "[52] Mengzhou Xia, Tianyu Gao, Zhiyuan Zeng, and Danqi Chen. 2023. Sheared llama: Accelerating language model pre-training via structured pruning. arXiv preprint arXiv:2310.06694 (2023).",
2091
+ "[53] Kai Xiong, Xiao Ding, Yixin Cao, Ting Liu, and Bing Qin. 2023. Examining the inter-consistency of large language models: An in-depth analysis via debate. Association for Computational Linguistics.",
2092
+ "[54] Zhengyi Yang, Jiancan Wu, Yanchen Luo, Jizhi Zhang, Yancheng Yuan, An Zhang, Wang, and Xiangnan He. 2023. Large language model can interpret latent space of sequential recommender. arXiv preprint arXiv:2310.20487 (2023).",
2093
+ "[55] Jiahao Yu, Xingwei Lin, and Xinyu Xing. 2023. Gptfuzzer: Red teaming large language models with auto-generated jailbreak prompts. arXiv preprint arXiv:2309.10253 (2023).",
2094
+ "[56] An Zhang, Leheng Sheng, Yuxin Chen, Hao Li, Yang Deng, Xiang Wang, and Tat-Seng Chua. 2023. On generative agents in recommendation. arXiv preprint arXiv:2310.10108 (2023).",
2095
+ "[57] Jizhi Zhang, Keqin Bao, Yang Zhang, Wenjie Wang, Fuli Feng, and Xiangnan He. 2023. Is chatgpt fair for recommendation? evaluating fairness in large language model recommendation. arXiv preprint arXiv:2305.07609 (2023).",
2096
+ "[58] Junjie Zhang, Yupeng Hou, Ruobing Xie, Wenqi Sun, Julian McAuley, Wayne Xin Zhao, Leyu Lin, and Ji-Rong Wen. 2023. Agentcf: Collaborative learning with autonomous language agents for recommender systems. arXiv preprint arXiv:2310.09233 (2023).",
2097
+ "[59] Junjie Zhang, Ruobing Xie, Yupeng Hou, Wayne Xin Zhao, Leyu Lin, and Ji-Rong Wen. 2023. Recommendation as instruction following: A large language model empowered recommendation approach. arXiv preprint arXiv:2305.07001 (2023).",
2098
+ "[60] Ningyu Zhang, Yunzhi Yao, Bozhong Tian, Peng Wang, Shumin Deng, Mengru Wang, Zekun Xi, Shengyu Mao, Jintian Zhang, Yuansheng Ni, et al. 2024. A Comprehensive Study of Knowledge Editing for Large Language Models. arXiv preprint arXiv:2401.01286 (2024).",
2099
+ "[61] Yang Zhang, Fuli Feng, Jizhi Zhang, Keqin Bao, Qifan Wang, and Xiangnan He. 2023. Collm: Integrating collaborative embeddings into large language models for recommendation. arXiv preprint arXiv:2310.19488 (2023).",
2100
+ "[62] Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023).",
2101
+ "[63] Bowen Zheng, Yupeng Hou, Hongyu Lu, Yu Chen, Wayne Xin Zhao, and Ji-Rong Wen. 2023. Adapting large language models by integrating collaborative semantics for recommendation. arXiv preprint arXiv:2311.09049 (2023).",
2102
+ "[64] Shuyan Zhou, Frank F Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Yonatan Bisk, Daniel Fried, Uri Alon, et al. 2023. Webarena: A realistic web environment for building autonomous agents. arXiv preprint arXiv:2307.13854 (2023).",
2103
+ "[65] You Zhou, Xiujing Lin, Xiang Zhang, Maolin Wang, Gangwei Jiang, Huakang Lu, Yupeng Wu, Kai Zhang, Zhe Yang, Kehang Wang, et al. 2023. On the opportunities of green computing: A survey. arXiv preprint arXiv:2311.00447 (2023)."
2104
+ ],
2105
+ "bbox": [
2106
+ 517,
2107
+ 109,
2108
+ 913,
2109
+ 621
2110
+ ],
2111
+ "page_idx": 10
2112
+ },
2113
+ {
2114
+ "type": "header",
2115
+ "text": "Prospect Personalized Recommendation on Large Language Model-based Agent Platform",
2116
+ "bbox": [
2117
+ 84,
2118
+ 75,
2119
+ 506,
2120
+ 87
2121
+ ],
2122
+ "page_idx": 10
2123
+ },
2124
+ {
2125
+ "type": "header",
2126
+ "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY",
2127
+ "bbox": [
2128
+ 630,
2129
+ 75,
2130
+ 911,
2131
+ 85
2132
+ ],
2133
+ "page_idx": 10
2134
+ },
2135
+ {
2136
+ "type": "page_number",
2137
+ "text": "11",
2138
+ "bbox": [
2139
+ 493,
2140
+ 900,
2141
+ 504,
2142
+ 907
2143
+ ],
2144
+ "page_idx": 10
2145
+ }
2146
+ ]
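
For anyone consuming the `content_list` files added in this batch, here is a minimal sketch of reading the block structure shown above back into reference strings: a top-level JSON array of objects carrying `type`, `sub_type`, `list_items`, `bbox`, and `page_idx`, exactly as in the diff. The file path, function name, and the column-merge heuristic are illustrative assumptions, not part of the dataset tooling.

```python
import json


def extract_references(path):
    """Return reference strings from `ref_text` list blocks, skipping
    layout blocks such as running headers and page numbers."""
    with open(path, encoding="utf-8") as f:
        blocks = json.load(f)  # top level of the file is a JSON array

    items = []
    for block in blocks:
        if block.get("type") == "list" and block.get("sub_type") == "ref_text":
            items.extend(block.get("list_items", []))

    # Two-column PDFs can split one reference across consecutive blocks
    # (see entry [51] above): a continuation item lacks the "[n]" prefix,
    # so glue it onto the previous entry.
    refs = []
    for item in items:
        if item.startswith("[") or not refs:
            refs.append(item)
        else:
            refs[-1] += " " + item
    return refs


if __name__ == "__main__":
    for ref in extract_references("content_list.json"):  # hypothetical path
        print(ref)
```

A production reader would likely detect continuations from the `bbox` column positions rather than the "[n]" prefix, but the prefix check suffices for the entries shown here.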