SlowGuess committed on
Commit ffbb0a2 · verified · 1 Parent(s): e83af27

Add Batch adfa43dc-542c-404e-a10a-02fdeebe8286

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +64 -0
  2. 2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_content_list.json +0 -0
  3. 2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_model.json +0 -0
  4. 2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_origin.pdf +3 -0
  5. 2401.14xxx/2401.14915/full.md +0 -0
  6. 2401.14xxx/2401.14915/images.zip +3 -0
  7. 2401.14xxx/2401.14915/layout.json +0 -0
  8. 2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_content_list.json +0 -0
  9. 2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_model.json +0 -0
  10. 2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_origin.pdf +3 -0
  11. 2401.14xxx/2401.14939/full.md +515 -0
  12. 2401.14xxx/2401.14939/images.zip +3 -0
  13. 2401.14xxx/2401.14939/layout.json +0 -0
  14. 2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_content_list.json +0 -0
  15. 2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_model.json +0 -0
  16. 2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_origin.pdf +3 -0
  17. 2401.14xxx/2401.14968/full.md +0 -0
  18. 2401.14xxx/2401.14968/images.zip +3 -0
  19. 2401.14xxx/2401.14968/layout.json +0 -0
  20. 2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_content_list.json +2219 -0
  21. 2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_model.json +0 -0
  22. 2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_origin.pdf +3 -0
  23. 2401.15xxx/2401.15006/full.md +361 -0
  24. 2401.15xxx/2401.15006/images.zip +3 -0
  25. 2401.15xxx/2401.15006/layout.json +0 -0
  26. 2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_content_list.json +0 -0
  27. 2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_model.json +0 -0
  28. 2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_origin.pdf +3 -0
  29. 2401.15xxx/2401.15024/full.md +428 -0
  30. 2401.15xxx/2401.15024/images.zip +3 -0
  31. 2401.15xxx/2401.15024/layout.json +0 -0
  32. 2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_content_list.json +2035 -0
  33. 2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_model.json +0 -0
  34. 2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_origin.pdf +3 -0
  35. 2401.15xxx/2401.15077/full.md +349 -0
  36. 2401.15xxx/2401.15077/images.zip +3 -0
  37. 2401.15xxx/2401.15077/layout.json +0 -0
  38. 2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_content_list.json +1130 -0
  39. 2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_model.json +1639 -0
  40. 2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_origin.pdf +3 -0
  41. 2401.15xxx/2401.15170/full.md +198 -0
  42. 2401.15xxx/2401.15170/images.zip +3 -0
  43. 2401.15xxx/2401.15170/layout.json +0 -0
  44. 2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_content_list.json +0 -0
  45. 2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_model.json +0 -0
  46. 2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_origin.pdf +3 -0
  47. 2401.15xxx/2401.15174/full.md +660 -0
  48. 2401.15xxx/2401.15174/images.zip +3 -0
  49. 2401.15xxx/2401.15174/layout.json +0 -0
  50. 2401.15xxx/2401.15204/8af84b54-198a-463e-ad3b-700716af27ab_content_list.json +1155 -0
.gitattributes CHANGED
@@ -10134,3 +10134,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10134
  2402.09xxx/2402.09441/88c10c82-d919-4742-8cf9-51054a35ae67_origin.pdf filter=lfs diff=lfs merge=lfs -text
10135
  2402.10xxx/2402.10926/df5caf3b-0250-4589-b1bf-cea2bd42a85d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10136
  2404.16xxx/2404.16038/6a3384c5-356e-4b1b-a9cc-eb0df2f7634b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10137
+ 2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_origin.pdf filter=lfs diff=lfs merge=lfs -text
10138
+ 2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10139
+ 2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_origin.pdf filter=lfs diff=lfs merge=lfs -text
10140
+ 2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10141
+ 2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10142
+ 2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10143
+ 2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
10144
+ 2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10145
+ 2401.15xxx/2401.15204/8af84b54-198a-463e-ad3b-700716af27ab_origin.pdf filter=lfs diff=lfs merge=lfs -text
10146
+ 2401.15xxx/2401.15232/0d29d890-ff60-4ab1-87a9-e1c1cae6c92c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10147
+ 2401.15xxx/2401.15235/ed7cda40-6689-42ac-9e77-20019137f37a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10148
+ 2401.15xxx/2401.15266/6f21fafb-7405-4d66-9d29-d9fa16ef00d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10149
+ 2401.15xxx/2401.15269/bc7fa28d-04ca-42a8-83fe-d12e86418cc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10150
+ 2401.15xxx/2401.15273/b0acc5a9-1359-4bf9-9bd5-d23859dbd488_origin.pdf filter=lfs diff=lfs merge=lfs -text
10151
+ 2401.15xxx/2401.15305/5868e453-1001-4496-adde-8a1373ad48f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10152
+ 2401.15xxx/2401.15321/e7106ed3-0d87-4dd4-adce-041f50c9838f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10153
+ 2401.15xxx/2401.15351/fc97351f-0085-44ee-86eb-8273aa791f28_origin.pdf filter=lfs diff=lfs merge=lfs -text
10154
+ 2401.15xxx/2401.15354/9d12ba64-c556-42a6-a6ad-f9fc672b0885_origin.pdf filter=lfs diff=lfs merge=lfs -text
10155
+ 2401.15xxx/2401.15356/47d6ef7a-e5c1-4c11-987e-e4296684f4f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10156
+ 2401.15xxx/2401.15390/2db76c23-56b0-46f7-b836-7641b33ed1ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
10157
+ 2401.15xxx/2401.15391/88505b7e-f2eb-4ef4-9b56-2b5e00ba98f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10158
+ 2401.15xxx/2401.15422/62afb6b6-d239-4814-a2d0-adddb7b023f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10159
+ 2401.15xxx/2401.15443/68b88865-dd76-4859-a184-a2a80c431f67_origin.pdf filter=lfs diff=lfs merge=lfs -text
10160
+ 2401.15xxx/2401.15449/cad310d7-4b88-4019-b572-74b7adb073f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10161
+ 2401.15xxx/2401.15468/5cfaee25-d71b-4001-91f2-a35fc867ce42_origin.pdf filter=lfs diff=lfs merge=lfs -text
10162
+ 2401.15xxx/2401.15473/53f8d4ae-a4be-4b11-b51a-b8fa9853a60e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10163
+ 2401.15xxx/2401.15481/363866b4-c9cf-4e87-8a8f-bf73c1badaae_origin.pdf filter=lfs diff=lfs merge=lfs -text
10164
+ 2401.15xxx/2401.15497/a97a7f8f-e780-465b-9896-22fed592bd21_origin.pdf filter=lfs diff=lfs merge=lfs -text
10165
+ 2401.15xxx/2401.15530/be3f6a99-43ab-415d-a2a2-5658a6208a33_origin.pdf filter=lfs diff=lfs merge=lfs -text
10166
+ 2401.15xxx/2401.15562/fea87de2-c6e3-47c4-aa8a-9d70dfc7ed96_origin.pdf filter=lfs diff=lfs merge=lfs -text
10167
+ 2401.15xxx/2401.15563/8526f16a-5956-459c-bfa1-87b467a1fc67_origin.pdf filter=lfs diff=lfs merge=lfs -text
10168
+ 2401.15xxx/2401.15569/c7bb8830-0046-42ae-94d4-143be90f76f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10169
+ 2401.15xxx/2401.15583/f11f999a-882e-4655-bf74-64a98df75bd3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10170
+ 2401.15xxx/2401.15585/105a32f5-e3bc-457a-a749-d49b9b4ae6e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10171
+ 2401.15xxx/2401.15635/35dee594-5353-4a0b-8c0d-2eb65b7cfe0e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10172
+ 2401.15xxx/2401.15641/98d7e9b4-7b7b-4ed5-bfbd-f3bcc358b3e4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10173
+ 2401.15xxx/2401.15647/c79ec4b1-5d4c-4ae3-8c5f-da0d4da68a14_origin.pdf filter=lfs diff=lfs merge=lfs -text
10174
+ 2401.15xxx/2401.15675/843e2dd5-ee23-4acf-b725-0503b2197306_origin.pdf filter=lfs diff=lfs merge=lfs -text
10175
+ 2401.15xxx/2401.15687/3bb4a7ea-bc01-465e-88ea-fb997a741cab_origin.pdf filter=lfs diff=lfs merge=lfs -text
10176
+ 2401.15xxx/2401.15700/93338af9-dd6f-43d7-be79-0ac2743f923a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10177
+ 2401.15xxx/2401.15715/6016dffc-c939-4194-a15b-50c255037bde_origin.pdf filter=lfs diff=lfs merge=lfs -text
10178
+ 2401.15xxx/2401.15739/28be517c-5521-49c8-a70c-2b2a03a60f55_origin.pdf filter=lfs diff=lfs merge=lfs -text
10179
+ 2401.15xxx/2401.15753/7f6bb4ff-195a-4899-aee5-59fc16ebc9a1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10180
+ 2401.15xxx/2401.15855/1224e1c1-e714-4939-bff3-9c3faf2287b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10181
+ 2401.15xxx/2401.15859/9fc0bf20-f0ab-4825-bf0f-93e2fa55b062_origin.pdf filter=lfs diff=lfs merge=lfs -text
10182
+ 2401.15xxx/2401.15864/77472af4-10f7-4194-b365-3c9174a46747_origin.pdf filter=lfs diff=lfs merge=lfs -text
10183
+ 2401.15xxx/2401.15865/476187b6-d6ea-43fc-b614-824c47ef7705_origin.pdf filter=lfs diff=lfs merge=lfs -text
10184
+ 2401.15xxx/2401.15884/45d33e91-14ce-4474-abab-8607a75c43ca_origin.pdf filter=lfs diff=lfs merge=lfs -text
10185
+ 2401.15xxx/2401.15897/d0a66c89-e011-4ad3-9103-66f4fc99ad5d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10186
+ 2401.15xxx/2401.15947/1b4c4df5-6454-40ad-aa0a-26629328149b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10187
+ 2401.15xxx/2401.15956/ef3356f9-ea43-4d0a-afa3-7372e5e398c3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10188
+ 2401.15xxx/2401.15975/f80c2d6a-a524-4fbb-bcd2-0cc2244542d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10189
+ 2401.15xxx/2401.15977/f74fd074-afb8-4cc6-b769-bb6977c1c00b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10190
+ 2401.16xxx/2401.16448/4fb04fc2-fd02-4b60-b8b3-7fb02350a1be_origin.pdf filter=lfs diff=lfs merge=lfs -text
10191
+ 2402.00xxx/2402.00059/3a6dbc33-8a4d-4c17-b579-45429fba71f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10192
+ 2402.01xxx/2402.01721/d66435ac-9ef0-4302-9aa2-b1312b8cca53_origin.pdf filter=lfs diff=lfs merge=lfs -text
10193
+ 2402.01xxx/2402.01722/65259c8f-c1dc-41fe-8b03-619268fdbd29_origin.pdf filter=lfs diff=lfs merge=lfs -text
10194
+ 2402.01xxx/2402.01727/1a195052-23dd-464f-bd9b-2946ea31f10d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10195
+ 2402.01xxx/2402.01730/24c7d6f0-20fd-47aa-86be-257f97a88106_origin.pdf filter=lfs diff=lfs merge=lfs -text
10196
+ 2402.01xxx/2402.01732/6e234ddd-b2c8-4a9e-b190-1ddb8b181eb5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10197
+ 2402.01xxx/2402.01733/e75adfd2-1ad8-45b3-a825-59f1b1adfd62_origin.pdf filter=lfs diff=lfs merge=lfs -text
10198
+ 2402.01xxx/2402.01735/b626276e-361c-458f-addf-44b0a63dcd0f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10199
+ 2402.03xxx/2402.03358/b9aa3ea1-fca6-43f5-ac06-51646e73c7ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
10200
+ 2402.04xxx/2402.04268/bccace19-cc92-4ccb-b8d1-d2a72950939a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14915/b13534f8-6347-48ca-851c-49192f4ca235_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:371d15ea0a367ec9eb6f5815e805d76251ecb875e16ee07549fe30651750a92e
3
+ size 5111728
2401.14xxx/2401.14915/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14915/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9854d394d30ae7c22c90976453f221d029a19338ecd5f2149234911fcc7fa814
3
+ size 873599
2401.14xxx/2401.14915/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14939/003083d8-8481-436c-b004-dd19115858c8_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b6fa915230b78bbd7f883edb7693544224aecbc9b591a666d9444dbd5dfe61b
3
+ size 1505340
2401.14xxx/2401.14939/full.md ADDED
@@ -0,0 +1,515 @@
1
+ # Macro Graph Neural Networks for Online Billion-Scale Recommender Systems
2
+
3
+ Hao Chen*
4
+
5
+ The Hong Kong Polytechnic
6
+
7
+ University
8
+
9
+ Hung Hom, Hong Kong SAR
10
+
11
+ sundaychenhao@gmail.com
12
+
13
+ Yuanchen Bei*
14
+
15
+ Zhejiang University
16
+
17
+ Hangzhou, China
18
+
19
+ yuanchenbei@zju.edu.cn
20
+
21
+ Qijie Shen
22
+
23
+ Alibaba Group
24
+
25
+ Hangzhou, China
26
+
27
+ qijie.sqj@alibaba-inc.com
28
+
29
+ Yue Xu
30
+
31
+ Alibaba Group
32
+
33
+ Hangzhou, China
34
+
35
+ yuexu.xy@foxmail.com
36
+
37
+ Feiran Huang†
38
+
39
+ Jinan University
40
+
41
+ Guangzhou, China
42
+
43
+ huangfr@jnu.edu.cn
44
+
45
+ Sheng Zhou
46
+
47
+ Zhejiang University
48
+
49
+ Hangzhou, China
50
+
51
+ zhousheng_zju@zju.edu.cn
52
+
53
+ Senzhang Wang
54
+
55
+ Central South University
56
+
57
+ Changsha, China
58
+
59
+ szwang@csu.edu.cn
60
+
61
+ Wenbing Huang
62
+
63
+ Renmin University of China
64
+
65
+ Beijing, China
66
+
67
+ hwenbing@126.com
68
+
69
+ Xiao Huang
70
+
71
+ The Hong Kong Polytechnic
72
+
73
+ University
74
+
75
+ Hung Hom, Hong Kong SAR
76
+
77
+ xiaohuang@comp.polyu.edu.hk
78
+
79
+ # ABSTRACT
80
+
81
+ Predicting Click-Through Rate (CTR) in billion-scale recommender systems poses a long-standing challenge for Graph Neural Networks (GNNs) due to the overwhelming computational complexity involved in aggregating billions of neighbors. To tackle this, GNN-based CTR models usually sample hundreds of neighbors out of the billions to facilitate efficient online recommendations. However, sampling only a small portion of neighbors results in a severe sampling bias and the failure to encompass the full spectrum of user or item behavioral patterns. To address this challenge, we name the conventional user-item recommendation graph the "micro recommendation graph" and introduce a revolutionizing MAcro Recommendation Graph (MAG) for billion-scale recommendations to reduce the neighbor count from billions to hundreds in the graph structure infrastructure. Specifically, we group micro nodes (users and items) with similar behavior patterns to form macro nodes, and MAG then directly describes the relation between the user/item and the hundreds of macro nodes rather than the billions of micro nodes. Subsequently, we introduce tailored Macro Graph Neural Networks (MacGNN) to aggregate information on a macro level and revise the embeddings of macro nodes. MacGNN has already served Taobao's homepage feed for two months, providing recommendations for over one billion users. Extensive offline experiments on three public benchmark datasets and an industrial dataset show that MacGNN significantly outperforms twelve CTR baselines while remaining computationally efficient. Besides, online A/B tests confirm MacGNN's superiority in billion-scale recommender systems.
82
+
83
+ *Both authors contributed equally to this research.
84
+
85
+ † Corresponding author.
86
+
87
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
88
+
89
+ WWW '24, May 13-17, 2024, Singapore, Singapore.
90
+
91
+ © 2024 Copyright held by the owner/author(s). Publication rights licensed to ACM.
92
+
93
+ ACM ISBN 979-8-4007-0171-9/24/05...$15.00
94
+
95
+ https://doi.org/10.1145/3589334.3645517
96
+
97
98
+
99
+ # CCS CONCEPTS
100
+
101
+ - Information systems $\rightarrow$ Online advertising; Web applications; - Human-centered computing $\rightarrow$ Social recommendation.
102
+
103
+ # KEYWORDS
104
+
105
+ next-generation recommendation model, graph-based CTR prediction, billion-scale online model
106
+
107
+ # ACM Reference Format:
108
+
109
+ Hao Chen, Yuanchen Bei, Qijie Shen, Yue Xu, Sheng Zhou, Wenbing Huang, Feiran Huang, Senzhang Wang, and Xiao Huang. 2024. Macro Graph Neural Networks for Online Billion-Scale Recommender Systems. In Proceedings of the ACM Web Conference 2024 (WWW '24), May 13-17, 2024, Singapore, Singapore. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3589334.3645517
110
+
111
+ # 1 INTRODUCTION
112
+
113
+ Billion-scale recommender systems, with billions of users, items, and interactions, are prevalent in today's societies [32, 35, 36, 39], such as YouTube [10, 42] and Taobao [24, 34]. At the heart of these billion-scale recommender systems lies Click-Through Rate (CTR) prediction [1, 37]. Its goal is to predict, in real-time, whether a given user will click on a given item. However, although Graph Neural Networks (GNNs) have shown significant performance in collaborative filtering recommendation tasks [5, 19], they are not well-suited for CTR tasks due to efficiency requirements. This is because performing graph neural networks over billion-scale neighbors leads to overwhelming computational complexity. It is crucial to develop appropriate graph neural networks capable of handling recommender systems with billions of users, items, and interactions.
116
+
117
+ Existing GNN models typically create the graph by linking users to their interacted (clicked) items [2, 19, 31]. In this scenario, if a user interacts with a highly popular item with billions of interactions, then the subgraph of that user will potentially have billions of 2-hop neighbors. To reduce computational complexity, PinSage [35] randomly selects a fixed number of 1-hop and 2-hop neighbors for both users and items. GLSM [29] and GMT [23] introduce importance-based and similarity-based scoring mechanisms to filter the most suitable hundreds of 1-hop and 2-hop neighbors. Besides, traditional CTR models introduce filtering strategies to accelerate the inference process. DIN [41] and DIEN [40] typically truncate a user's hundreds of recently interacted items. UBR4CTR [26] and SIM [25] further introduce search-based strategies to filter the most relevant items from the user's long historical behavior. However, traditional CTR models fail to consider the subgraph of items or the 2-hop neighbors of users.
118
+
119
+ Though the above strategies can reduce the neighbor size for GNNs, these approaches still face the following limitations in billion-scale recommender systems.
120
+
121
+ 1. Severe Sampling Bias: In Figure 1(a), we illustrate the distribution of neighbor numbers in the user-item clicking interaction graph within a real-world shopping platform. Both users and items exhibit a substantial number of 1-hop and 2-hop neighbors. Sampling only a few hundred neighbors can only cover about $5\%$ of user 1-hop neighbors and $0.2\%$ of item 1-hop neighbors. Sampling such small portions cannot accurately represent the entire spectrum of neighbors and may lead to severe sampling bias.
122
+
123
+ 2. Unfitted Users/Items Sampling: As shown in Figure 1(a), users exhibit vastly different number distributions compared to items. For example, users have significantly more 2-hop neighbors and significantly fewer 1-hop neighbors than items. It is inappropriate to sample users and items using the same approach.
124
+
125
+ 3. Ambiguous Neighbor Counts: The sampled neighbors do not accurately represent the true number of interactions prior to the sampling process for users and items. For instance, a user with hundreds of historical interactions will yield the same sample size as another user with millions of historical interactions.
126
+
127
+ The main problem behind the mentioned issues arises from relying on sampling strategies to decrease the size of neighbors. Instead, it's more promising to boost the expressive capacity of graph nodes and significantly reduce the neighbor size by grouping nodes into macro nodes. This grouping approach allows models to overcome the inherent limitations of sampling strategies by eliminating the need for the sampling process entirely. However, actualizing a grouping strategy for recommendation graphs introduces the following challenges.
128
+
129
+ 1. Grouping Strategy: Identifying an optimal grouping strategy for user and item nodes into macro nodes is non-trivial, as it demands a careful balance between reducing complexity and maintaining the integrity of original behavioral patterns.
130
+ 2. Subgraph Definition: Constructing edges between macro nodes is complex due to the necessity of representing aggregated interactions between their constituent user/item nodes accurately. Additionally, defining the subgraph for a given user/item using macro nodes demands innovative approaches.
131
+
132
+ ![](images/48529be6faec2913f932b2e7aeadb45ad87680eb19d7be2324a5b21b700ccb9e.jpg)
133
+ (a) Neighborhood number distribution of micro graphs.
134
+
135
+ ![](images/47ca4648172ea5c81cfb923e4ac118d6ab52d8861332c1975c5c2857e4247da6.jpg)
136
+
137
+ ![](images/1e2333b88e3e746c693bd95f8241750c5eb475e6c1f9d4c6581f9cef20a49068.jpg)
138
+ (b) Neighborhood number distribution of macro graphs.
139
+
140
+ ![](images/5abba7b240f4bba6e91e98cd11558a9e267ea2d364a55d2544c76295280cf52c.jpg)
141
+ Figure 1: Illustration of neighbor number distributions in micro and macro user-item clicking interaction graphs within Taobao's billion-scale recommender system.
142
+
143
+ 3. Recommending with Macro Nodes: Each macro node represents a group of user/item nodes, and the edge between two macro nodes signifies the connections between two groups of nodes. It is challenging to extract the behavioral pattern of a user/item node based on its newly constructed macro-node subgraphs.
144
+
145
+ By addressing the three challenges mentioned above, we propose a more suitable MAcro Recommendation Graph (MAG) for billion-scale recommendations. MAG groups user/item nodes based on similar behaviors to create macro nodes, as illustrated in Figure 2. This grouping reduces the number of neighbors from billions to hundreds. As depicted in Figure 1(b), MAG now only consists of hundreds of 1-hop and 2-hop neighbors. This reduction allows billion-scale recommender systems to alleviate the adverse consequences of sampling only a small portion of neighbors. To achieve this, we introduce tailored Macro Graph Neural Networks (MacGNN) to aggregate the macro information for the target user/item with our specially designed MAG, facilitating accurate and efficient click-through rate prediction for online billion-scale recommender systems. Our paper's primary contributions are summarized as follows:
146
+
147
+ - We create a customized macro recommendation graph, which involves constructing the macro node, macro edge, and macro subgraph. This helps reduce the neighbor size from billions to hundreds, making it easier for GNNs to operate in online billion-scale recommender systems.
148
+ - We propose a novel macro-scale recommendation paradigm known as the Macro Graph Neural Network (MacGNN). This framework efficiently aggregates macro-graph information and updates macro-node embeddings to enable online click-through rate prediction for billion-scale recommender systems.
149
+ - MacGNN has been serving a major shopping platform for two months, offering recommendations to more than one billion users.
150
+
151
+ ![](images/6b4a1066279c261c8dd088ca8b8ef36e77c7391261626460c9862845c9c7307b.jpg)
152
+ Figure 2: Sketch map of the construction of the macro graph.
153
+
154
+ Additionally, we introduce our online implementation to enable online updates of macro nodes and macro edges.
155
+
156
+ - Extensive offline experiments conducted on three public benchmark datasets and a billion-scale industrial dataset demonstrate that MacGNN outperforms twelve state-of-the-art CTR baselines while maintaining competitive efficiency. Furthermore, online A/B tests have confirmed the superiority of MacGNN in real-world billion-scale recommender systems.
157
+
158
+ # 2 PRELIMINARIES
159
+
160
+ In this section, we first present the basic notations in CTR prediction. Then, we present the concept of micro nodes, micro edges, and micro recommendation graphs for recommender systems. Finally, we introduce the definition of our macro recommendation graph.
161
+
162
+ CTR Prediction. Suppose the sets of users and items are $\mathcal{U} = \{u_1, \dots, u_n\}$ and $\mathcal{I} = \{i_1, \dots, i_m\}$, respectively, where $|\mathcal{U}| = n$ and $|\mathcal{I}| = m$ denote the numbers of users and items. In real-world recommender systems, CTR prediction corresponds to a click-or-not problem. When item $i$ is exposed to user $u$, user $u$ has two possible reactions: (i) a positive behavior toward item $i$, such as a click or purchase, or (ii) a negative behavior toward item $i$, such as neglect or dislike. Thus, given the target user-item pair $(u, i)$, the corresponding interaction $y_{ui}$ can be presented as:
163
+
164
+ $$
165
+ y_{ui} = \begin{cases} 1, & \text{if } u \text{ exhibits positive behavior towards } i; \\ 0, & \text{if } u \text{ exhibits negative behavior towards } i. \end{cases} \tag{1}
166
+ $$
167
+
168
+ Given a target user-item pair $(u,i)$ , the CTR prediction task is to predict the target user $u$ 's positive behavior probability $\hat{y}_{ui}$ on target item $i$ . In form, the aim of a CTR model is to learn an accurate prediction function $\mathcal{F}(\cdot)$ , namely the predicted clicking probability $\hat{y}_{ui} = \mathcal{F}(u,i)$ , to minimize the difference from $\hat{y}_{ui}$ to $y_{ui}$ .
169
+
170
+ Micro Node. Starting with several popular works [19, 31, 35], GNN-based recommendation models usually connect users with their interacted (e.g. clicked or purchased) items. Under this setting, users and items are treated as micro nodes. Specifically, each user $u$ and item $i$ is associated with a trainable embedding $E_{u} \in \mathbb{R}^{d}$ and $E_{i} \in \mathbb{R}^{d}$ , where $d$ is the embedding dimension size.
171
+
172
+ Micro Edge. As stated in Eq. (1), the user-item behaviors provide the raw material for edges. Given the micro user-item interaction matrix $\mathcal{R} \in \mathbb{R}^{|\mathcal{U}| \times |\mathcal{I}|}$, where $|\mathcal{R}|$ denotes the total number of interactions, each element $r_{ui} \in \mathcal{R}$ reflects whether user $u$ has a positive interaction with item $i$, namely $r_{ui} = y_{ui}$.
173
+
174
+ Micro Recommendation Graph (MIG). After defining the micro nodes and micro edges, the MIG can be represented as $\mathcal{G} = (\mathcal{U},\mathcal{I},\mathcal{R})$. For user interest models, the user behavior sequence can be given as the first-order neighbors of the user $u$, i.e., $\mathcal{N}_u^{(1)}$, where $\mathcal{N}_u^{(k)}$ denotes the $k^{th}$-hop neighbors of user $u$.
177
+
178
+ According to the definition of MIG, when GNNs predict the CTR of a given user-item pair $(u,i)$ , GNNs first construct the micro subgraph of the target user/item and then extract the embeddings according to MIG. When the graph size grows to a billion-scale, the subgraph may contain billions of micro nodes, which means only loading the embeddings of the subgraph is difficult to accomplish.
179
+
180
+ MAcro Recommendation Graph (MAG). Our proposed MAG can be defined as $\widetilde{\mathcal{G}} = (\widetilde{\mathcal{U}},\widetilde{\mathcal{I}},\widetilde{\mathcal{R}})$ , where $\widetilde{\mathcal{U}},\widetilde{\mathcal{I}}$ , and $\widetilde{\mathcal{R}}$ are the macro user nodes, macro item nodes, and macro edges respectively, and $\widetilde{\mathcal{N}}_v^{(k)}$ represents the $k^{th}$ -hop macro neighbors of node $v$ . Specifically, each macro node $v$ is associated with a trainable embedding $\widetilde{E}_v\in \mathbb{R}^d$ . With MAG, MacGNN only needs to aggregate hundreds of macro nodes, significantly reducing computational complexity.
181
+
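+ As a concrete (if simplified) illustration of this size difference, the following sketch contrasts how a micro subgraph and a macro subgraph might be stored; the container layout and field names are assumptions for exposition, not the paper's implementation.
+
+ ```python
+ # Hypothetical containers contrasting a micro recommendation graph (MIG)
+ # with a macro recommendation graph (MAG).
+ from dataclasses import dataclass, field
+ from typing import Dict, List
+
+ @dataclass
+ class MicroGraph:
+     """MIG: neighbors are individual users/items, so a popular item can
+     accumulate billions of entries in its neighbor list."""
+     user_neighbors: Dict[int, List[int]] = field(default_factory=dict)  # u -> clicked items
+     item_neighbors: Dict[int, List[int]] = field(default_factory=dict)  # i -> clicking users
+
+ @dataclass
+ class MacroGraph:
+     """MAG: neighbors are macro nodes with edge weights, so each
+     neighborhood holds at most a few hundred entries."""
+     user_macro_neighbors: Dict[int, Dict[int, float]] = field(default_factory=dict)
+     item_macro_neighbors: Dict[int, Dict[int, float]] = field(default_factory=dict)
+
+ # A user's 1-hop macro neighborhood, e.g. {macro_item_id: edge_weight}:
+ mag = MacroGraph(user_macro_neighbors={42: {3: 4.0, 17: 3.0}})
+ ```
+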
182
+ # 3 METHODOLOGY
183
+
184
+ In this section, we first formally introduce the concept of Macro Recommendation Graphs and introduce how to design macro nodes and macro edges. Then we present the macro graph neural network for CTR prediction. Finally, we illustrate the implementation architecture of our real-world billion-scale recommender system.
185
+
186
+ # 3.1 Macro Recommendation Graph (MAG)
187
+
188
+ 3.1.1 Constructing Macro Nodes. As presented in the preliminaries, MIG records the detailed micro nodes and micro edges for each user and item. Then, given any user or item, GNNs have to access the embeddings of each hop of micro nodes to infer the behavior pattern of the given user or item, which is computationally inconvenient and introduces response delays. Motivated by this, MAG presents the behavior pattern within macro nodes rather than listing all the micro nodes and utilizing GNNs to extract the behavior pattern from detailed micro nodes.
189
+
190
+ Intuitively, the macro nodes are designed to represent the behavior pattern of a set of micro nodes, where all the micro nodes inside share similar behavior patterns. Thus, we conduct behavior pattern grouping to map the micro nodes into specific macro behavior nodes, with the objective of minimizing the behavioral pattern gap between micro nodes assigned to the same macro node [16].
191
+
192
+ Specifically, given the micro user-item interaction matrix $\mathcal{R} \in \mathbb{R}^{|\mathcal{U}| \times |\mathcal{I}|}$ , for a given user/item micro node $v$ , we first obtain its behavior embedding $\pmb{b}_v$ as follows:
193
+
194
+ $$
195
+ \boldsymbol {b} _ {v} = \frac {[ R ] _ {v}}{\| [ R ] _ {v} \| _ {2}} = \frac {\boldsymbol {r} _ {v}}{\| \boldsymbol {r} _ {v} \| _ {2}}, \quad \boldsymbol {R} = \left\{ \begin{array}{l l} \mathcal {R}, & v \in \mathcal {U}; \\ \mathcal {R} ^ {\top}, & v \in \mathcal {I}. \end{array} \right. \tag {2}
196
+ $$
197
+
198
+ where $\|\cdot\|_2$ is the $L_{2}$ norm. Then, to obtain each macro node $C_k$, we conduct the behavior pattern grouping based on the behavior embeddings of micro nodes. Specifically, we first randomly initialize $K$ macro centroids $\{\mu_1,\dots,\mu_k,\dots,\mu_K\}$, where $\pmb{\mu}_k\in \mathbb{R}^d$ is the centroid of macro node $C_k$ and $K\ll n, m$ is a hyperparameter setting the number of macro nodes; we denote the numbers of macro user nodes and macro item nodes as $\widetilde{n}$ and $\widetilde{m}$, respectively. Then, we assign micro nodes to the appropriate macro node based on their behavior patterns, and update the centroids of the macro nodes iteratively. The process can be expressed as:
201
+
202
+ $$
203
+ \boldsymbol {\mu} _ {k} = \frac {1}{| C _ {k} |} \sum_ {x _ {v} = k, \boldsymbol {b} _ {v} \in C _ {k}} \boldsymbol {b} _ {v}, \tag {3}
204
+ $$
205
+
206
+ where $|C_k|$ is the number of micro nodes within $C_k$, and $x_v$ is the macro node index that $v$ is assigned to. Further, the optimization objective of the behavior pattern grouping is:
207
+
208
+ $$
209
+ \min_{\substack{x_{1},\ldots,x_{m+n} \\ \boldsymbol{\mu}_{1},\ldots,\boldsymbol{\mu}_{K}}} J\left(x_{1},\ldots,x_{m+n};\boldsymbol{\mu}_{1},\ldots,\boldsymbol{\mu}_{K}\right) \triangleq \sum_{k=1}^{K} \sum_{x_{v}=k,\,\boldsymbol{b}_{v}\in C_{k}} \sqrt{\left(\boldsymbol{b}_{v}-\boldsymbol{\mu}_{k}\right)\left(\boldsymbol{b}_{v}-\boldsymbol{\mu}_{k}\right)^{\top}}, \tag{4}
210
+ $$
211
+
212
+ where $J$ is the objective function of the behavior pattern grouping. As shown in Figure 2, micro nodes with similar behavior patterns are grouped into the same macro node. Note that each macro node $v$ will also be assigned a trainable embedding $\widetilde{E}_v \in \mathbb{R}^d$.
213
+
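+ The following is a minimal sketch of the behavior pattern grouping in Eqs. (2)-(4), assuming a k-means style procedure over the $L_2$-normalized interaction rows (scikit-learn's KMeans minimizes the squared Euclidean distance, a close stand-in for the objective in Eq. (4)); the helper names are illustrative assumptions.
+
+ ```python
+ import numpy as np
+ from sklearn.cluster import KMeans
+
+ def group_micro_nodes(R: np.ndarray, num_macro: int, seed: int = 0) -> np.ndarray:
+     """R: |U| x |I| interaction matrix (pass R for users, R.T for items).
+     Returns the macro-node index x_v for every micro node (row of R)."""
+     norms = np.linalg.norm(R, axis=1, keepdims=True)
+     B = R / np.maximum(norms, 1e-12)              # behavior embeddings b_v (Eq. 2)
+     km = KMeans(n_clusters=num_macro, random_state=seed, n_init=10)
+     return km.fit_predict(B)                      # grouping assignments x_v
+
+ # Example with the macro node counts reported in Section 3.3 (200 / 300):
+ R = (np.random.rand(1000, 500) < 0.01).astype(float)   # toy interaction matrix
+ user_assign = group_micro_nodes(R, num_macro=200)
+ item_assign = group_micro_nodes(R.T, num_macro=300)
+ ```
+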
214
+ 3.1.2 Organizing Macro Edges. Macro edges depict relationships between two macro nodes within a specific user/item subgraph, signifying the behavioral patterns within that subgraph. It's important to note that macro edges have a distinct design compared to micro edges. The micro edges present connections between fixed micro user nodes and micro item nodes. Since micro nodes remain constant, the micro edges are also fixed. In contrast, macro edges capture the connection strength between two macro nodes in a subgraph, which is tailored to each user and item subgraph.
215
+
216
+ In Figure 2, the user $v$ is depicted as having two 1-hop macro nodes, each with macro edge weights of 4 and 3, respectively. Moving to the second hop, the user extends to three macro nodes, and these macro edges represent the connections between the 1-hop macro nodes and the 2-hop macro node. Formally, we use $\widetilde{C} = \{C_1,C_2,\dots ,C_{\widetilde{n} +\widetilde{m}}\}$ to represent the entire set of macro nodes in the MAG. We employ $\widetilde{\mathcal{R}}_{v;p,q}^{(k)}$ to denote the macro edge for any user/item node $v$ with its $k^{th}$ -hop neighbors, where $C_{v;p}^{(k - 1)}$ represents the macro node in $(k - 1)^{th}$ -hop macro neighbors $\widetilde{\mathcal{N}}_v^{(k - 1)}$ , and $C_{v;q}^{(k)}$ represents the macro node in $k^{th}$ -hop macro neighbors $\widetilde{\mathcal{N}}_v^{(k)}$ . Thus the weight of macro edges can be computed as:
217
+
218
+ $$
219
+ \widetilde {\mathcal {R}} _ {v; p, q} ^ {(k)} = \sum_ {a \in C _ {v; p} ^ {(k - 1)}, b \in C _ {v; q} ^ {(k)}} r _ {a b}, \tag {5}
220
+ $$
221
+
222
+ where $C_{v;p}^{(k-1)} = C_{v;p} \cap \mathcal{N}_v^{(k-1)}$ represents the macro nodes related to node $v$ within its $(k-1)^{th}$-hop neighbors and $C_{v;q}^{(k)} = C_{v;q} \cap \mathcal{N}_v^{(k)}$ represents the macro nodes related to node $v$ within its $k^{th}$-hop neighbors. In § 3.3, we introduce how macro edges are updated online in billion-scale recommender systems. Finally, after transforming micro recommendation graphs into macro recommendation graphs, MAGs have significantly fewer nodes and edges by extracting behavior patterns explicitly into macro nodes.
223
+
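+ As a minimal sketch of the 1-hop case of Eq. (5), macro edge weights for a target user can be obtained by counting that user's micro edges per macro item group; the names below continue the grouping sketch above and are illustrative assumptions, not the production implementation.
+
+ ```python
+ from collections import Counter
+ from typing import Dict, List
+
+ def user_macro_edges_1hop(clicked_items: List[int],
+                           item_assign: Dict[int, int]) -> Dict[int, int]:
+     """Sum micro edge weights (click counts) into 1-hop macro edge weights
+     for one target user; item_assign maps a micro item to its macro node."""
+     return dict(Counter(item_assign[i] for i in clicked_items))
+
+ # A user who clicked 7 items spread over 2 macro item nodes ends up with
+ # only 2 macro neighbors, e.g. {12: 4, 87: 3}, instead of 7 micro neighbors.
+ ```
+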
224
+ # 3.2 Macro Graph Neural Network
225
+
226
+ 3.2.1 Macro Weight Modeling. The overall framework of our proposed MacGNN is shown in Figure 3. To better identify the target user/item preferences over a certain macro node, we design the macro weight modeling for macro neighbors according to the weights of the connected macro edges.
+
+ ![](images/1791e8ab75684eadbf4f1bd143822c4840b2e5f3398fdb51b1cf134311f67dd4.jpg)
+ Figure 3: The model architecture of the proposed MacGNN.
232
+
233
+ In order to avoid the excessive gap between the macro edge weights of hot nodes and cold nodes and conduct modeling flexibly, we equip the macro weight modeling with logarithmic smoothing and temperature-based softmax activation. Formally, take the target user/item $v$ as an example, given a macro node $q$ in its $k^{th}$ -hop neighborhood, the macro weight $w_{v;q}^{(k)}$ of $q$ toward the target user/item $v$ is calculated as:
234
+
235
+ $$
236
+ s _ {v; q} ^ {(k)} = \log \left(\sum_ {p \in \widetilde {\mathcal {N}} _ {v} ^ {(k - 1)}} \widetilde {\mathcal {R}} _ {v; p, q} ^ {(k)} + 1\right), \quad w _ {v; q} ^ {(k)} = \frac {\exp \left(s _ {v ; q} ^ {(k)} / \tau\right)}{\sum_ {j \in \widetilde {\mathcal {N}} _ {v} ^ {(k)}} \exp \left(s _ {v ; j} ^ {(k)} / \tau\right)}, \tag {6}
237
+ $$
238
+
239
+ where $\tau$ is a temperature coefficient hyper-parameter [7]. These modeled weights represent the importance of these macro neighboring nodes in the target user/item's historical interactions.
240
+
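+ A minimal sketch of Eq. (6) follows: logarithmic smoothing of the summed macro edge weights, then a temperature-scaled softmax over a node's macro neighbors. The tensor shapes and function name are illustrative assumptions.
+
+ ```python
+ import torch
+
+ def macro_weights(edge_weights: torch.Tensor, tau: float = 2.0) -> torch.Tensor:
+     """edge_weights: [num_macro_neighbors] summed macro edge weights for one
+     target node and one hop; returns normalized weights w_{v;q}^{(k)}."""
+     s = torch.log(edge_weights + 1.0)          # log smoothing narrows hot/cold gap
+     return torch.softmax(s / tau, dim=-1)      # temperature-scaled softmax
+
+ # Example: a hot macro neighbor with 10,000 clicks no longer dwarfs one with 40.
+ w = macro_weights(torch.tensor([10000.0, 40.0, 3.0]), tau=2.0)
+ ```
+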
241
+ 3.2.2 Macro Neighbor Aggregation & Layer Readout. To mine the macro relationships effectively and efficiently, we first design a macro neighbor aggregation architecture rather than a time-consuming recursive graph convolution. Then, we propose the macro layer readout to aggregate the macro information of the target user and item.
242
+
243
+ Macro Neighbor Aggregation. Due to the different semantics of users and items, we utilized two separate macro neighbor aggregation modules without parameter sharing for user-type macro nodes and item-type macro nodes, respectively.
244
+
245
+ For user-type target nodes and their $k^{th}$ -hop user-type macro neighbors, the aggregation function $MNA_{u}$ can be defined as:
246
+
247
+ $$
248
+ \begin{array}{l} M N A _ {u} (u, p \in \widetilde {\mathcal {N}} _ {u} ^ {(k)}, E _ {u}, \widetilde {E} _ {p}, \widetilde {\mathcal {N}} _ {u} ^ {(k)}; Q _ {u}, K _ {u}, V _ {u}) \\ \triangleq \sum_ {p \in \widetilde {\mathcal {N}} _ {u} ^ {(k)}} \sigma (\langle Q _ {u} \cdot \widetilde {E} _ {p}, K _ {u} \cdot E _ {u} \rangle) \cdot V _ {u} \cdot \widetilde {E} _ {p}, \tag {7} \\ \end{array}
249
+ $$
250
+
251
+ where $Q_{u}, K_{u}, V_{u} \in \mathbb{R}^{d \times d'}$ are trainable self-attention matrices for user-type nodes, $\langle \cdot \rangle$ is the inner product function, and $\sigma(\cdot)$ is the softmax activation function. Specifically, given the target user $u$ and a user-type macro node $p$ in its $k^{th}$-hop neighborhood (such as a node in target user $u$'s 2-hop macro neighborhood or target item $i$'s 1-hop macro neighborhood), the process is expressed as:
254
+
255
+ $$
256
+ \alpha_ {u, p} = \frac {\exp \left((Q _ {u} \cdot \widetilde {E} _ {p}) (K _ {u} \cdot E _ {u}) ^ {\top} / \sqrt {d}\right)}{\sum_ {j \in \widetilde {\mathcal {N}} _ {u} ^ {(k)}} \exp \left((Q _ {u} \cdot \widetilde {E} _ {j}) (K _ {u} \cdot E _ {u}) ^ {\top} / \sqrt {d}\right)}, \tag {8}
257
+ $$
258
+
259
+ $$
260
+ \widetilde {Z} _ {u, p} = \alpha_ {u, p} \cdot \left(V _ {u} \cdot \widetilde {E} _ {p}\right), \tag {9}
261
+ $$
262
+
263
+ where $\widetilde{Z}_{u,p}$ is the aggregated macro embedding. Similarly, for the item-type target node $i$ and its macro item-type neighbor $p$ in the $k^{th}$ -hop neighborhood, the aggregation function $MNA_{i}$ to obtain the aggregated macro embedding $\widetilde{Z}_{i,q}$ can be derived in similar ways using separating parameters as:
264
+
265
+ $$
266
+ \begin{array}{l} M N A _ {i} (i, q \in \widetilde {\mathcal {N}} _ {i} ^ {(k)}, E _ {i}, \widetilde {E} _ {q}, \widetilde {\mathcal {N}} _ {i} ^ {(k)}; Q _ {i}, K _ {i}, V _ {i}) \\ \triangleq \sum_ {q \in \widetilde {\mathcal {N}} _ {i} ^ {(k)}} \sigma (\langle Q _ {i} \cdot \widetilde {E} _ {q}, K _ {i} \cdot E _ {i} \rangle) \cdot V _ {i} \cdot \widetilde {E} _ {q}, \tag {10} \\ \end{array}
267
+ $$
268
+
269
+ where $Q_{i}, K_{i}, V_{i} \in \mathbb{R}^{d \times d'}$ are trainable self-attention matrices for item-type nodes.
270
+
271
+ Macro Layer Readout. With the co-consideration of macro weight modeling and macro neighbor aggregation, we can measure the importance of the specific neighboring macro node from different perspectives. Thus, the representation of a specific-hop macro neighborhood of the target user/item node can be obtained by the following layer readout:
272
+
273
+ $$
274
+ E _ {u} ^ {\left(l _ {u}\right)} = \sum_ {j \in \widetilde {\mathcal {N}} _ {u} ^ {\left(l _ {u}\right)}} w _ {u, j} \cdot \widetilde {Z} _ {u, j}, \quad E _ {i} ^ {\left(l _ {i}\right)} = \sum_ {j \in \widetilde {\mathcal {N}} _ {i} ^ {\left(l _ {i}\right)}} w _ {i, j} \cdot \widetilde {Z} _ {i, j}, \tag {11}
275
+ $$
276
+
277
+ where $E_{u}^{(l_{u})}$ and $E_{i}^{(l_{i})}$ denote the $l_{u}$ -hop/ $l_{i}$ -hop readout representation of target user/item, respectively.
278
+
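+ A minimal sketch of the macro neighbor aggregation (Eqs. (7)-(9)) and the layer readout (Eq. (11)) for a single target node and hop is given below; it uses one attention head, and the parameter names and shapes are illustrative assumptions.
+
+ ```python
+ import torch
+
+ def aggregate_and_readout(e_target: torch.Tensor,   # [d] target user/item embedding
+                           e_macro: torch.Tensor,    # [N, d] macro neighbor embeddings
+                           w_macro: torch.Tensor,    # [N] macro weights from Eq. (6)
+                           Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor  # [d, d']
+                           ) -> torch.Tensor:
+     d = e_target.shape[-1]
+     q = e_macro @ Q                                   # [N, d'] queries from neighbors
+     k = e_target @ K                                  # [d']    key from the target
+     alpha = torch.softmax((q @ k) / d ** 0.5, dim=0)  # attention scores (Eq. 8)
+     z = alpha.unsqueeze(-1) * (e_macro @ V)           # aggregated embeddings (Eq. 9)
+     return (w_macro.unsqueeze(-1) * z).sum(dim=0)     # weighted readout E^(l) (Eq. 11)
+ ```
+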
279
+ 3.2.3 Recent Behavior Modeling. The above macro modeling takes into account the general and stable behavioral characteristics of the target node. Leveraging the learned knowledge at such a macro level, we further consider the information of recent behavior to better extract users' changing short-term interests and the evolving interaction patterns of items [11, 29].
280
+
281
+ Formally, for the target user $u$ and target item $i$, the few most recently interacted neighbor sequences $RS_{u}$ and $RS_{i}$ are utilized, and their embeddings are co-trained with the macro nodes in the above aggregation functions, respectively.
282
+
283
+ $$
284
+ \begin{array}{l} Z _ {u, r s _ {p}} = M N A _ {i} \left(i, r s _ {p} \in R S _ {u}, E _ {i}, E _ {r s _ {p}}, R S _ {u}; Q _ {i}, K _ {i}, V _ {i}\right), \tag {12} \\ Z _ {i, r s _ {q}} = M N A _ {u} (u, r s _ {q} \in R S _ {i}, E _ {u}, E _ {r s _ {q}}, R S _ {i}; Q _ {u}, K _ {u}, V _ {u}), \\ \end{array}
285
+ $$
286
+
287
+ $$
288
+ E _ {i} ^ {r s} = \sum_ {r s _ {q} \in R S _ {i}} Z _ {i, r s _ {q}}, \quad E _ {u} ^ {r s} = \sum_ {r s _ {p} \in R S _ {u}} Z _ {u, r s _ {p}}, \tag {13}
289
+ $$
290
+
291
+ where $E_{u}^{rs}$ and $E_{i}^{rs}$ are the representations of the recent behavior sequences $RS_{u}$ and $RS_{i}$, respectively. The sequence length of the few recent behaviors used for auxiliary training is set to 20. Note that this number of recent nodes is much smaller than the sequence lengths of hundreds used in advanced interest models [40, 41].
292
+
293
+ ![](images/9038f7b708198d8883dd71efe01918e7d1bbf6c35c3416f174785debd889ddd9.jpg)
294
+ Figure 4: The system architecture for online deployment.
295
+
296
+ 3.2.4 CTR Prediction Layer. With the obtained informative representations, we utilize them for the final CTR prediction for the target user $u$ and target item $i$ as the following calculation:
297
+
298
+ $$
299
+ \hat{y}_{u,i} = MLP\Big( \big( \Vert_{l_u=1}^{K} E_{u}^{(l_u)} \big) \,\Big\Vert\, \big( \Vert_{l_i=1}^{K} E_{i}^{(l_i)} \big) \,\Big\Vert\, E_{u}^{rs} \,\Big\Vert\, E_{i}^{rs} \,\Big\Vert\, E_{u} \,\Big\Vert\, E_{i} \Big), \tag{14}
300
+ $$
301
+
302
+ where the architecture and parameter settings of the MLP are the same as previous works [40, 41].
303
+
304
+ To train and optimize the model parameters, we apply the binary cross-entropy loss as the model objective function. Formally, for each user-item pair $(u,i)$ in training set $\mathcal{T}\mathcal{S}$ , the adopted objective function can be expressed as:
305
+
306
+ $$
307
+ \mathcal{L}_{bce} = -\frac{1}{|\mathcal{TS}|} \sum_{(u,i) \in \mathcal{TS}} \Big[ y_{u,i} \log \hat{y}_{u,i} + (1 - y_{u,i}) \log (1 - \hat{y}_{u,i}) \Big], \tag{15}
308
+ $$
309
+
310
+ where $\hat{y}_{u,i}$ is the predicted CTR and $y_{u,i}$ is the ground-truth label. Then, the overall objective function of MacGNN is as follows:
311
+
312
+ $$
313
+ \mathcal {L} = \mathcal {L} _ {b c e} + \lambda \cdot \| \boldsymbol {\theta} \| _ {2} ^ {2}, \tag {16}
314
+ $$
315
+
316
+ where $\lambda \cdot \| \pmb{\theta}\|_2^2$ denotes the $L_{2}$ regularization to avoid over-fitting.
317
+
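+ A minimal sketch of this training objective (Eqs. (15)-(16)) is shown below; the model object referenced in the usage comment is an assumed placeholder, not the authors' code.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def macgnn_loss(y_pred: torch.Tensor, y_true: torch.Tensor,
+                 params, weight_decay: float = 1e-5) -> torch.Tensor:
+     bce = F.binary_cross_entropy(y_pred, y_true)       # Eq. (15)
+     l2 = sum((p ** 2).sum() for p in params)           # ||theta||_2^2
+     return bce + weight_decay * l2                     # Eq. (16)
+
+ # Example usage with an assumed `model` producing clicking probabilities:
+ # loss = macgnn_loss(model(batch), labels, model.parameters(), weight_decay=1e-5)
+ # loss.backward(); optimizer.step()
+ ```
+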
318
+ # 3.3 Online Implementation
319
+
320
+ In this section, we present the online deployment of MacGNN on a leading e-commerce platform's homepage. MacGNN has provided stable and precise recommendations to over 1 billion users and 2 billion items, analyzing more than 12 trillion interactions since August 2023.
321
+
322
+ The core architecture to implement the proposed MacGNN model is presented in Fig. 4, including the workflow of both offline computing and online serving. Offline computing can compute the necessary embeddings and graph structures without affecting the online service. Specifically, offline computing is based on a distributed machine learning platform, which loads log data to train the model parameters and embeddings. Then the learned user/item embedding and the macro node embedding are uploaded to the graph feature center for online serving.
323
+
324
+ Another job of offline computing is graph structure updates. For example, during shopping events like Black Friday or Singles' Day, certain popular items can receive billions of clicks within seconds. In such scenarios, we employ two modules to facilitate graph structure updates. The stock micro edges are computed offline on a daily basis (or even hourly if necessary). Meanwhile, the incremental micro edges store the micro edges generated in real time. Since the macro edge weights (Eq. (5)) are defined through summation, the complete macro edge weights can be computed by adding the stock macro edge weights and the incremental macro edge weights (a sketch of this stock-plus-incremental update follows below).
+
+ With the help of offline computing, during the online inference process, MacGNN can directly fetch the macro edges from the graph structure center and the macro node embeddings from the graph feature center. Since MacGNN only considers macro nodes, the number of related nodes is upper-bounded by $O(\widetilde{n} + \widetilde{m})$. In contrast, the expected number of related nodes for traditional micro GNNs is $O\big(\frac{|\mathcal{R}|^2}{m \times n}\big)$. Specifically, we construct 200 macro nodes for users and 300 macro nodes for items, so a micro GNN that considered all micro nodes in the billion-scale recommender system would touch roughly 6 million times more nodes than MacGNN.
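+ The following is a minimal sketch of the stock-plus-incremental macro edge update described above; the store names (stock_weights, incremental_clicks) are illustrative assumptions about the offline/online tables, not the production implementation.
+
+ ```python
+ from collections import Counter
+ from typing import Dict, Tuple
+
+ MacroEdgeKey = Tuple[int, int]   # (macro node p, macro node q) for one target node
+
+ def serve_macro_edges(stock_weights: Dict[MacroEdgeKey, int],
+                       incremental_clicks: Counter) -> Dict[MacroEdgeKey, int]:
+     """Because Eq. (5) is a plain sum, the stock weights computed offline and
+     the real-time incremental counts can simply be added at serving time."""
+     merged = Counter(stock_weights)
+     merged.update(incremental_clicks)
+     return dict(merged)
+
+ # Example: an item that goes viral only changes the incremental counter; the
+ # daily stock table stays untouched until the next offline rebuild.
+ stock = {(3, 17): 120, (3, 42): 8}
+ incremental = Counter({(3, 42): 5000})
+ print(serve_macro_edges(stock, incremental))   # {(3, 17): 120, (3, 42): 5008}
+ ```
+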
326
+ # 4 EXPERIMENTS
327
+
328
+ In this section, we conduct comprehensive experiments on both offline datasets and real-world online recommendation systems, aiming to answer the following research questions. RQ1: How does MacGNN perform compared to state-of-the-art models? RQ2: How efficient is the proposed MacGNN? RQ3: What is the effect of different components in MacGNN? RQ4: How does MacGNN perform on billion-scale real-world recommendation platforms?
+
+ # 4.1 Experimental Setup
333
+ 4.1.1 Datasets. We conduct comprehensive experiments on three widely used benchmark datasets MovieLens [17], Electronics [22], and Kuaishou [13], and one billion-scale industrial dataset from Alibaba, one of the biggest shopping platforms in China, to verify the effectiveness of MacGNN. The statistics of these datasets are shown in Table 1. The detailed description of these datasets is illustrated in Appendix A.
334
+ 4.1.2 Competitors. To evaluate the effectiveness of MacGNN, we compare it with twelve representative state-of-the-art CTR prediction models, grouped into three main categories. (i) Feature Interaction-based Methods: Wide&Deep [8], DeepFM [15], AFM [33], and NFM [18]. (ii) User Interest-based Methods: DIN [41], DIEN [40], UBR4CTR [26], and SIM [25]. (iii) Graph-based Methods: PinSage [35], LightGCN [19], GLSM [29], and GMT [23]. We leave the details of these baseline models in Appendix B.
335
+ 4.1.3 Hyperparameter Setting. For all models, the embedding size is fixed to 10 and the embedding parameters are initialized with the Xavier method [14] for fair comparison. Shapes of the final MLP for all models are set to [200, 80, 2] as previous works [40, 41]. The learning rate of MacGNN is searched from $\{1 \times 10^{-2}, 5 \times 10^{-3}, 1 \times 10^{-3}\}$ , the regularization term $\lambda$ is searched from $\{1 \times 10^{-4}, 5 \times 10^{-5}, 1 \times 10^{-5}\}$ . The batch size is set to 1024 for all models and the Adam optimizer is used [21].
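+
+ As a rough illustration of this setup, the sketch below instantiates the stated settings (embedding size 10, Xavier initialization, MLP shape [200, 80, 2], batch size 1024, Adam); the concatenated input width and module layout are illustrative assumptions, not the authors' code.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ embedding_dim = 10
+ # Assumed input width: several concatenated d-dim representations (see Eq. 14).
+ mlp = nn.Sequential(nn.Linear(6 * embedding_dim, 200), nn.ReLU(),
+                     nn.Linear(200, 80), nn.ReLU(),
+                     nn.Linear(80, 2))
+ user_macro_emb = nn.Embedding(200, embedding_dim)   # macro user nodes
+ item_macro_emb = nn.Embedding(300, embedding_dim)   # macro item nodes
+ for emb in (user_macro_emb, item_macro_emb):
+     nn.init.xavier_uniform_(emb.weight)              # Xavier initialization
+
+ params = (list(mlp.parameters()) + list(user_macro_emb.parameters())
+           + list(item_macro_emb.parameters()))
+ optimizer = torch.optim.Adam(params, lr=1e-3, weight_decay=1e-5)
+ batch_size = 1024
+ ```
+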
336
+ 4.1.4 Evaluation Metrics. We evaluate the models with three widely-adopted CTR prediction metrics including AUC [12], GAUC [41], and Logloss [6]. Higher AUC and GAUC values indicate better CTR prediction performance, and a lower Logloss value also indicates better CTR prediction performance.
337
+
338
+ Table 1: Statistics of the experimental datasets.
339
+
340
+ <table><tr><td>Dataset</td><td># Users</td><td># Items</td><td># Interactions</td><td># Categories</td></tr><tr><td>MovieLens</td><td>71,567</td><td>10,681</td><td>10,000,054</td><td>21</td></tr><tr><td>Electronics</td><td>192,403</td><td>63,001</td><td>1,689,188</td><td>801</td></tr><tr><td>Kuaishou</td><td>7,176</td><td>10,728</td><td>12,530,806</td><td>31</td></tr><tr><td>Industrial</td><td>170,000,000</td><td>310,000,000</td><td>118,000,000,000</td><td>27,452</td></tr></table>
341
+
342
+ Note that we run all the experiments five times with different random seeds and report the average results with standard deviation to prevent extreme cases.
343
+
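+ For reference, the sketch below shows a GAUC computation under the common assumption that per-user AUCs are averaged with weights proportional to each user's number of samples; the exact weighting used in the paper's evaluation may differ.
+
+ ```python
+ from collections import defaultdict
+ from sklearn.metrics import roc_auc_score
+
+ def gauc(user_ids, y_true, y_pred) -> float:
+     groups = defaultdict(list)
+     for u, t, p in zip(user_ids, y_true, y_pred):
+         groups[u].append((t, p))
+     num, den = 0.0, 0.0
+     for samples in groups.values():
+         labels = [t for t, _ in samples]
+         if len(set(labels)) < 2:        # AUC undefined for single-class users
+             continue
+         scores = [p for _, p in samples]
+         num += len(samples) * roc_auc_score(labels, scores)
+         den += len(samples)
+     return num / den if den else float("nan")
+ ```
+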
344
+ # 4.2 Offline Evaluation (RQ1)
345
+
346
+ In this subsection, we compare our proposed MacGNN with twelve state-of-the-art baseline models on the four experimental datasets. The comparison results on the AUC and GAUC metrics are reported in Table 2 and Table 3, with the following observations:
347
+
348
+ MacGNN can achieve significant improvements over state-of-the-art methods on all experimental datasets. From the tables, we observe that the proposed MacGNN achieves the highest AUC and GAUC performance and the lowest Logloss results. Specifically, for the Logloss metric, MacGNN outperforms the best baseline by $1.95\%$, $4.10\%$, $0.71\%$, and $0.93\%$ on MovieLens, Electronics, Kuaishou, and the industrial dataset, respectively. Across the AUC, GAUC, and Logloss metrics, MacGNN brings effective gains of $1.00\%$, $0.93\%$, and $1.70\%$ on average, respectively. These comparison results verify that taking graph information into account from a macro perspective helps MacGNN achieve better interest modeling and CTR prediction performance.
349
+
350
+ The graph-based methods perform relatively better than other types of baseline models. Comparing the three main categories of baseline models, we can find that the graph-based models (i.e. PinSage, LightGCN, GLSM, and GMT) obtain relatively better results than user interest modeling and feature interaction methods, which indicates that apart from the directly interacted neighborhood, incorporating high-order graph information can reflect the useful implicit preferences of the target user-item pair, and is significant for the overall CTR prediction performance.
351
+
352
+ Increasing the modeling range through node sampling does not necessarily bring effective gains in all scenarios. These results show that applying node sampling-based methods (e.g. UBR4CTR, SIM, and GLSM) to consider behaviors does not consistently bring improvements to the performance. This suggests that modeling node interests by only searching and sampling similar nodes based on certain rules may not be accurate enough. Additionally, retrieving neighbors beyond the 1-hop using GLSM resulted in relatively better performance compared to UBR4CTR and SIM, also indicating that the higher-order interaction information is meaningful. The designed macro graph paradigm of MacGNN avoids this issue, which is an important factor contributing to its optimal performance.
353
+
354
+ # 4.3 Efficiency Study (RQ2)
355
+
356
+ Since CTR prediction has to infer the user's intent in real time, the computational efficiency of models is also an important evaluation factor [30].
357
+
358
+ Table 2: CTR prediction comparison results over five trial runs (↑: the higher, the better; ↓: the lower, the better). The best baseline(s) are highlighted with underlining.
359
+
360
+ <table><tr><td rowspan="2">Model</td><td colspan="3">MovieLens</td><td colspan="3">Electronics</td><td colspan="3">Kuaishou</td></tr><tr><td>AUC (↑)</td><td>GAUC (↑)</td><td>Logloss (↓)</td><td>AUC (↑)</td><td>GAUC (↑)</td><td>Logloss (↓)</td><td>AUC (↑)</td><td>GAUC (↑)</td><td>Logloss (↓)</td></tr><tr><td>Wide&amp;Deep</td><td>0.7237±0.0008</td><td>0.6922±0.0009</td><td>0.6072±0.0020</td><td>0.8242±0.0009</td><td>0.8247±0.0008</td><td>0.5132±0.0033</td><td>0.8202±0.0023</td><td>0.7761±0.0006</td><td>0.4922±0.0025</td></tr><tr><td>DeepFM</td><td>0.7215±0.0015</td><td>0.6910±0.0011</td><td>0.6080±0.0026</td><td>0.8064±0.0028</td><td>0.8066±0.0028</td><td>0.5352±0.0081</td><td>0.8207±0.0014</td><td>0.7753±0.0007</td><td>0.4922±0.0023</td></tr><tr><td>AFM</td><td>0.7199±0.0008</td><td>0.6884±0.0007</td><td>0.6091±0.0013</td><td>0.7995±0.0008</td><td>0.7999±0.0009</td><td>0.5330±0.0008</td><td>0.8184±0.0034</td><td>0.7731±0.0049</td><td>0.4969±0.0041</td></tr><tr><td>NFM</td><td>0.7156±0.0039</td><td>0.6850±0.0042</td><td>0.6171±0.0078</td><td>0.8044±0.0009</td><td>0.8049±0.0009</td><td>0.5372±0.0033</td><td>0.8186±0.0045</td><td>0.7717±0.0022</td><td>0.4951±0.0040</td></tr><tr><td>DIN</td><td>0.7248±0.0010</td><td>0.6974±0.0005</td><td>0.6143±0.0043</td><td>0.8295±0.0026</td><td>0.8307±0.0030</td><td>0.5186±0.0028</td><td>0.8208±0.0019</td><td>0.7792±0.0005</td><td>0.4978±0.0031</td></tr><tr><td>DIEN</td><td>0.7262±0.0010</td><td>0.6958±0.0009</td><td>0.6112±0.0020</td><td>0.8313±0.0031</td><td>0.8323±0.0027</td><td>0.5167±0.0056</td><td>0.8273±0.0016</td><td>0.7783±0.0009</td><td>0.4943±0.0054</td></tr><tr><td>UBR4CTR</td><td>0.7245±0.0002</td><td>0.6943±0.0010</td><td>0.6233±0.0076</td><td>0.8300±0.0005</td><td>0.8299±0.0006</td><td>0.5056±0.0007</td><td>0.8266±0.0005</td><td>0.7799±0.0006</td><td>0.4907±0.0020</td></tr><tr><td>SIM</td><td>0.7255±0.0014</td><td>0.6950±0.0012</td><td>0.6254±0.0094</td><td>0.8296±0.0033</td><td>0.8305±0.0031</td><td>0.5186±0.0062</td><td>0.8273±0.0005</td><td>0.7800±0.0005</td><td>0.4906±0.0021</td></tr><tr><td>PinSage</td><td>0.7298±0.0017</td><td>0.7069±0.0017</td><td>0.6121±0.0039</td><td>0.8136±0.0027</td><td>0.8133±0.0027</td><td>0.5269±0.0078</td><td>0.8163±0.0019</td><td>0.7810±0.0006</td><td>0.5037±0.0041</td></tr><tr><td>LightGCN</td><td>0.7305±0.0009</td><td>0.7077±0.0012</td><td>0.6122±0.0061</td><td>0.8329±0.0011</td><td>0.8333±0.0010</td><td>0.5101±0.0049</td><td>0.8139±0.0019</td><td>0.7803±0.0014</td><td>0.5068±0.0041</td></tr><tr><td>GLSM</td><td>0.7320±0.0003</td><td>0.7096±0.0007</td><td>0.6088±0.0035</td><td>0.8318±0.0026</td><td>0.8324±0.0026</td><td>0.5112±0.0066</td><td>0.8170±0.0012</td><td>0.7811±0.0004</td><td>0.5031±0.0059</td></tr><tr><td>GMT</td><td>0.7353±0.0014</td><td>0.7097±0.0010</td><td>0.6003±0.0023</td><td>0.8313±0.0020</td><td>0.8322±0.0024</td><td>0.5110±0.0083</td><td>0.8215±0.0018</td><td>0.7803±0.0017</td><td>0.4981±0.0020</td></tr><tr><td>MacGNN</td><td>0.7458±0.0006</td><td>0.7198±0.0007</td><td>0.5886±0.0027</td><td>0.8444±0.0009</td><td>0.8458±0.0008</td><td>0.4892±0.0040</td><td>0.8306±0.0013</td><td>0.7813±0.0010</td><td>0.4872±0.0026</td></tr></table>
361
+
362
+ Table 3: Comparison results on the industrial dataset.
363
+
364
+ <table><tr><td>Industrial</td><td>AUC (↑)</td><td>GAUC (↑)</td><td>Logloss (↓)</td></tr><tr><td>Wide&amp;deep</td><td>0.8123±0.0021</td><td>0.6908±0.0024</td><td>0.5223±0.0009</td></tr><tr><td>DeepFM</td><td>0.8169±0.0012</td><td>0.6982±0.0036</td><td>0.5202±0.0018</td></tr><tr><td>AFM</td><td>0.8103±0.0008</td><td>0.6866±0.0021</td><td>0.5301±0.0020</td></tr><tr><td>NFM</td><td>0.8112±0.0031</td><td>0.6823±0.0043</td><td>0.5286±0.0032</td></tr><tr><td>DIN</td><td>0.8225±0.0017</td><td>0.6963±0.0013</td><td>0.5022±0.0012</td></tr><tr><td>DIEN</td><td>0.8231±0.0042</td><td>0.7008±0.0018</td><td>0.5009±0.0021</td></tr><tr><td>UBR4CTR</td><td>0.8263±0.0037</td><td>0.7019±0.0032</td><td>0.4931±0.0019</td></tr><tr><td>SIM</td><td>0.8313±0.0025</td><td>0.7103±0.0010</td><td>0.4902±0.0008</td></tr><tr><td>PinSage</td><td>0.8289±0.0036</td><td>0.7086±0.0031</td><td>0.4917±0.0017</td></tr><tr><td>LightGCN</td><td>0.8309±0.0006</td><td>0.7093±0.0018</td><td>0.4909±0.0012</td></tr><tr><td>GLSM</td><td>0.8326±0.0053</td><td>0.7149±0.0039</td><td>0.4887±0.0029</td></tr><tr><td>GMT</td><td>0.8343±0.0022</td><td>0.7178±0.0033</td><td>0.4862±0.0021</td></tr><tr><td>MacGNN</td><td>0.8408±0.0019</td><td>0.7233±0.0014</td><td>0.4817±0.0013</td></tr></table>
365
+
366
+ we compare the average online response time per user-item pair between MacGNN and several well-performing, representative baselines: the feature interaction-based model Wide&Deep, the user interest-based model DIN, the search-based scheme SIM, the recursive graph convolution method LightGCN, and the graph transformer-based method GMT. Note that we report the online inference time measured on a real-world recommender system.
367
+
368
+ The comparison results are shown in Figure 5. From the figure, we have the following observations: (i) The proposed model is almost as efficient as Wide&Deep, the fastest model. Apart from Wide&Deep, our model achieves the best performance and efficiency among all user interest-based and graph-based models. (ii) Although the graph-based models employ sampling strategies to expedite inference, LightGCN and GMT are still the two slowest models. In particular, on the online platform, LightGCN and GMT require nearly three times and two times the online inference time of MacGNN, respectively, which imposes a significant burden on billion-scale systems.
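+ 
+ For reference, the snippet below is a minimal sketch of how an average per-pair response time could be measured offline; the `model.predict` call and the pair list are placeholders rather than part of the deployed serving stack, so treat it as illustrative only.
+ 
+ ```python
+ import time
+ 
+ def average_inference_ms(model, user_item_pairs, repeats: int = 3) -> float:
+     """Rough average per-pair latency in milliseconds (illustrative, not the online profiler)."""
+     start = time.perf_counter()
+     for _ in range(repeats):
+         for user, item in user_item_pairs:
+             model.predict(user, item)  # placeholder scoring call
+     elapsed = time.perf_counter() - start
+     return 1000.0 * elapsed / (repeats * len(user_item_pairs))
+ ```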
369
+
370
+ # 4.4 Ablation Study (RQ3)
371
+
372
+ To verify the effectiveness of the key designed components and modeled information in MacGNN, we conduct an ablation study comparing MacGNN with its four variants: (1) w/o weighting
373
+
374
+ ![](images/62b7e9085b42d3501cb0be5a356c663e809ed72c20dd0c7af4ed61ee90e38d6e.jpg)
375
+ Figure 5: Efficiency study of the model inference time.
376
+
377
+ Table 4: Ablation study results comparing MacGNN with its four variants on MovieLens and Electronics.
378
+
379
+ <table><tr><td></td><td>Variant</td><td>AUC (↑)</td><td>GAUC (↑)</td><td>Logloss (↓)</td></tr><tr><td rowspan="5">MovieLens</td><td>MacGNN</td><td>0.7458±0.0006</td><td>0.7198±0.0007</td><td>0.5886±0.0027</td></tr><tr><td>w/o weighting</td><td>0.7396±0.0013</td><td>0.7132±0.0009</td><td>0.5923±0.0037</td></tr><tr><td>w/o recent</td><td>0.7212±0.0005</td><td>0.6936±0.0009</td><td>0.6176±0.0052</td></tr><tr><td>w/o highorder</td><td>0.7401±0.0004</td><td>0.7126±0.0009</td><td>0.5929±0.0030</td></tr><tr><td>w/o itemgraph</td><td>0.7239±0.0002</td><td>0.6871±0.0007</td><td>0.6073±0.0032</td></tr><tr><td rowspan="5">Electronics</td><td>MacGNN</td><td>0.8444±0.0009</td><td>0.8458±0.0008</td><td>0.4892±0.0040</td></tr><tr><td>w/o weighting</td><td>0.8418±0.0006</td><td>0.8417±0.0006</td><td>0.4938±0.0032</td></tr><tr><td>w/o recent</td><td>0.8316±0.0010</td><td>0.8333±0.0008</td><td>0.5127±0.0037</td></tr><tr><td>w/o highorder</td><td>0.8302±0.0003</td><td>0.8319±0.0005</td><td>0.5083±0.0033</td></tr><tr><td>w/o itemgraph</td><td>0.8189±0.0005</td><td>0.8199±0.0007</td><td>0.5259±0.0043</td></tr></table>
380
+
381
+ removes the macro weight modeling module in MacGNN, i.e., the macro edge weights are ignored. (2) w/o recent removes the recent behavior modeling scheme in MacGNN, i.e., the short-term patterns are no longer modeled. (3) w/o highorder excludes the high-order graph information of the target user and item from MacGNN training and the final prediction. (4) w/o itemgraph excludes the target item's graph information from MacGNN training and prediction, which is largely ignored by previous works due to the efficiency trade-off. From Table 4, we have the following observations:
382
+
383
+ Effectiveness of key designed components. (i) Ignoring the macro edge weights results in the inferior performance of $w/o$ weighting, since the macro edge intensity reflects the behavior patterns of users and items. (ii) Removing recent behavior modeling degrades the recommendation performance of $w/o$ recent compared with MacGNN. This underscores the importance of taking recent behaviors into account from a macro perspective.
384
+
385
+ Effectiveness of key modeled information. (i) The performance drop of $w/o$ highorder relative to MacGNN, caused by neglecting high-order neighbors, indicates the significance of graph information and shows that modeling it from a macroscopic perspective is effective. (ii) The substantial performance gap between $w/o$ itemgraph and MacGNN highlights the importance of considering item-side graphs, which traditional CTR models tend to discard due to computational constraints.
386
+
387
+ We have also conducted key parameter studies and case studies of MacGNN, which are presented in Appendix C.
388
+
389
+ # 4.5 Online Evaluation (RQ4)
390
+
391
+ We have deployed MacGNN and conducted an online A/B test on the homepage feed of Taobao, Alibaba's e-commerce platform and one of the largest online shopping platforms in China. The online performance is compared against the best-performing user interest-based model SIM and the sampling-based graph model GMT. The results in Table 5 are averaged over four consecutive weeks.
392
+
393
+ Compared to SIM, firstly, MacGNN demonstrates improvements of $3.13\%$ in PCTR, $1.32\%$ in UCTR, and $5.13\%$ in GMV, suggesting that our model enhances users' willingness to engage with items and convert to purchases. Secondly, the Stay Time increases by $1.01\%$, indicating that MacGNN can effectively engage users and encourage them to spend more time on the platform by catering to their comprehensive macro behavior interests. Thirdly, MacGNN's Response Time is $20.97\%$ lower than SIM's, showing that MacGNN delivers significantly better performance together with higher efficiency.
394
+
395
+ Compared to GMT, MacGNN still demonstrates improvements of $2.35\%$ in PCTR, $1.09\%$ in UCTR, and $3.53\%$ in GMV. This suggests that taking into account the complete macro behavior patterns of users and items yields significantly better performance than considering only a small portion of sampled neighbors. Furthermore, the Stay Time increases by $0.69\%$, indicating that MacGNN encourages users to stay by considering more comprehensive behavior patterns. Lastly, MacGNN's Response Time is $38.13\%$ lower than GMT's, confirming the efficiency of MAG.
396
+
397
+ # 5 RELATED WORK
398
+
399
+ # 5.1 Click-Through Rate Prediction
400
+
401
+ Traditional models utilize feature interaction for CTR prediction [37, 43]. FM [27] first introduces latent vectors for second-order feature interactions to address feature sparsity. Wide&Deep [8] conducts feature interaction with a wide linear regression model and a deep feed-forward network trained jointly. DeepFM [15] further replaces the linear regression in Wide&Deep with FM to avoid manual feature engineering. Recently, user interest-based models have achieved better CTR performance. DIN [41] first designs a deep interest network with an attention mechanism between the user's behavior
402
+
403
+ Table 5: Results of online A/B tests in the industrial platform.
404
+
405
+ <table><tr><td>A/B Test</td><td>PCTR</td><td>UCTR</td><td>GMV</td><td>StayTime</td><td>ResTime</td></tr><tr><td>v.s. SIM</td><td>+3.13%</td><td>+1.32%</td><td>+5.13%</td><td>+1.01%</td><td>-20.97%</td></tr><tr><td>v.s. GMT</td><td>+2.35%</td><td>+1.09%</td><td>+3.53%</td><td>+0.69%</td><td>-38.13%</td></tr></table>
406
+
407
+ sequence and the target item. DIEN [40] then enhances DIN with a GRU [9] to mine users' interest evolution patterns. Further, some studies aim to model longer behavior sequences with sampling [3, 38]. UBR4CTR [26] retrieves the most relevant items from the user's long interaction history. Similarly, SIM [25] designs a two-stage paradigm that samples relevant items and computes their attention scores with the target item, reducing the scale of the user's complete behaviors.
408
+
409
+ # 5.2 Graph Learning for Recommendation
410
+
411
+ Recently, many works have attempted to improve recommendation performance through graph learning methods [4, 20, 28]. Typically, NGCF [31] enhances traditional collaborative filtering with high-order graph information. LightGCN [19] then removes the non-linear operations in NGCF, a simplification motivated by extensive experimental analysis. These methods have been widely used for item recall in industrial recommender systems. However, due to strict time-efficiency requirements, they cannot be applied directly as CTR prediction models. Some later advances consider graph information in the CTR scenario but still maintain the node sampling paradigm. GLSM [29] retrieves relevant nodes of the central user from the interaction graph for long-term interest modeling. GMT [23] constructs a heterogeneous information network (HIN) from various types of sampled user interactions and designs a graph-masked transformer for user modeling. NRCGI [1] further models deeper graph information with cluster-scale sampled neighborhoods based on non-recursive aggregation.
412
+
413
+ # 6 CONCLUSION
414
+
415
+ In this paper, we introduce the Macro Recommendation Graph (MAG) and Macro Graph Neural Networks (MacGNN) for billion-scale recommender systems, offering a more suitable solution to the prevalent issues of computational complexity and sampling bias in conventional GNN recommendation models. By ingeniously grouping micro nodes into macro nodes, MAG allows for efficient computation, while MacGNN facilitates effective information aggregation and embedding refinement at a macro level. Demonstrating superior performance in both offline experiments and online A/B tests, and practically serving over a billion users in Alibaba, MacGNN not only elevates the capability of predictive models in expansive digital environments but also paves the way for future research and optimizations in the realm of large-scale recommendation systems.
416
+
417
+ # ACKNOWLEDGMENTS
418
+
419
+ This work was supported in part by the Scientific Innovation 2030 Major Project for New Generation of AI (Grant No. 2020AAA0107300), Ministry of Science and Technology of the People's Republic of China, and the National Natural Science Foundation of China (Grant No. 62272200, U22A2095, 61932010, 62172443).
420
+
421
+ # REFERENCES
422
+
423
+ [1] Yuanchen Bei, Hao Chen, Shengyuan Chen, Xiao Huang, Sheng Zhou, and Feiran Huang. 2023. Non-Recursive Cluster-Scale Graph Interacted Model for Click-Through Rate Prediction. In Proceedings of the 32nd ACM International Conference on Information and Knowledge Management. 3748-3752.
424
+ [2] Yuanchen Bei, Sheng Zhou, Qiaoyu Tan, Hao Xu, Hao Chen, Zhao Li, and Jiajun Bu. 2023. Reinforcement Neighborhood Selection for Unsupervised Graph Anomaly Detection. In 2023 IEEE International Conference on Data Mining (ICDM).
425
+ [3] Yue Cao, Xiaojiang Zhou, Jiaqi Feng, Peihao Huang, Yao Xiao, Dayao Chen, and Sheng Chen. 2022. Sampling Is All You Need on Modeling Long-Term User Behaviors for CTR Prediction. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 2974-2983.
426
+ [4] Hao Chen, Zengde Deng, Yue Xu, and Zhoujun Li. 2021. Non-recursive graph convolutional networks. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 3205-3209.
427
+ [5] Hao Chen, Zefan Wang, Feiran Huang, Xiao Huang, Yue Xu, Yishi Lin, Peng He, and Zhoujun Li. 2022. Generative adversarial framework for cold-start item recommendation. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2565-2571.
428
+ [6] Junxuan Chen, Baigui Sun, Hao Li, Hongtao Lu, and Xian-Sheng Hua. 2016. Deep ctr prediction in display advertising. In Proceedings of the 24th ACM international conference on Multimedia, 811-820.
429
+ [7] Jiawei Chen, Junkang Wu, Jiancan Wu, Xuezhi Cao, Sheng Zhou, and Xiangnan He. 2023. Adap-τ: Adaptively Modulating Embedding Magnitude for Recommendation. In Proceedings of the ACM Web Conference 2023. 1085-1096.
430
+ [8] Heng-Tze Cheng, Levent Koc, Jeremiah Harmsen, Tal Shaked, Tushar Chandra, Hrishi Aradhye, Glen Anderson, Greg Corrado, Wei Chai, Mustafa Ispir, et al. 2016. Wide & deep learning for recommender systems. In Proceedings of the 1st workshop on deep learning for recommender systems. 7-10.
431
+ [9] Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence modeling. In NIPS 2014 Deep Learning Workshop.
432
+ [10] Paul Covington, Jay Adams, and Emre Sargin. 2016. Deep neural networks for youtube recommendations. In Proceedings of the 10th ACM conference on recommender systems. 191-198.
433
+ [11] Weiwei Deng, Xiaoliang Ling, Yang Qi, Tunzi Tan, Eren Manavoglu, and Qi Zhang. 2018. Ad click prediction in sequence with long short-term memory networks: an externality-aware model. In The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval. 1065-1068.
434
+ [12] Tom Fawcett. 2006. An introduction to ROC analysis. Pattern recognition letters 27, 8 (2006), 861-874.
435
+ [13] Chongming Gao, Shijun Li, Wenqiang Lei, Jiawei Chen, Biao Li, Peng Jiang, Xiangnan He, Jiaxin Mao, and Tat-Seng Chua. 2022. KuaiRec: A Fully-observed Dataset and Insights for Evaluating Recommender Systems. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 540-550.
436
+ [14] Xavier Glorot and Yoshua Bengio. 2010. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 249-256.
437
+ [15] Huifeng Guo, Ruiming Tang, Yunming Ye, Zhenguo Li, and Xiuqiang He. 2017. DeepFM: a factorization-machine based neural network for CTR prediction. In Proceedings of the 26th International Joint Conference on Artificial Intelligence. 1725-1731.
438
+ [16] Greg Hamerly and Charles Elkan. 2003. Learning the k in k-means. Advances in neural information processing systems 16 (2003).
439
+ [17] F Maxwell Harper and Joseph A Konstan. 2015. The movielens datasets: History and context. Acm transactions on interactive intelligent systems (tiis) 5, 4 (2015), 1-19.
440
+ [18] Xiangnan He and Tat-Seng Chua. 2017. Neural factorization machines for sparse predictive analytics. In Proceedings of the 40th International ACM SIGIR conference on Research and Development in Information Retrieval. 355-364.
441
+ [19] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 639-648.
442
+ [20] Feiran Huang, Zefan Wang, Xiao Huang, Yufeng Qian, Zhetao Li, and Hao Chen. 2023. Aligning Distillation For Cold-Start Item Recommendation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1147–1157.
443
+ [21] Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In International Conference on Learning Representations.
444
+ [22] Julian McAuley, Christopher Targett, Qinfeng Shi, and Anton Van Den Hengel. 2015. Image-based recommendations on styles and substitutes. In Proceedings of the 38th international ACM SIGIR conference on research and development in information retrieval. 43-52.
445
+
446
+ [23] Erxue Min, Yu Rong, Tingyang Xu, Yatao Bian, Da Luo, Kangyi Lin, Junzhou Huang, Sophia Ananiadou, and Peilin Zhao. 2022. Neighbour interaction based click-through rate prediction via graph-masked transformer. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. 353-362.
447
+ [24] Andreas Pfadler, Huan Zhao, Jizhe Wang, Lifeng Wang, Pipei Huang, and Dik Lun Lee. 2020. Billion-scale recommendation with heterogeneous side information at taobao. In 2020 IEEE 36th International Conference on Data Engineering (ICDE). IEEE, 1667-1676.
448
+ [25] Qi Pi, Guorui Zhou, Yujing Zhang, Zhe Wang, Lejian Ren, Ying Fan, Xiaoqiang Zhu, and Kun Gai. 2020. Search-based user interest modeling with lifelong sequential behavior data for click-through rate prediction. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management. 2685-2692.
449
+ [26] Jiarui Qin, Weinan Zhang, Xin Wu, Jiarui Jin, Yuchen Fang, and Yong Yu. 2020. User behavior retrieval for click-through rate prediction. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 2347-2356.
450
+ [27] Steffen Rendle. 2010. Factorization machines. In 2010 IEEE International conference on data mining. IEEE, 995-1000.
451
+ [28] Chen Shengyuan, Yunfeng Cai, Huang Fang, Xiao Huang, and Mingming Sun. 2023. Differentiable Neuro-Symbolic Reasoning on Large-Scale Knowledge Graphs. In Thirty-seventh Conference on Neural Information Processing Systems.
452
+ [29] Huinan Sun, Guangliang Yu, Pengye Zhang, Bo Zhang, Xingxing Wang, and Dong Wang. 2022. Graph Based Long-Term And Short-Term Interest Model for Click-Through Rate Prediction. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 1818-1826.
453
+ [30] Weixun Wang, Junqi Jin, Jianye Hao, Chunjie Chen, Chuan Yu, Weinan Zhang, Jun Wang, Xiaotian Hao, Yixi Wang, Han Li, et al. 2019. Learning adaptive display exposure for real-time advertising. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 2595-2603.
454
+ [31] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174.
455
+ [32] Shiwen Wu, Fei Sun, Wentao Zhang, Xu Xie, and Bin Cui. 2022. Graph neural networks in recommender systems: a survey. Comput. Surveys 55, 5 (2022), 1-37.
456
+ [33] Jun Xiao, Hao Ye, Xiangnan He, Hanwang Zhang, Fei Wu, and Tat-Seng Chua. 2017. Attentional factorization machines: learning the weight of feature interactions via attention networks. In Proceedings of the 26th International Joint Conference on Artificial Intelligence. 3119-3125.
457
+ [34] Yue Xu, Hao Chen, Zefan Wang, Jianwen Yin, Qijie Shen, Dimin Wang, Feiran Huang, Lixiang Lai, Tao Zhuang, Junfeng Ge, et al. 2023. Multi-factor Sequential Re-ranking with Perception-Aware Diversification. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5327-5337.
458
+ [35] Rex Ying, Ruining He, Kaifeng Chen, Pong Eksombatchai, William L Hamilton, and Jure Leskovec. 2018. Graph convolutional neural networks for web-scale recommender systems. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining. 974-983.
459
+ [36] Eva Zangerle and Christine Bauer. 2022. Evaluating recommender systems: survey and framework. Comput. Surveys 55, 8 (2022), 1-38.
460
+ [37] Weinan Zhang, Jiarui Qin, Wei Guo, Ruiming Tang, and Xiuqiang He. 2021. Deep Learning for Click-Through Rate Estimation. In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21. 4695-4703.
461
+ [39] Chang Zhou, Jianxin Ma, Jianwei Zhang, Jingren Zhou, and Hongxia Yang. 2021. Contrastive learning for debiased candidate generation in large-scale recommender systems. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 3985-3995.
462
+ [40] Guorui Zhou, Na Mou, Ying Fan, Qi Pi, Weijie Bian, Chang Zhou, Xiaoqiang Zhu, and Kun Gai. 2019. Deep interest evolution network for click-through rate prediction. In Proceedings of the AAAI conference on artificial intelligence, Vol. 33. 5941-5948.
463
+ [41] Guorui Zhou, Xiaoqiang Zhu, Chengru Song, Ying Fan, Han Zhu, Xiao Ma, Yanghui Yan, Junqi Jin, Han Li, and Kun Gai. 2018. Deep interest network for click-through rate prediction. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1059-1068.
464
+ [42] Renjie Zhou, Samamon Khemmarat, and Lixin Gao. 2010. The impact of YouTube recommendation system on video views. In Proceedings of the 10th ACM SIGCOMM conference on Internet measurement. 404-410.
465
+ [43] Jieming Zhu, Jinyang Liu, Shuai Yang, Qi Zhang, and Xiuqiang He. 2021. Open benchmarking for click-through rate prediction. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 2759-2769.
466
+
467
+ # A DATASET DETAILS
468
+
469
+ We adopt three publicly available datasets and a billion-scale industrial dataset for offline evaluation. The detailed descriptions and preprocessing of the datasets are as follows:
470
+
471
+ MovieLens Dataset<sup>2</sup> [17] contains 71,567 users, 10,681 movies, and 10,000,054 interactions recording users' ratings of the movies. To make the rating interactions suitable for the CTR prediction task, we follow previous works [41] and transform the ratings into clicked and non-clicked relationships, labeling samples with rating values greater than or equal to 4 as positive and the rest as negative.
472
+
473
+ Electronics Dataset<sup>3</sup> [22] is a subset of Amazon Dataset, which contains product reviews and metadata from Amazon. It contains 192,403 users, 63,001 items, and 1,689,188 interactions. We treat all the user reviews as user click behaviors, which is widely used in the related works [40, 41].
474
+
475
+ Kuaishou Dataset<sup>4</sup> [13] is a real-world dataset collected from the recommendation logs of the video-sharing mobile app Kuaishou. It contains 7,176 users, 10,728 videos, and 12,530,806 interactions. We regard samples in which the video play time accounts for more than $50\%$ of the total video duration as truly clicked videos.
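+ 
+ To make the labeling rules above concrete, the following is a minimal sketch of the binary label construction for MovieLens (rating threshold) and Kuaishou (play-time ratio). The pandas usage and column names are illustrative assumptions, not the actual preprocessing scripts.
+ 
+ ```python
+ import pandas as pd
+ 
+ def movielens_labels(ratings: pd.DataFrame) -> pd.Series:
+     # Ratings >= 4 are treated as clicks (positive); the rest are non-clicks.
+     return (ratings["rating"] >= 4).astype(int)
+ 
+ def kuaishou_labels(logs: pd.DataFrame) -> pd.Series:
+     # A video counts as truly clicked when play time exceeds 50% of its duration.
+     return (logs["play_duration"] > 0.5 * logs["video_duration"]).astype(int)
+ ```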
476
+
477
+ Industrial Dataset is a billion-scale dataset collected from the homepage of Alibaba Taobao, one of the largest e-commerce recommendation applications, involving billions of users and items. The industrial dataset contains both positive and negative interactions (e.g., impressions without user clicks), so negative sampling is not needed. There are over 118 billion instances, and each user has around 938 recent behaviors on average, which is much longer than the sequences in the public datasets. Following SIM [25], we use the instances of the past two weeks as the training set and the instances of the next day as the test set.
478
+
479
+ For the public datasets, the number of macro user clusters is set to 20, and item categories are used as the macro item clusters for simplicity. For the industrial dataset, the number of macro user clusters is set to 200 and the number of macro item clusters is set to 300.
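+ 
+ As an illustration of how the macro user clusters could be obtained, the sketch below applies a standard k-means grouping over user behavior-pattern features. The feature construction and the exact grouping procedure follow the main text; `user_behavior_features` and the use of k-means here are assumptions made purely for illustration.
+ 
+ ```python
+ import numpy as np
+ from sklearn.cluster import KMeans
+ 
+ def macro_user_clusters(user_behavior_features: np.ndarray, n_clusters: int = 20) -> np.ndarray:
+     """Assign each micro user node to a macro user cluster (illustrative k-means grouping)."""
+     kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=0)
+     return kmeans.fit_predict(user_behavior_features)  # one macro-cluster id per user
+ ```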
480
+
481
+ # B BASELINE DETAILS
482
+
483
+ We compare our proposed MacGNN with twelve representative state-of-the-art CTR prediction models as follows.
484
+
485
+ Feature Interaction-based Methods: (i) Wide&Deep [8] is widely used in real industrial applications. It consists of a wide module and a deep module to discover and extract the correlations and non-linear relations between features. (ii) DeepFM [15] is a variant of Wide&Deep that uses a factorization machine (FM) [27] as the wide part, avoiding manual feature engineering. (iii) AFM [33] improves feature interactions by discriminating their different importance via an attention network. (iv) NFM [18] introduces bi-interaction pooling to deepen FM for learning higher-order and non-linear feature interactions.
486
+
487
+ User Interest Modeling-based Methods: (i) DIN [41] is the first model to use an attention mechanism to extract user interest representations from truncated historical user behaviors for CTR prediction. (ii) DIEN [40] is an improved version of DIN, which
488
+
489
+ ![](images/ee2192636c4fcdc10b6091377f69a064bc282931b1dc5f05fac08d03bac3095d.jpg)
490
+ Figure 6: Parameter study of temperature parameter $\tau$ and macro node number $\widetilde{n}$ on MovieLens and Electronics.
491
+
492
+ uses a two-layer RNN module enhanced with the attention mechanism to capture evolving user interests. (iii) UBR4CTR [26] proposes a search engine-based method to retrieve more relevant and appropriate behavioral data from long user behavior sequences for model training. (iv) SIM [25] uses two cascaded search units to extract user interests and can better model long sequential behavior data in terms of both scalability and performance for CTR prediction.
493
+
494
+ Graph-based Methods: (i) PinSage [35] is a representative graph-based web-scale recommendation model, which conducts inductive graph aggregation on sampled user/item nodes. We concatenate and feed the embeddings trained by PinSage into the widely employed prediction layer to fit the CTR prediction scenario. (ii) LightGCN [19] is a simplified collaborative filtering model designed by including only the most essential components of GCN for recommendation. Since it is a collaborative filtering model, its trained embeddings are also fed into the prediction layer, as with PinSage, for CTR prediction. (iii) GLSM [29] is a sampling-based model for introducing graph information, which consists of a multi-interest graph structure for capturing long-term patterns and a sequence model for modeling short-term information. (iv) GMT [23] is also a sampling-based state-of-the-art graph model for CTR prediction, with a graph-masked transformer to learn different kinds of interactions on the heterogeneous information network built from the constructed neighborhood nodes.
495
+
496
+ # C ADDITIONAL EXPERIMENTS
497
+
498
+ # C.1 Parameter Analysis
499
+
500
+ C.1.1 Effect of Temperature Parameter. We investigate the effect of the temperature parameter $\tau$ in macro node weighting over the range 0.1 to 1.9 with a step size of 0.2, as illustrated in Figure 6. We observe from the results that an overly small value of $\tau$ causes poor performance. Furthermore, the suitable value of $\tau$ for MovieLens is larger than 1, while that for Electronics is smaller
501
+
502
+ ![](images/f1fef65639983bf36826f77efceb3b0135187bdf377385ffefc181a85c3907b2.jpg)
503
+ Figure 7: Case study of user groups with different interaction scales on the MovieLens dataset.
504
+
505
+ than 1; one possible reason is that the temperature of MacGNN should be set smaller on sparser datasets.
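+ 
+ To illustrate the role of $\tau$, the sketch below applies a generic temperature-scaled softmax to the (log-scaled) macro edge intensities of a target node: a smaller $\tau$ concentrates the weights on the strongest macro neighbors, while a larger $\tau$ smooths them. The exact weighting function of MacGNN is defined in the method section; the function here is only an illustrative stand-in.
+ 
+ ```python
+ import numpy as np
+ 
+ def macro_neighbor_weights(edge_intensities: np.ndarray, tau: float) -> np.ndarray:
+     """Temperature-scaled softmax over macro edge intensities (illustrative stand-in)."""
+     logits = np.log1p(edge_intensities) / tau
+     logits -= logits.max()              # numerical stability
+     weights = np.exp(logits)
+     return weights / weights.sum()
+ ```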
506
+
507
+ C.1.2 Effect of Macro Node Number. We also evaluate the impact of different macro user cluster numbers $\widetilde{n}$ under behavior-pattern grouping, while fixing item categories as the item grouping to avoid the impact of multiple variables. From the second row of Figure 6, we find that an overly small cluster number leads to overly coarse user segmentation and results in poor performance. In addition, choosing a relatively appropriate number of clusters, such as 20, brings
508
+
509
+ sufficiently good performance of MacGNN on the public datasets; this macro node number is much smaller than the micro interaction scale, and also much smaller than the sequence lengths of previous user interest modeling works [40, 41].
510
+
511
+ # C.2 Case Study
512
+
513
+ We further conduct a case study to verify the performance of MacGNN on users with different interaction frequencies. Specifically, we divide users into six groups according to their interaction frequency on the MovieLens dataset. The case study results are illustrated in Figure 7.
514
+
515
+ We find that MacGNN performs better in most cases, which shows that the introduction of MAG can benefit users with different interaction frequencies. This observation can be explained from the following two main aspects: (i) For low-activity users, modeling from a macro perspective brings additional general key features, and the high-order graph information from MAG also provides helpful signals for user modeling. (ii) For high-activity users, in addition to ensuring computational efficiency, macro modeling on MAG avoids the noise and overly complex information contained in excessively long interaction sequences. Thus, besides improving computational efficiency while considering both complete and high-order patterns, the organization of MAG is also beneficial for modeling the interests of users with various interaction frequencies.
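+ 
+ For readers who wish to reproduce this kind of analysis, the snippet below sketches one way to split users into six interaction-frequency groups. The paper does not specify the exact group boundaries, so quantile-based buckets are used here purely for illustration.
+ 
+ ```python
+ import pandas as pd
+ 
+ def frequency_groups(interactions: pd.DataFrame, n_groups: int = 6) -> pd.Series:
+     """Bucket users into n_groups by interaction count (quantile cut; boundaries are illustrative)."""
+     counts = interactions.groupby("user_id").size()
+     return pd.qcut(counts, q=n_groups, labels=False, duplicates="drop")
+ ```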
2401.14xxx/2401.14939/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c5dfe9cdc59aa4651d655cf7d01511199e2075ee210f2c4a083ae4d0d524d20
3
+ size 680035
2401.14xxx/2401.14939/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14968/934b11c1-22d4-4fd5-b641-2148c5b49149_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb1dd864d2c037418d2189e71f7693596bdb6391ddbc393dbd7dab64a525c0a9
3
+ size 5187901
2401.14xxx/2401.14968/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.14xxx/2401.14968/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:413cf506670e6537ca00ab7240341314ae5fa6a11717cfa1193c409f654db49a
3
+ size 830563
2401.14xxx/2401.14968/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_content_list.json ADDED
@@ -0,0 +1,2219 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "AIRAVATA: INTRODUCING HINDI INSTRUCTION-TUNED LLM",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 171,
8
+ 99,
9
+ 789,
10
+ 118
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jay Gala $^{1}$ Thanmay Jayakumar $^{1}$ Jaavid Aktar Husain $^{1,3}$ Aswanth Kumar $^{4}$",
17
+ "bbox": [
18
+ 179,
19
+ 140,
20
+ 759,
21
+ 157
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Mohammed Safi Ur Rahman Khan<sup>1</sup> Diptesh Kanojia<sup>5</sup> Ratish Puduppully<sup>6</sup>",
28
+ "bbox": [
29
+ 181,
30
+ 160,
31
+ 738,
32
+ 179
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Mitesh M. Khapra $^{1,2}$ Raj Dabre $^{7}$ Rudra Murthy $^{8}$ Anoop Kunchukuttan $^{1,2,9}$",
39
+ "bbox": [
40
+ 181,
41
+ 181,
42
+ 766,
43
+ 199
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "$^{1}$ Nilekani Centre at AI4Bharat $^{2}$ IIT Madras $^{3}$ IIIT D&M Kancheepuram",
50
+ "bbox": [
51
+ 181,
52
+ 209,
53
+ 687,
54
+ 224
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "$^{4}$ Flipkart $^{5}$ University of Surrey $^{6}$ A*STAR $^{7}$ NICT $^{8}$ IBM Research $^{9}$ Microsoft",
61
+ "bbox": [
62
+ 181,
63
+ 229,
64
+ 772,
65
+ 244
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "https://ai4bharat.github.io/airavata",
72
+ "bbox": [
73
+ 318,
74
+ 287,
75
+ 676,
76
+ 303
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "image",
82
+ "img_path": "images/eb4680e0728a77773011d0bbcc9936fc37dc85913dfe1b3274c98ff4489fd13d.jpg",
83
+ "image_caption": [
84
+ "Figure 1: Image Courtesy: DALL-E 3 (Betker et al., 2023)."
85
+ ],
86
+ "image_footnote": [],
87
+ "bbox": [
88
+ 334,
89
+ 325,
90
+ 661,
91
+ 575
92
+ ],
93
+ "page_idx": 0
94
+ },
95
+ {
96
+ "type": "text",
97
+ "text": "1 INTRODUCTION",
98
+ "text_level": 1,
99
+ "bbox": [
100
+ 173,
101
+ 635,
102
+ 336,
103
+ 648
104
+ ],
105
+ "page_idx": 0
106
+ },
107
+ {
108
+ "type": "text",
109
+ "text": "The recent year has witnessed tremendous interest and activity in the world of Large Language Models (LLMs). LLMs hold the potential to unlock exciting applications in artificial intelligence due to their ability to comprehend complex natural language instructions and excel in a broad spectrum of tasks involving language, knowledge, reasoning, and creative generation. To foster research, innovation, and widespread adoption, an open ecosystem is essential. We have observed significant advancements in this area with the launch of models like Llama 2 (Touvron et al., 2023) and Mistral (Jiang et al., 2023), as well as their instruction-tuned variants such as Llama 2 Chat (Touvron et al., 2023), Mistral-Instruct (Jiang et al., 2023), and Zephyr (Tunstall et al., 2023), among others. Major progress has also been made in developing datasets for pre-training such as RedPajama (Computer, 2023)), instruction tuning (e.g., Alpaca (Taori et al., 2023), UltraChat (Ding et al., 2023), Dolly (Conover et al., 2023), OpenAssistant (Kopf et al., 2023), LMSYS-Chat (Zheng et al., 2023a)), and evaluation benchmarks (e.g., AlpacaEval (Dubois et al., 2023), MT-Bench (Zheng et al., 2023b)). However, most of these advancements have been predominantly centered around the English language.",
110
+ "bbox": [
111
+ 169,
112
+ 666,
113
+ 826,
114
+ 863
115
+ ],
116
+ "page_idx": 0
117
+ },
118
+ {
119
+ "type": "text",
120
+ "text": "There is limited support for Indian languages, which can be attributed to the incidental inclusion of some Indian language data that slipped through the data filters during the pre-training of these language models. However, the representation of data, the efficacy of tokenizers, and task performance for Indian languages are considerably behind that of English. The performance in Indian",
121
+ "bbox": [
122
+ 169,
123
+ 867,
124
+ 823,
125
+ 925
126
+ ],
127
+ "page_idx": 0
128
+ },
129
+ {
130
+ "type": "header",
131
+ "text": "Technical Report",
132
+ "bbox": [
133
+ 173,
134
+ 32,
135
+ 290,
136
+ 47
137
+ ],
138
+ "page_idx": 0
139
+ },
140
+ {
141
+ "type": "aside_text",
142
+ "text": "arXiv:2401.15006v2 [cs.CL] 26 Feb 2024",
143
+ "bbox": [
144
+ 22,
145
+ 265,
146
+ 58,
147
+ 708
148
+ ],
149
+ "page_idx": 0
150
+ },
151
+ {
152
+ "type": "page_number",
153
+ "text": "1",
154
+ "bbox": [
155
+ 493,
156
+ 948,
157
+ 503,
158
+ 959
159
+ ],
160
+ "page_idx": 0
161
+ },
162
+ {
163
+ "type": "text",
164
+ "text": "languages, even on closed-source models such as ChatGPT (OpenAI, 2022), GPT-4 (Achiam et al., 2023), and others, is inferior compared to English (Ahuja et al., 2023). Therefore, there is an urgent need to develop a similar ecosystem of tools, models, and datasets for Indian languages to foster research and innovation. In pursuit of this objective, the recent collaboration with Sarvam AI led to release of OpenHathi (SarvamAI, 2023), an open-source foundational model for Hindi, developed by extending Llama 2 (Touvron et al., 2023).",
165
+ "bbox": [
166
+ 169,
167
+ 103,
168
+ 823,
169
+ 188
170
+ ],
171
+ "page_idx": 1
172
+ },
173
+ {
174
+ "type": "text",
175
+ "text": "Today, we announce the next step - an initial release of \"Airavata\", an instruction-tuned model for Hindi built upon finetuning OpenHathi (SarvamAI, 2023) with diverse, instruction-tuning Hindi datasets to make it better suited for assistive tasks.",
176
+ "bbox": [
177
+ 169,
178
+ 194,
179
+ 823,
180
+ 236
181
+ ],
182
+ "page_idx": 1
183
+ },
184
+ {
185
+ "type": "text",
186
+ "text": "Along with the model, we also share the instruction tuning datasets<sup>1</sup> to enable further research for IndicLLMs. We rely on human-curated, license-friendly instruction-tuned datasets to build \"Airavata\". We do not use data generated from proprietary models like GPT-4 (Achiam et al., 2023), etc. We think this is a more sustainable way of building instruction-tuned models at scale for most Indic languages, where relying on distilled data from commercial models would increase costs and restrict their free usage in downstream applications due to licensing restrictions.",
187
+ "bbox": [
188
+ 169,
189
+ 243,
190
+ 823,
191
+ 327
192
+ ],
193
+ "page_idx": 1
194
+ },
195
+ {
196
+ "type": "text",
197
+ "text": "We also compile a collection of evaluation benchmarks $^2$ along with an evaluation framework to compare various LLMs for their abilities on diverse tasks when instructed in Hindi. Using this benchmark and human judgments, we compare different LLMs to quantify the current state of their Hindi capabilities. We conduct a detailed analysis of Airavata's performance on various Natural Language Understanding (NLU) and Natural Language Generation (NLG) tasks and find that the instruction fine-tuning helps align the model to various NLU tasks. There is significant potential for improvement in NLG tasks, which require the creation of larger, more diverse instruction datasets as well as innovations in aligning English model representations to Hindi representations to drive better cross-lingual transfer.",
198
+ "bbox": [
199
+ 169,
200
+ 334,
201
+ 826,
202
+ 459
203
+ ],
204
+ "page_idx": 1
205
+ },
206
+ {
207
+ "type": "text",
208
+ "text": "2 INSTRUCTION TUNING DATASET CREATION",
209
+ "text_level": 1,
210
+ "bbox": [
211
+ 171,
212
+ 479,
213
+ 570,
214
+ 494
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "text",
220
+ "text": "High-quality instruction tuning datasets are important for the good performance of LLMs. However, there are few diverse datasets for Hindi. Following Wei et al. (2023), we rely on translating high-quality English-supervised instruction-tuning datasets into Hindi. We use IndicTrans2 (Gala et al., 2023), the state-of-the-art open-source MT model for Indian languages, for translation. Some previous works (Li et al., 2023; Wei et al., 2023) have used ChatGPT (OpenAI, 2022) to translate instructions and/or generate responses into Hindi to better use context during translation (IndicTrans2 and most MT models are sentence-level). However, this is not cost-effective, and the translation quality of ChatGPT (OpenAI, 2022) is lower than IndicTrans2 (Gala et al., 2023), and its generation quality in Hindi might not be up to the mark (Ahuja et al., 2023). A future avenue of work would be improving translation quality when document context is available.",
221
+ "bbox": [
222
+ 169,
223
+ 511,
224
+ 823,
225
+ 650
226
+ ],
227
+ "page_idx": 1
228
+ },
229
+ {
230
+ "type": "text",
231
+ "text": "We sample examples from different datasets listed in Table 1 to ensure balanced representations across all the tasks while fitting into our instruction tuning budget. We translate the instructions, input, and outputs into Hindi. This results in a total of $404\\mathrm{k}$ examples spanning English and Hindi language. The translated Hindi examples were filtered to retain high-quality examples. Specifically, examples were retained only when the $\\mathrm{chrF}++$ score (Popovic, 2017) between the back-translated example and the corresponding English example was 50 or above. The final dataset used for instruction tuning contains $385\\mathrm{k}$ examples. Table 1 shows the details of the final training dataset. The dataset is available on the HuggingFace Hub. $^3$",
232
+ "bbox": [
233
+ 169,
234
+ 657,
235
+ 823,
236
+ 768
237
+ ],
238
+ "page_idx": 1
239
+ },
240
+ {
241
+ "type": "text",
242
+ "text": "We also create two native Hindi Instruction datasets:",
243
+ "bbox": [
244
+ 171,
245
+ 775,
246
+ 517,
247
+ 789
248
+ ],
249
+ "page_idx": 1
250
+ },
251
+ {
252
+ "type": "text",
253
+ "text": "- wikiHow: wikiHow<sup>4</sup> is an online wiki-style platform that serves as a valuable resource for a diverse array of how-to articles spanning numerous topics. The articles on the platform are humanmoderated, ensuring a high-quality standard. The questions users pose in these articles closely",
254
+ "bbox": [
255
+ 171,
256
+ 803,
257
+ 823,
258
+ 845
259
+ ],
260
+ "page_idx": 1
261
+ },
262
+ {
263
+ "type": "text",
264
+ "text": "$^{1}$ https://huggingface.co/datasets/ai4bharat/indic-instruct-data-v0.1 \n $^{2}$ https://huggingface.co/collections/ai4bharat/airavata-evaluation-suite-65b13b7b68165de71ba0b333 \n $^{3}$ https://huggingface.co/datasets/ai4bharat/indic-instruct-data-v0.1 \n $^{4}$ https://www.wikihow.com/Main-Page",
265
+ "bbox": [
266
+ 171,
267
+ 854,
268
+ 823,
269
+ 924
270
+ ],
271
+ "page_idx": 1
272
+ },
273
+ {
274
+ "type": "header",
275
+ "text": "Technical Report",
276
+ "bbox": [
277
+ 173,
278
+ 32,
279
+ 287,
280
+ 47
281
+ ],
282
+ "page_idx": 1
283
+ },
284
+ {
285
+ "type": "page_number",
286
+ "text": "2",
287
+ "bbox": [
288
+ 493,
289
+ 948,
290
+ 503,
291
+ 959
292
+ ],
293
+ "page_idx": 1
294
+ },
295
+ {
296
+ "type": "table",
297
+ "img_path": "images/f1829dd70675f9fca345574e42c4a5b580c007777c20ceb152821c0b6a594720.jpg",
298
+ "table_caption": [],
299
+ "table_footnote": [],
300
+ "table_body": "<table><tr><td rowspan=\"2\">Dataset</td><td colspan=\"2\">Unfiltered</td><td colspan=\"2\">Filtered</td></tr><tr><td>English</td><td>Hindi</td><td>English</td><td>Hindi</td></tr><tr><td>FLAN-v2 (Longpre et al., 2023)</td><td>67,463</td><td>67,463</td><td>67,463</td><td>65,228</td></tr><tr><td>Anthropic-HHH (Bai et al., 2022)</td><td>5,000</td><td>5,000</td><td>5,000</td><td>4,911</td></tr><tr><td>Dolly (Conover et al., 2023)</td><td>15,011</td><td>15,011</td><td>15,011</td><td>14,880</td></tr><tr><td>OpenAssistant (Köpf et al., 2023)</td><td>19,945</td><td>20,128</td><td>19,945</td><td>16384</td></tr><tr><td>LymSys-Chat (Zheng et al., 2023a)</td><td>50,000</td><td>50,000</td><td>50,000</td><td>37,422</td></tr><tr><td>WikiHow</td><td>20,400</td><td>6,055</td><td>20,400</td><td>6,055</td></tr><tr><td>Anudesh</td><td>5,234</td><td>7,577</td><td>5,234</td><td>7,577</td></tr><tr><td>Total</td><td>183,053</td><td>171,234</td><td>183,053</td><td>152,457</td></tr></table>",
301
+ "bbox": [
302
+ 225,
303
+ 99,
304
+ 772,
305
+ 268
306
+ ],
307
+ "page_idx": 2
308
+ },
309
+ {
310
+ "type": "text",
311
+ "text": "Table 1: Instruction Fine-tuning Training Dataset Details",
312
+ "bbox": [
313
+ 308,
314
+ 277,
315
+ 686,
316
+ 292
317
+ ],
318
+ "page_idx": 2
319
+ },
320
+ {
321
+ "type": "text",
322
+ "text": "align with potential use cases for this model, making it a rich resource for training models. Additionally, this might also help induce reasoning capabilities and generate logical step-by-step responses. We curate around 20k and 6k articles in English and Hindi, respectively, resulting in a total of around 27k articles. We currently formulate the data as a completion task given either question or question along with a few initial steps. The dataset is released under the CC-0 license.",
323
+ "bbox": [
324
+ 183,
325
+ 321,
326
+ 823,
327
+ 391
328
+ ],
329
+ "page_idx": 2
330
+ },
331
+ {
332
+ "type": "text",
333
+ "text": "- Anudesh: Anudesh is a crowd-sourced collection of prompts accompanied by responses generated from the Llama 2 70B model (Touvron et al., 2023). Participants are provided with clear guidelines detailing the nature of the interaction required, including the specific language to be employed. These languages encompass a range that includes Indic languages, English, transliterated Indic, as well as a blend of Indic and English in a code-mixed format. Contributors craft their prompts in adherence to these directives and the specified language criteria. Subsequently, these prompts are then paired with the corresponding translated outputs from the Llama 2 70B model (Touvron et al., 2023). The dataset is released under CC-BY-4.0 license. More details about the interactions will be released soon.",
334
+ "bbox": [
335
+ 171,
336
+ 397,
337
+ 823,
338
+ 521
339
+ ],
340
+ "page_idx": 2
341
+ },
342
+ {
343
+ "type": "text",
344
+ "text": "We provide a brief description of all the existing instruction datasets used in addition to the above (and corresponding licenses) below:",
345
+ "bbox": [
346
+ 169,
347
+ 536,
348
+ 823,
349
+ 565
350
+ ],
351
+ "page_idx": 2
352
+ },
353
+ {
354
+ "type": "list",
355
+ "sub_type": "text",
356
+ "list_items": [
357
+ "- FLAN-v2 (Longpre et al., 2023): A collection of NLP tasks that combines a number of existing NLP datasets with various data augmentations, introduced by Chung et al. (2022). We sample around 67K examples for our training mixture. The dataset is released under the Apache-2.0 license.",
358
+ "- Anthropic-HHH (Bai et al., 2022): A collection of human-collected preference data for aligning the models to be helpful and harmless. We sample 5K conversations from the \"chosen\" column for our training mixture. The dataset is released under the MIT license.",
359
+ "- Dolly (Conover et al., 2023): A corpus of more than 15K records generated by thousands of Databricks employees to enable LLMs to exhibit the magical interactivity of ChatGPT. The dataset is released under the CC-BY-SA-3.0 license.",
360
+ "- OpenAssistant (Köpf et al., 2023): A collection of human-generated, human-annotated assistant-style conversation corpus consisting of 38K messages, resulting in over 3K conversation trees and around 20K conversations. The dataset is released under the Apache-2.0.",
361
+ "- LymSys-Chat (Zheng et al., 2023a): A collection of 1M real-world conversations spanning 25 SOTA LLMs similar to OpenAssistant (Köpf et al., 2023). We sample 50K conversations for our training mixture. The dataset is released under the LMSYS-Chat-1M Dataset License Agreement.<sup>5</sup>",
362
+ "- NMT (Gala et al., 2023): A multi-domain human-annotated dataset containing 50K bitext English-Hindi translation pairs from BPCC-Human (Gala et al., 2023) to enable better crosslingual transfer. The dataset is released under the CC-BY-4.0 license."
363
+ ],
364
+ "bbox": [
365
+ 169,
366
+ 578,
367
+ 823,
368
+ 886
369
+ ],
370
+ "page_idx": 2
371
+ },
372
+ {
373
+ "type": "header",
374
+ "text": "Technical Report",
375
+ "bbox": [
376
+ 173,
377
+ 32,
378
+ 287,
379
+ 47
380
+ ],
381
+ "page_idx": 2
382
+ },
383
+ {
384
+ "type": "page_footnote",
385
+ "text": "$^{5}$ https://huggingface.co/datasets/lmsys/lmsys-chat-lm#lmsys-chat-1m-dataset-license-agreement",
386
+ "bbox": [
387
+ 171,
388
+ 896,
389
+ 823,
390
+ 922
391
+ ],
392
+ "page_idx": 2
393
+ },
394
+ {
395
+ "type": "page_number",
396
+ "text": "3",
397
+ "bbox": [
398
+ 493,
399
+ 948,
400
+ 503,
401
+ 959
402
+ ],
403
+ "page_idx": 2
404
+ },
405
+ {
406
+ "type": "table",
407
+ "img_path": "images/f2c6045da1d8fed9f881245dc4297f9432f333bcd73d2686d861a866764c37f3.jpg",
408
+ "table_caption": [],
409
+ "table_footnote": [],
410
+ "table_body": "<table><tr><td>Hyper-Parameter</td><td>Value</td></tr><tr><td>LoRA Rank</td><td>16</td></tr><tr><td>LoRA alpha</td><td>32</td></tr><tr><td>LoRA Dropout</td><td>0.05</td></tr><tr><td>LoRA Target Modules</td><td>q_Proj, v_Proj, k_Proj, gate_Proj up_Proj, down_Proj</td></tr><tr><td>Epochs</td><td>4</td></tr><tr><td>Learning rate</td><td>5e-4</td></tr><tr><td>Batch Size</td><td>128</td></tr><tr><td>Floating Point Precision</td><td>bffloat16</td></tr></table>",
411
+ "bbox": [
412
+ 292,
413
+ 99,
414
+ 707,
415
+ 256
416
+ ],
417
+ "page_idx": 3
418
+ },
419
+ {
420
+ "type": "text",
421
+ "text": "Table 2: Hyperparameters for Fine-tuning",
422
+ "bbox": [
423
+ 357,
424
+ 265,
425
+ 637,
426
+ 282
427
+ ],
428
+ "page_idx": 3
429
+ },
430
+ {
431
+ "type": "text",
432
+ "text": "3 SUPERVISED FINE-TUNING",
433
+ "text_level": 1,
434
+ "bbox": [
435
+ 171,
436
+ 308,
437
+ 431,
438
+ 324
439
+ ],
440
+ "page_idx": 3
441
+ },
442
+ {
443
+ "type": "text",
444
+ "text": "We fine-tune the OpenHathi model using the above-compiled datasets. We perform parameter-efficient fine-tuning with LoRA (Hu et al., 2022). The hyperparameters used are listed in the Table 2.",
445
+ "bbox": [
446
+ 169,
447
+ 340,
448
+ 823,
449
+ 369
450
+ ],
451
+ "page_idx": 3
452
+ },
453
+ {
454
+ "type": "text",
455
+ "text": "During fine-tuning, the loss was computed only for the output tokens. We used the OpenInstruct framework<sup>6</sup> for fine-tuning and customizing it for our requirements (our custom version is available as IndicInstruct<sup>7</sup>). One fine-tuning example corresponds to one example in the dataset. However, this is suboptimal since many tokens are wasted as padding tokens. We plan to optimize this process by packing multiple dataset examples into a single fine-tuning example (Iyer et al., 2022; Krell et al., 2023).",
456
+ "bbox": [
457
+ 169,
458
+ 375,
459
+ 823,
460
+ 460
461
+ ],
462
+ "page_idx": 3
463
+ },
464
+ {
465
+ "type": "text",
466
+ "text": "3.1 FULL VS. LORA FINETUNING",
467
+ "text_level": 1,
468
+ "bbox": [
469
+ 171,
470
+ 478,
471
+ 421,
472
+ 492
473
+ ],
474
+ "page_idx": 3
475
+ },
476
+ {
477
+ "type": "text",
478
+ "text": "There are two prevalent methods for instruction fine-tuning for LLMs: Full fine-tuning and LoRA. Full fine-tuning (FFT) involves updating all the model parameters, whereas LoRA (Hu et al., 2022) fine-tuning involves introducing a small set of additional parameters and only updating them. We perform an ablation study to examine both the fine-tuning strategies to train two models, one employing full fine-tuning and the other by LoRA fine-tuning. For the context of this ablation study, we consider the FLAN v2 (Longpre et al., 2023) subset from our instruction dataset spanning English and Hindi. For our evaluation, we used a subset of NLU tasks in Hindi, along with BoolQ (Clark et al., 2019) and MMLU (Hendrycks et al., 2021) tasks in English, as development sets to decide between full fine-tuning and LoRA fine-tuning (Figure 2). We observed that FFT models outperform the OpenHathi base model (SarvamAI, 2023) in IndicCopa (Doddapaneni et al., 2022) and IndicXParaphrase (Doddapaneni et al., 2022) tasks. However, the FFT model performed poorly on English tasks compared to the base model as well as the LoRA fine-tuned model. LoRA fine-tuned model demonstrated improvements or similar performance as the OpenHathi base model on both Hindi NLU and English tasks. As a result, we decided to proceed with LoRA fine-tuning for training our final model. We report results on the LoRA fine-tuned final model in the subsequent section.",
479
+ "bbox": [
480
+ 169,
481
+ 505,
482
+ 826,
483
+ 728
484
+ ],
485
+ "page_idx": 3
486
+ },
487
+ {
488
+ "type": "text",
489
+ "text": "3.2 MODEL SELECTION",
490
+ "text_level": 1,
491
+ "bbox": [
492
+ 171,
493
+ 746,
494
+ 354,
495
+ 760
496
+ ],
497
+ "page_idx": 3
498
+ },
499
+ {
500
+ "type": "text",
501
+ "text": "We fine-tune the OpenHathi model for 4 epochs and save the model after each epoch. We evaluate the checkpoint of each epoch on the dev set (IndicSentiment, IndicCOPA, IndicXNLI, and IndicQA from IndicXTREME (Doddapaneni et al., 2022), and Flores devtest (Goyal et al., 2021; Costa-jussa et al., 2022)) and compare the average performance. We observe that the checkpoint of epoch 3 performs well on NLU tasks, while the checkpoint of epoch 4 performs well on NLG tasks. We perform checkpoint averaging, where we interpolate the weights of the above two checkpoints to obtain a model that performs well across both NLU and NLG tasks. We found the best interpolation factor to be around 0.6.",
502
+ "bbox": [
503
+ 169,
504
+ 773,
505
+ 823,
506
+ 885
507
+ ],
508
+ "page_idx": 3
509
+ },
510
+ {
511
+ "type": "header",
512
+ "text": "Technical Report",
513
+ "bbox": [
514
+ 173,
515
+ 32,
516
+ 287,
517
+ 47
518
+ ],
519
+ "page_idx": 3
520
+ },
521
+ {
522
+ "type": "page_footnote",
523
+ "text": "$^{6}$ https://github.com/allenai/open-instruct",
524
+ "bbox": [
525
+ 189,
526
+ 896,
527
+ 553,
528
+ 909
529
+ ],
530
+ "page_idx": 3
531
+ },
532
+ {
533
+ "type": "page_footnote",
534
+ "text": "<sup>7</sup>https://github.com/AI4Bharat/IndicInstruct",
535
+ "bbox": [
536
+ 192,
537
+ 910,
538
+ 571,
539
+ 922
540
+ ],
541
+ "page_idx": 3
542
+ },
543
+ {
544
+ "type": "page_number",
545
+ "text": "4",
546
+ "bbox": [
547
+ 493,
548
+ 948,
549
+ 504,
550
+ 959
551
+ ],
552
+ "page_idx": 3
553
+ },
554
+ {
555
+ "type": "image",
556
+ "img_path": "images/998239512d6d5d19c4c7c13faaaffe796f694f348f9735c2b633498e3b0386f7.jpg",
557
+ "image_caption": [
558
+ "Figure 2: Ablation experiment to understand the performance gaps between Full fine-tuning and LoRA fine-tuning across a mix of English and Hindi NLU tasks."
559
+ ],
560
+ "image_footnote": [],
561
+ "bbox": [
562
+ 191,
563
+ 116,
564
+ 803,
565
+ 396
566
+ ],
567
+ "page_idx": 4
568
+ },
569
+ {
570
+ "type": "equation",
571
+ "text": "\n$$\n\\text {i n t e r p o l a t e d w e i g h t s} = 0. 6 \\times \\text {c h e c k p o i n t} _ {3} + (1 - 0. 6) \\times \\text {c h e c k p o i n t} _ {4}\n$$\n",
572
+ "text_format": "latex",
573
+ "bbox": [
574
+ 258,
575
+ 493,
576
+ 733,
577
+ 508
578
+ ],
579
+ "page_idx": 4
580
+ },
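A minimal sketch of the checkpoint averaging described above, assuming the epoch-3 and epoch-4 checkpoints are available as PyTorch state dicts; the file paths in the usage comment are hypothetical.

```python
# Interpolate the epoch-3 and epoch-4 checkpoints with factor 0.6, as in the equation above.
import torch

def interpolate_checkpoints(path_epoch3: str, path_epoch4: str, factor: float = 0.6) -> dict:
    sd3 = torch.load(path_epoch3, map_location="cpu")
    sd4 = torch.load(path_epoch4, map_location="cpu")
    # Both checkpoints share the same parameter keys, so a key-wise weighted sum suffices.
    return {k: factor * sd3[k] + (1.0 - factor) * sd4[k] for k in sd3}

# Example (hypothetical paths):
# merged = interpolate_checkpoints("epoch3/pytorch_model.bin", "epoch4/pytorch_model.bin")
# torch.save(merged, "merged/pytorch_model.bin")
```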
581
+ {
582
+ "type": "text",
583
+ "text": "4 EVALUATION ON NLP BENCHMARKS",
584
+ "text_level": 1,
585
+ "bbox": [
586
+ 171,
587
+ 529,
588
+ 516,
589
+ 544
590
+ ],
591
+ "page_idx": 4
592
+ },
593
+ {
594
+ "type": "text",
595
+ "text": "We evaluate our model on the standard NLU and NLG benchmarks, which include native Hindi test sets from IndicXTREME (Doddapaneni et al., 2022) and Indic NLG Suite (Kumar et al., 2022). Further, to test the knowledge and reasoning capabilities of the model in Hindi, we translate the English benchmarks such as MMLU (Hendrycks et al., 2021), Hellaswag (Zellers et al., 2019), ARC (Clark et al., 2018), Winogrande (Sakaguchi et al., 2020) and BoolQ (Clark et al., 2019) and use these to evaluate our final model. The IndicTrans2 (Gala et al., 2023) model was employed for translating the benchmarks. Although this is not the perfect evaluation setup however, due to limited resources, we rely on this method as a proxy indicator for assessing trends in the performance of the model in terms of reasoning capabilities in Hindi. An important future direction would involve creating equivalent benchmarks in the native language instead of solely relying on translations.",
596
+ "bbox": [
597
+ 169,
598
+ 560,
599
+ 823,
600
+ 700
601
+ ],
602
+ "page_idx": 4
603
+ },
604
+ {
605
+ "type": "text",
606
+ "text": "4.1 RESULTS",
607
+ "text_level": 1,
608
+ "bbox": [
609
+ 171,
610
+ 717,
611
+ 277,
612
+ 729
613
+ ],
614
+ "page_idx": 4
615
+ },
616
+ {
617
+ "type": "text",
618
+ "text": "Tables 3 to 6 shows the comparison of Airavata with the base model (OpenHathi) and the translate-test baseline using the Llama 2 7B Chat model. In the translate-test approach, the Hindi input is translated into English using the IndicTrans2 model and is used as a prompt to the Llama 2 7B Chat model. We observe that Airavata significantly outperforms the OpenHathi model on most tasks, demonstrating that fine-tuning on the IndicInstruct dataset helps align the base model to a variety of tasks. Moreover, the performance of the translate-test approach with the Llama 2 7B Chat model has a lot of variance, while Airavata consistently performs well across tasks. OpenHathi and Airavata exhibit comparable performance in the translation task. OpenHathi benefits from its extensive training on parallel corpora, resulting in a highly proficient base model for the translation task. We observe mixed results on generation tasks, indicating the scope for further improvement of Airvata, especially in open-ended text generation capabilities. Table 4 shows the performance of Airavata and other models on English test sets and the respective machine-translated Hindi test set. We observe a 5-15 point performance gap between the English baseline and the respective Hindi",
619
+ "bbox": [
620
+ 169,
621
+ 743,
622
+ 825,
623
+ 924
624
+ ],
625
+ "page_idx": 4
626
+ },
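The translate-test baseline described above can be sketched as follows; `translate_hi_to_en` is a stand-in for an IndicTrans2-based translation step rather than a real API, and the generation parameters are illustrative.

```python
# Minimal sketch (under assumed names) of the translate-test baseline: the Hindi
# prompt is first translated to English, e.g. with IndicTrans2, and the English
# text is then fed to the Llama 2 7B Chat model.
from typing import Callable
from transformers import pipeline

generator = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")

def translate_test(hindi_prompt: str, translate_hi_to_en: Callable[[str], str]) -> str:
    # `translate_hi_to_en` stands in for the IndicTrans2 translation step.
    english_prompt = translate_hi_to_en(hindi_prompt)
    output = generator(english_prompt, max_new_tokens=256, do_sample=False)
    return output[0]["generated_text"]
```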
627
+ {
628
+ "type": "header",
629
+ "text": "Technical Report",
630
+ "bbox": [
631
+ 173,
632
+ 32,
633
+ 287,
634
+ 47
635
+ ],
636
+ "page_idx": 4
637
+ },
638
+ {
639
+ "type": "page_number",
640
+ "text": "5",
641
+ "bbox": [
642
+ 493,
643
+ 948,
644
+ 503,
645
+ 959
646
+ ],
647
+ "page_idx": 4
648
+ },
649
+ {
650
+ "type": "text",
651
+ "text": "baseline across various tasks for both OpenHathi and Airavata models. This indicates limited cross-lingual transfer of English knowledge to Hindi, highlighting the need for future research to improve cross-lingual alignment to maximize knowledge transfer between English and Hindi.",
652
+ "bbox": [
653
+ 169,
654
+ 103,
655
+ 823,
656
+ 148
657
+ ],
658
+ "page_idx": 5
659
+ },
660
+ {
661
+ "type": "table",
662
+ "img_path": "images/1e82deb006878251a78a60d5924b68f5db6619ceac89ff78c43b8888d8268d69.jpg",
663
+ "table_caption": [],
664
+ "table_footnote": [],
665
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td colspan=\"3\">0-Shot</td><td colspan=\"3\">5-Shot</td></tr><tr><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td></tr><tr><td>IndicSentiment</td><td>72.89</td><td>97.85</td><td>95.81</td><td>96.59</td><td>98.43</td><td>97.01</td></tr><tr><td>IndicCopa</td><td>68.69</td><td>76.53</td><td>63.75</td><td>42.77</td><td>78.34</td><td>72.97</td></tr><tr><td>IndicXNLI</td><td>16.67</td><td>23.67</td><td>73.26</td><td>42.25</td><td>47.96</td><td>74.70</td></tr><tr><td>IndicXParaphrase</td><td>71.72</td><td>09.54</td><td>76.53</td><td>66.67</td><td>48.56</td><td>69.87</td></tr></table>",
666
+ "bbox": [
667
+ 173,
668
+ 157,
669
+ 823,
670
+ 272
671
+ ],
672
+ "page_idx": 5
673
+ },
674
+ {
675
+ "type": "table",
676
+ "img_path": "images/8fcf23ee3260aa1525dcff2a54e0e83b392a7f26ce5755cb5e313d7ca8767013.jpg",
677
+ "table_caption": [
678
+ "Table 3: F1 scores on Indic NLU and Commonsense Reasoning tasks"
679
+ ],
680
+ "table_footnote": [],
681
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">Variant</td><td colspan=\"2\">0-Shot</td><td colspan=\"2\">5-Shot</td></tr><tr><td>OpenHathi</td><td>Airavata</td><td>OpenHathi</td><td>Airavata</td></tr><tr><td rowspan=\"2\">MMLU</td><td>English</td><td>36.16</td><td>41.39</td><td>40.12</td><td>43.28</td></tr><tr><td>Hindi (Translated)</td><td>32.27</td><td>34.96</td><td>35.13</td><td>36.00</td></tr><tr><td rowspan=\"2\">BoolQ</td><td>English</td><td>52.63</td><td>73.00</td><td>64.46</td><td>62.02</td></tr><tr><td>Hindi (Translated)</td><td>58.56</td><td>64.50</td><td>65.69</td><td>51.47</td></tr><tr><td rowspan=\"2\">ARC Easy</td><td>English</td><td>57.28</td><td>70.50</td><td>62.12</td><td>71.04</td></tr><tr><td>Hindi (Translated)</td><td>44.28</td><td>54.00</td><td>49.87</td><td>54.84</td></tr><tr><td rowspan=\"2\">Arc Challenge</td><td>English</td><td>39.85</td><td>45.90</td><td>46.25</td><td>48.29</td></tr><tr><td>Hindi (Translated)</td><td>32.68</td><td>35.92</td><td>36.60</td><td>36.26</td></tr><tr><td rowspan=\"2\">Hella Swag</td><td>English</td><td>31.48</td><td>34.37</td><td>32.45</td><td>36.83</td></tr><tr><td>Hindi (Translated)</td><td>25.59</td><td>25.37</td><td>24.85</td><td>24.84</td></tr><tr><td>Winogrande</td><td>English</td><td>49.17</td><td>49.72</td><td>-</td><td>-</td></tr></table>",
682
+ "bbox": [
683
+ 245,
684
+ 316,
685
+ 751,
686
+ 536
687
+ ],
688
+ "page_idx": 5
689
+ },
690
+ {
691
+ "type": "table",
692
+ "img_path": "images/c3a3744e7fd9f9a15a99b99154dc8a45256060578993565c6927e31db2245869.jpg",
693
+ "table_caption": [
694
+ "Table 4: Accuracy on English NLU and Commonsense Reasoning tasks and its translated variants"
695
+ ],
696
+ "table_footnote": [],
697
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">Metric</td><td colspan=\"2\">0-Shot</td><td colspan=\"2\">5-shot</td></tr><tr><td>OpenHathi</td><td>Airavata</td><td>OpenHathi</td><td>Airavata</td></tr><tr><td rowspan=\"2\">Flores</td><td>chrF++</td><td>55.41</td><td>54.82</td><td>54.98</td><td>54.24</td></tr><tr><td>BLEURT</td><td>0.7103</td><td>0.6970</td><td>0.7115</td><td>0.7084</td></tr><tr><td rowspan=\"2\">IN22-Gen</td><td>chrF++</td><td>54.23</td><td>53.78</td><td>54.53</td><td>52.81</td></tr><tr><td>BLEURT</td><td>0.7156</td><td>0.7012</td><td>0.7181</td><td>0.7037</td></tr></table>",
698
+ "bbox": [
699
+ 284,
700
+ 580,
701
+ 712,
702
+ 699
703
+ ],
704
+ "page_idx": 5
705
+ },
706
+ {
707
+ "type": "text",
708
+ "text": "Table 5:chrF++ and BLEURT scores on English-Hindi translation task",
709
+ "bbox": [
710
+ 263,
711
+ 708,
712
+ 732,
713
+ 723
714
+ ],
715
+ "page_idx": 5
716
+ },
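For reference, chrF++ scores like those in Table 5 can be computed with sacrebleu as sketched below; the example sentences are placeholders, and BLEURT is omitted because it additionally requires a learned checkpoint.

```python
# Minimal sketch of corpus-level chrF++ scoring with sacrebleu (illustrative data).
from sacrebleu.metrics import CHRF

chrf_pp = CHRF(word_order=2)  # word_order=2 yields chrF++ rather than plain chrF
hypotheses = ["यह एक उदाहरण वाक्य है।"]       # model translations, aligned with references
references = [["यह एक उदाहरण वाक्य है।"]]     # one reference stream per reference set
print(chrf_pp.corpus_score(hypotheses, references).score)
```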
717
+ {
718
+ "type": "text",
719
+ "text": "5 HUMAN EVALUATION",
720
+ "text_level": 1,
721
+ "bbox": [
722
+ 171,
723
+ 752,
724
+ 387,
725
+ 767
726
+ ],
727
+ "page_idx": 5
728
+ },
729
+ {
730
+ "type": "text",
731
+ "text": "We assess Airavata using authentic prompts provided by real users, evaluating its performance across five distinct abilities outlined in Table 7.",
732
+ "bbox": [
733
+ 169,
734
+ 782,
735
+ 823,
736
+ 811
737
+ ],
738
+ "page_idx": 5
739
+ },
740
+ {
741
+ "type": "text",
742
+ "text": "We define a set of intents and domains of focus, which are then provided to users, along with clear instructions on the type of prompts they should construct. More details about the benchmark will be released soon.",
743
+ "bbox": [
744
+ 169,
745
+ 818,
746
+ 823,
747
+ 859
748
+ ],
749
+ "page_idx": 5
750
+ },
751
+ {
752
+ "type": "text",
753
+ "text": "Along with Airavata, we also evaluate ChatGPT (OpenAI, 2022), GPT-4 (Achiam et al., 2023) and BactrianX-llama-7B (Li et al., 2023) models for the same abilities. BactrianX-llama-7B is an instructed fine-tuned model for Hindi directly fine-tuned with the Llama base model on a multilingual instruction dataset. This multilingual instruction dataset consisted of machine-translated instructions",
754
+ "bbox": [
755
+ 169,
756
+ 867,
757
+ 823,
758
+ 925
759
+ ],
760
+ "page_idx": 5
761
+ },
762
+ {
763
+ "type": "header",
764
+ "text": "Technical Report",
765
+ "bbox": [
766
+ 173,
767
+ 32,
768
+ 287,
769
+ 47
770
+ ],
771
+ "page_idx": 5
772
+ },
773
+ {
774
+ "type": "page_number",
775
+ "text": "6",
776
+ "bbox": [
777
+ 493,
778
+ 948,
779
+ 503,
780
+ 959
781
+ ],
782
+ "page_idx": 5
783
+ },
784
+ {
785
+ "type": "table",
786
+ "img_path": "images/e950cfc05027c6c177de241a696a8cc75585ed9d5025abdf888636e100de785f.jpg",
787
+ "table_caption": [],
788
+ "table_footnote": [],
789
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">Metric</td><td colspan=\"3\">1-shot</td></tr><tr><td>OpenHathi</td><td>Llama 2 7B Chat (translate-test)</td><td>Airavata</td></tr><tr><td>Indic QA (No Context)</td><td>F1</td><td>17</td><td>4.58</td><td>21.01</td></tr><tr><td>Indic QA (With Context)</td><td>F1</td><td>20.69</td><td>19.59</td><td>37.69</td></tr><tr><td rowspan=\"2\">Indic Headline</td><td>Rouge L</td><td>11.26</td><td>23.06</td><td>12.32</td></tr><tr><td>BLEURT</td><td>0.4682</td><td>0.4590</td><td>0.3793</td></tr><tr><td rowspan=\"2\">IndicWikiBio</td><td>Rouge L</td><td>20.45</td><td>41.01</td><td>10.66</td></tr><tr><td>BLEURT</td><td>0.5185</td><td>0.6415</td><td>0.4279</td></tr></table>",
790
+ "bbox": [
791
+ 254,
792
+ 99,
793
+ 743,
794
+ 268
795
+ ],
796
+ "page_idx": 6
797
+ },
798
+ {
799
+ "type": "table",
800
+ "img_path": "images/32bea75ef10c1b83524badb70bbdd68e7d9d44b7120594f92968384f2309ec6e.jpg",
801
+ "table_caption": [
802
+ "Table 6: F1, Rouge L and BLEURT scores on Indic NLG tasks"
803
+ ],
804
+ "table_footnote": [],
805
+ "table_body": "<table><tr><td>AbilityName</td><td>Ability</td></tr><tr><td>Long</td><td>Ability to generate long-form text like writing essays, speeches, reports, etc.</td></tr><tr><td>Fact-Ops</td><td>Ability to give factual opinions and explanations like seeking recommendations, seeking advice, opinions, explanations, etc.</td></tr><tr><td>Content</td><td>Ability to make content accessible like summarizations, layman explanations, etc</td></tr><tr><td>Lang-Creativity</td><td>Ability to be creative in language like finding anagrams, rhyming words, vocabulary enhancement, etc</td></tr><tr><td>Culture</td><td>Ability to answer questions related to Indian Culture.</td></tr></table>",
806
+ "bbox": [
807
+ 200,
808
+ 311,
809
+ 797,
810
+ 468
811
+ ],
812
+ "page_idx": 6
813
+ },
814
+ {
815
+ "type": "text",
816
+ "text": "from Alpaca (Taori et al., 2023) and Dolly (Conover et al., 2023) datasets, followed by response generation from ChatGPT (OpenAI, 2022). Annotators were presented with a prompt and a randomly selected response from any of the models and were instructed to provide ratings based on the metrics outlined in Table 8.",
817
+ "bbox": [
818
+ 169,
819
+ 523,
820
+ 823,
821
+ 580
822
+ ],
823
+ "page_idx": 6
824
+ },
825
+ {
826
+ "type": "text",
827
+ "text": "In addition to the above metrics, we also ask the human evaluators to give a final score on the likert scale of 1 to 5, reflecting their overall satisfaction with the response.",
828
+ "bbox": [
829
+ 169,
830
+ 585,
831
+ 823,
832
+ 616
833
+ ],
834
+ "page_idx": 6
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "We sample a set of 50 prompts $^{8}$ covering various intents and domains (more details about the benchmark will be made available soon) and generate the responses from all three models. The prompt-response pairs were shuffled randomly and anonymized, ensuring no indication of the generating model, before being presented to the annotators for evaluation. Annotators were instructed to assess responses strictly adhering to the aforementioned rubrics. We report the various results in Figures 3 to 5.",
839
+ "bbox": [
840
+ 169,
841
+ 621,
842
+ 823,
843
+ 704
844
+ ],
845
+ "page_idx": 6
846
+ },
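A minimal sketch of the shuffling and anonymization step described above, assuming responses are collected per model in lists aligned with the prompts; the function and field names are illustrative, not the authors' actual tooling.

```python
# Shuffle and anonymize prompt-response pairs so annotators cannot tell which
# model produced a given response; a separate key maps ids back to model names.
import random
import uuid

def anonymize_for_annotation(prompts, responses_by_model, seed=0):
    """responses_by_model: dict mapping model name -> list of responses aligned with prompts."""
    items, key = [], {}
    for model_name, responses in responses_by_model.items():
        for prompt, response in zip(prompts, responses):
            item_id = str(uuid.uuid4())
            key[item_id] = model_name                 # kept aside, never shown to annotators
            items.append({"id": item_id, "prompt": prompt, "response": response})
    random.Random(seed).shuffle(items)
    return items, key
```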
847
+ {
848
+ "type": "table",
849
+ "img_path": "images/3638df67ecb9f8042edc6dc1684ab122271ee5e2d6466c20e8e200039851d6a4.jpg",
850
+ "table_caption": [
851
+ "Table 7: Description of abilities to test through human evaluation"
852
+ ],
853
+ "table_footnote": [],
854
+ "table_body": "<table><tr><td>Metric</td><td>Details</td><td>Range</td></tr><tr><td>IFA: Instruction Following Ability</td><td>This assesses the model&#x27;s ability to accurately and effectively follow the instructions provided in the prompt</td><td>0-2</td></tr><tr><td>CNS: Closeness to Native Speaker</td><td>This assesses how naturally and fluently the model&#x27;s responses align with the way a native Hindi speaker would express the same ideas.</td><td>0-2</td></tr><tr><td>CQ: Content Quality</td><td>This evaluates the response in terms of its factual accuracy, logical flow of ideas, and overall informational relevance.</td><td>0-2</td></tr></table>",
855
+ "bbox": [
856
+ 202,
857
+ 763,
858
+ 795,
859
+ 896
860
+ ],
861
+ "page_idx": 6
862
+ },
863
+ {
864
+ "type": "text",
865
+ "text": "Table 8: Rubrics for Human Evaluation",
866
+ "bbox": [
867
+ 366,
868
+ 905,
869
+ 629,
870
+ 919
871
+ ],
872
+ "page_idx": 6
873
+ },
874
+ {
875
+ "type": "header",
876
+ "text": "Technical Report",
877
+ "bbox": [
878
+ 173,
879
+ 32,
880
+ 287,
881
+ 47
882
+ ],
883
+ "page_idx": 6
884
+ },
885
+ {
886
+ "type": "page_footnote",
887
+ "text": "<sup>8</sup>https://huggingface.co/datasets/ai4bharat/human-eval",
888
+ "bbox": [
889
+ 191,
890
+ 720,
891
+ 660,
892
+ 736
893
+ ],
894
+ "page_idx": 6
895
+ },
896
+ {
897
+ "type": "page_number",
898
+ "text": "7",
899
+ "bbox": [
900
+ 493,
901
+ 948,
902
+ 503,
903
+ 959
904
+ ],
905
+ "page_idx": 6
906
+ },
907
+ {
908
+ "type": "image",
909
+ "img_path": "images/f07c063993fe40e386f05c63df65599bba2a0b77988f181fed2f931340669119.jpg",
910
+ "image_caption": [
911
+ "Figure 3: Average satisfaction scores for various models based on a Likert scale between 1 and 5 reported by Human annotators."
912
+ ],
913
+ "image_footnote": [],
914
+ "bbox": [
915
+ 192,
916
+ 114,
917
+ 803,
918
+ 396
919
+ ],
920
+ "page_idx": 7
921
+ },
922
+ {
923
+ "type": "image",
924
+ "img_path": "images/1e3ded6752c6ac81488037efdade9003491b8974f7d64b4bf6441d265e70b391.jpg",
925
+ "image_caption": [
926
+ "Figure 4: Human evaluation scores for assessing the instruction following and content generation abilities of the models based on the rubrics described in Table 8."
927
+ ],
928
+ "image_footnote": [],
929
+ "bbox": [
930
+ 192,
931
+ 486,
932
+ 805,
933
+ 767
934
+ ],
935
+ "page_idx": 7
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "Our observations suggest that while Airavata still trails significantly behind GPT-4 in terms of its ability to follow instructions and the quality of its content, it performs relatively better when generating natural-sounding Hindi content compared to both GPT-4 and ChatGPT. Notably, Airavata outperforms the BactrianX-llama-7B model by a significant margin. This difference in performance can be attributed to a lack of vocabulary expansion in BactrainX-llama-7B to accommodate addi",
940
+ "bbox": [
941
+ 169,
942
+ 853,
943
+ 823,
944
+ 925
945
+ ],
946
+ "page_idx": 7
947
+ },
948
+ {
949
+ "type": "header",
950
+ "text": "Technical Report",
951
+ "bbox": [
952
+ 173,
953
+ 32,
954
+ 289,
955
+ 47
956
+ ],
957
+ "page_idx": 7
958
+ },
959
+ {
960
+ "type": "page_number",
961
+ "text": "8",
962
+ "bbox": [
963
+ 493,
964
+ 948,
965
+ 503,
966
+ 959
967
+ ],
968
+ "page_idx": 7
969
+ },
970
+ {
971
+ "type": "image",
972
+ "img_path": "images/8c636034a48a07fbc7c96221dbfc14df8852b0d65871bbd08e2e77019801c865.jpg",
973
+ "image_caption": [
974
+ "Figure 5: Fine-grained human evaluation of content generation abilities of the models described in Table 7."
975
+ ],
976
+ "image_footnote": [],
977
+ "bbox": [
978
+ 191,
979
+ 116,
980
+ 803,
981
+ 396
982
+ ],
983
+ "page_idx": 8
984
+ },
985
+ {
986
+ "type": "text",
987
+ "text": "tional Hindi tokens and its lack of continual pre-training in Hindi. Furthermore, BactrainX-llama-7B may be trained on a lower-quality dataset for instruction tuning that was completely generated using ChatGPT. The OpenHathi model and Airavata, however, have addressed these shortcomings and show substantial improvements over the BactrainX-llama-7B model. We discuss these performance trends across various capabilities in more detail below.",
988
+ "bbox": [
989
+ 169,
990
+ 478,
991
+ 823,
992
+ 551
993
+ ],
994
+ "page_idx": 8
995
+ },
996
+ {
997
+ "type": "text",
998
+ "text": "The findings indicate that amongst all abilities, Airavata particularly excels in providing factual opinions and explanations, as demonstrated by the earlier examples. However, the model struggles with tasks demanding creative language usage, as our SFT data lacks components emphasizing creativity. When comparing GPT-4 and ChatGPT (GPT-3.5) performance, GPT-4 generally surpasses the latter in knowledge-intensive or creativity-demanding tasks. Nevertheless, it's noteworthy that ChatGPT either outperforms or is comparable in tasks emphasizing language generation, such as long-form content creation, offering factual opinions, and ensuring content accessibility.",
999
+ "bbox": [
1000
+ 169,
1001
+ 555,
1002
+ 823,
1003
+ 654
1004
+ ],
1005
+ "page_idx": 8
1006
+ },
1007
+ {
1008
+ "type": "text",
1009
+ "text": "We acknowledge that our current human evaluation is not robust and thorough due to the limited number of prompts in our set and each prompt and response pair being evaluated by only one annotator. However, our evaluation still offers valuable preliminary insights that will inform our efforts to improve the model. Expanding the instruction dataset to include content covering a diverse range of abilities can help improve the model's capabilities. At the same time, it is important to acknowledge that a significant portion of knowledge stems from English, which possesses the most extensive knowledge repository. Therefore, achieving better cross-lingual alignment between Hindi and English representations is essential for accurately answering factual questions and minimizing erroneous responses.",
1010
+ "bbox": [
1011
+ 169,
1012
+ 659,
1013
+ 826,
1014
+ 787
1015
+ ],
1016
+ "page_idx": 8
1017
+ },
1018
+ {
1019
+ "type": "text",
1020
+ "text": "6 TOXICITY AND MISINFORMATION",
1021
+ "text_level": 1,
1022
+ "bbox": [
1023
+ 171,
1024
+ 806,
1025
+ 488,
1026
+ 823
1027
+ ],
1028
+ "page_idx": 8
1029
+ },
1030
+ {
1031
+ "type": "text",
1032
+ "text": "We evaluate Airavata, OpenHathi, and Llama2-7B models with publicly available benchmark datasets, in both 0-shot and 5-shot settings. Our evaluation provides insights into key dimensions for LM safety. Multilingual HateCheck (MHC) is a suite of functional tests for hate speech detection and we use its Hindi subset (Das et al., 2022) to evaluate and compare the performance of all models. We translate the TruthfulQA, Implicit Hate, and a human-evaluated subset of the Toxigen dataset, to Hindi. This subset of Toxigen has been denoised to retain instances that have annotation",
1033
+ "bbox": [
1034
+ 169,
1035
+ 839,
1036
+ 823,
1037
+ 925
1038
+ ],
1039
+ "page_idx": 8
1040
+ },
1041
+ {
1042
+ "type": "header",
1043
+ "text": "Technical Report",
1044
+ "bbox": [
1045
+ 173,
1046
+ 32,
1047
+ 289,
1048
+ 47
1049
+ ],
1050
+ "page_idx": 8
1051
+ },
1052
+ {
1053
+ "type": "page_number",
1054
+ "text": "9",
1055
+ "bbox": [
1056
+ 493,
1057
+ 948,
1058
+ 504,
1059
+ 959
1060
+ ],
1061
+ "page_idx": 8
1062
+ },
1063
+ {
1064
+ "type": "table",
1065
+ "img_path": "images/0eccaae0c067e57a3d5fb25f14e290109c2ba4c14174da5b649c336fbeeaccbb.jpg",
1066
+ "table_caption": [],
1067
+ "table_footnote": [],
1068
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">Variant</td><td colspan=\"3\">0-Shot</td><td colspan=\"3\">5-Shot</td></tr><tr><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td></tr><tr><td>Multilingual HateCheck</td><td>Hindi</td><td>70.15</td><td>70.24</td><td>70.24</td><td>70.15</td><td>70.24</td><td>70.25</td></tr><tr><td rowspan=\"2\">Implicit Hate</td><td>English</td><td>50.65</td><td>57.92</td><td>62.33</td><td>51.41</td><td>65.02</td><td>62.44</td></tr><tr><td>Hindi (Translated)</td><td>52.45</td><td>53.21</td><td>61.15</td><td>49.99</td><td>52.98</td><td>58.84</td></tr><tr><td rowspan=\"2\">Toxigen (human evaluated)</td><td>English</td><td>44.91</td><td>83.35</td><td>78.63</td><td>42.71</td><td>66.34</td><td>72.24</td></tr><tr><td>Hindi (Translated)</td><td>47.75</td><td>83.97</td><td>78.56</td><td>42.83</td><td>73.20</td><td>74.80</td></tr><tr><td rowspan=\"2\">TruthfulQA (averaged MC1 &amp; MC2)</td><td>English</td><td>30.72</td><td>37.25</td><td>33.60</td><td>30.72</td><td>37.25</td><td>33.64</td></tr><tr><td>Hindi (Translated)</td><td>34.31</td><td>35.66</td><td>35.32</td><td>34.31</td><td>35.66</td><td>35.32</td></tr></table>",
1069
+ "bbox": [
1070
+ 171,
1071
+ 99,
1072
+ 826,
1073
+ 227
1074
+ ],
1075
+ "page_idx": 9
1076
+ },
1077
+ {
1078
+ "type": "text",
1079
+ "text": "Table 9: Accuracy on hate and toxicity identification, and answering factual questions.",
1080
+ "bbox": [
1081
+ 212,
1082
+ 236,
1083
+ 781,
1084
+ 252
1085
+ ],
1086
+ "page_idx": 9
1087
+ },
1088
+ {
1089
+ "type": "text",
1090
+ "text": "agreement from all annotators (Hosseini et al., 2023). While the Implicit Hate dataset (Hosseini et al., 2023) helps evaluate the model performance on detecting the subtle and implicit forms of hate speech, human-evaluated Toxigen data contains instances that are directed towards various demographics. We evaluate the model performance on detection of toxicity in these three datasets, and their translated instances using the accuracy metric. Further, for evaluating the model's capability towards answering factual questions, we use the TruthfulQA dataset (Lin et al., 2022) which contains multiple choice questions which are factual and can mimic common human falsehoods.",
1091
+ "bbox": [
1092
+ 169,
1093
+ 280,
1094
+ 826,
1095
+ 378
1096
+ ],
1097
+ "page_idx": 9
1098
+ },
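A minimal sketch of the accuracy metric used for the toxicity and hate detection evaluations, and of averaging TruthfulQA MC1 and MC2 as reported in Table 9; the helper names are illustrative.

```python
# Plain classification accuracy (in percent) over detection predictions, and the
# average of the two TruthfulQA multiple-choice scores.
def accuracy(predictions, labels):
    assert len(predictions) == len(labels)
    correct = sum(p == y for p, y in zip(predictions, labels))
    return 100.0 * correct / len(labels)

def truthfulqa_score(mc1_accuracy: float, mc2_accuracy: float) -> float:
    return (mc1_accuracy + mc2_accuracy) / 2.0
```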
1099
+ {
1100
+ "type": "text",
1101
+ "text": "Given the accuracy scores from our evaluation, in Table 9, Airavata is able to detect openly expressed hate in Hindi statements from MHC with an accuracy similar to the other two models, with similar performance in both 0- and 5-shot settings. On the more challenging instances which contain implicitly veiled hate speech, Airavata is able to identify hate with significantly better accuracy than the other two models within the translated Hindi instances. On the original Implicit Hate dataset, Llama2-7B seems to perform better, given a few examples. On the Translated Toxigen subset, Llama2-7B is able to detect targeted toxic instances against certain demographics with the highest accuracy among all three models. However, given a few examples, we observe a significant performance dip for Llama2-7B and Airavata outperforms it marginally. We observe similar performance on the original English dataset and note that Airavata is better at detecting targeted hate in Hindi, as compared to implicitly veiled hate speech. Its performance at detecting targeted hate is surprisingly better than detecting openly expressed hate speech from MHC. On the TruthfulQA dataset, in both 0- and 5-shot settings, Llama2-7B outperforms OpenHathi and Airavata. On the translated TruthfulQA data, a marginal dip in the performance can be observed which indicates that we need further investigation into the model's capability for generating misinformation.",
1102
+ "bbox": [
1103
+ 169,
1104
+ 385,
1105
+ 826,
1106
+ 594
1107
+ ],
1108
+ "page_idx": 9
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "Overall, these results may suggest that LLMs are able to identify toxicity and hateful speech, we think that further investigation is needed to evaluate toxicity and the presence of social biases within the content generated by LLMs. In the future, we plan to investigate additional existing benchmarks and novel evaluation measures to test LLMs for content safety and reliability.",
1113
+ "bbox": [
1114
+ 169,
1115
+ 599,
1116
+ 825,
1117
+ 657
1118
+ ],
1119
+ "page_idx": 9
1120
+ },
1121
+ {
1122
+ "type": "text",
1123
+ "text": "7 RESOURCES",
1124
+ "text_level": 1,
1125
+ "bbox": [
1126
+ 171,
1127
+ 679,
1128
+ 308,
1129
+ 694
1130
+ ],
1131
+ "page_idx": 9
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "text": "You can find all the details about the project in this section. We release the following resources to facilitate further research in instruction-tuning for Indian language LLMs.",
1136
+ "bbox": [
1137
+ 169,
1138
+ 710,
1139
+ 823,
1140
+ 742
1141
+ ],
1142
+ "page_idx": 9
1143
+ },
1144
+ {
1145
+ "type": "list",
1146
+ "sub_type": "text",
1147
+ "list_items": [
1148
+ "- Airavata Github Repository (for fine-tuning and evaluation)",
1149
+ "Airavata SFT Dataset Collection: Huggingface",
1150
+ "- Airavata Evaluation Dataset Collection: Huggingface"
1151
+ ],
1152
+ "bbox": [
1153
+ 171,
1154
+ 753,
1155
+ 578,
1156
+ 811
1157
+ ],
1158
+ "page_idx": 9
1159
+ },
1160
+ {
1161
+ "type": "text",
1162
+ "text": "8 SUMMARY AND FUTURE OUTLOOK",
1163
+ "text_level": 1,
1164
+ "bbox": [
1165
+ 171,
1166
+ 834,
1167
+ 501,
1168
+ 849
1169
+ ],
1170
+ "page_idx": 9
1171
+ },
1172
+ {
1173
+ "type": "text",
1174
+ "text": "We release Airavata, an open-source, instruction-tuned Hindi model demonstrating promising performance across various tasks compared to existing open-source models. We provide both datasets and models to facilitate further research in improving Hindi LLMs. This marks an initial stride towards developing high-quality, open-source LLMs for Indian languages that require multi-faceted",
1175
+ "bbox": [
1176
+ 169,
1177
+ 867,
1178
+ 823,
1179
+ 925
1180
+ ],
1181
+ "page_idx": 9
1182
+ },
1183
+ {
1184
+ "type": "header",
1185
+ "text": "Technical Report",
1186
+ "bbox": [
1187
+ 173,
1188
+ 32,
1189
+ 287,
1190
+ 47
1191
+ ],
1192
+ "page_idx": 9
1193
+ },
1194
+ {
1195
+ "type": "page_number",
1196
+ "text": "10",
1197
+ "bbox": [
1198
+ 490,
1199
+ 946,
1200
+ 509,
1201
+ 959
1202
+ ],
1203
+ "page_idx": 9
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "text": "efforts ranging from the creation of extensive pre-training datasets, diverse instruction tuning datasets, and high-quality foundational models.",
1208
+ "bbox": [
1209
+ 174,
1210
+ 103,
1211
+ 823,
1212
+ 132
1213
+ ],
1214
+ "page_idx": 10
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "text": "9 LIMITATIONS",
1219
+ "text_level": 1,
1220
+ "bbox": [
1221
+ 174,
1222
+ 152,
1223
+ 315,
1224
+ 167
1225
+ ],
1226
+ "page_idx": 10
1227
+ },
1228
+ {
1229
+ "type": "text",
1230
+ "text": "Airavata, like other LLMs, faces common challenges such as potential hallucinations, resulting in the generation of fabricated information, and may struggle with accuracy in complex or specialized topics. There is also a risk of generating objectionable or biased content. Its understanding of cultural nuances and effectiveness in mixed-language contexts may be constrained. Furthermore, the model's performance is closely tied to the quality and scope of its training data, influencing its efficacy and reliability. It is crucial to note that this model is designed for research purposes and is not recommended for any production use-cases.",
1231
+ "bbox": [
1232
+ 174,
1233
+ 183,
1234
+ 823,
1235
+ 281
1236
+ ],
1237
+ "page_idx": 10
1238
+ },
1239
+ {
1240
+ "type": "header",
1241
+ "text": "Technical Report",
1242
+ "bbox": [
1243
+ 173,
1244
+ 32,
1245
+ 287,
1246
+ 47
1247
+ ],
1248
+ "page_idx": 10
1249
+ },
1250
+ {
1251
+ "type": "page_number",
1252
+ "text": "11",
1253
+ "bbox": [
1254
+ 491,
1255
+ 948,
1256
+ 506,
1257
+ 959
1258
+ ],
1259
+ "page_idx": 10
1260
+ },
1261
+ {
1262
+ "type": "text",
1263
+ "text": "REFERENCES",
1264
+ "text_level": 1,
1265
+ "bbox": [
1266
+ 174,
1267
+ 102,
1268
+ 285,
1269
+ 116
1270
+ ],
1271
+ "page_idx": 11
1272
+ },
1273
+ {
1274
+ "type": "list",
1275
+ "sub_type": "ref_text",
1276
+ "list_items": [
1277
+ "Josh Achiam, Steven Adler, Sandhini Agarwal, et al. Gpt-4 technical report. 2023. URL https://api-semanticscholar.org/CorpusID:257532815.",
1278
+ "Kabir Ahuja, Harshita Diddee, Rishav Hada, Millicent Ochieng, Krithika Ramesh, Prachi Jain, Akshay Nambi, Tanuja Ganu, Sameer Segal, Mohamed Ahmed, Kalika Bali, and Sunayana Sitaram. MEGA: Multilingual evaluation of generative AI. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 4232-4267, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.258. URL https://aclanthology.org/2023.emnlp-main.258.",
1279
+ "Yuntao Bai, Andy Jones, Kamal Ndousse, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv: 2204.05862, 2022.",
1280
+ "James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, Wesam Manassra, Prafulla Dhariwal, Casey Chu, Yunxin Jiao, and Aditya Ramesh. Improving image generation with better captions, October 2023. URL https://cdn.openai.com/papers/dall-e-3.pdf.",
1281
+ "Hyung Won Chung, Le Hou, Shayne Longpre, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv: 2210.11416, 2022.",
1282
+ "Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. *BoolQ: Exploring the surprising difficulty of natural yes/no questions.* In Jill Burstein, Christy Doran, and Thamar Solorio (eds.), *Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies*, Volume 1 (Long and Short Papers), pp. 2924–2936, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1300. URL https://aclanthology.org/N19-1300.",
1283
+ "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv preprint arXiv: 1803.05457, 2018.",
1284
+ "Together Computer. Redpajama: an open dataset for training large language models, October 2023. URL https://github.com/togethercomputer/RedPajama-Data.",
1285
+ "Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm.",
1286
+ "Marta R. Costa-jussa, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation. arXiv preprint arXiv: 2207.04672, 2022.",
1287
+ "Mithun Das, Punyajoy Saha, Binny Mathew, and Animesh Mukherjee. HateCheckHIn: Evaluating Hindi hate speech detection models. In Nicoletta Calzolari, Frédéric Béchet, Philippe Blache, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Jan Odijk, and Stelios Piperidis (eds.), Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5378-5387, Marseille, France, June 2022. European Language Resources Association. URL https://aclanthology.org/2022.lrec-1.575."
1288
+ ],
1289
+ "bbox": [
1290
+ 174,
1291
+ 127,
1292
+ 825,
1293
+ 924
1294
+ ],
1295
+ "page_idx": 11
1296
+ },
1297
+ {
1298
+ "type": "header",
1299
+ "text": "Technical Report",
1300
+ "bbox": [
1301
+ 173,
1302
+ 32,
1303
+ 287,
1304
+ 47
1305
+ ],
1306
+ "page_idx": 11
1307
+ },
1308
+ {
1309
+ "type": "page_number",
1310
+ "text": "12",
1311
+ "bbox": [
1312
+ 490,
1313
+ 946,
1314
+ 508,
1315
+ 959
1316
+ ],
1317
+ "page_idx": 11
1318
+ },
1319
+ {
1320
+ "type": "list",
1321
+ "sub_type": "ref_text",
1322
+ "list_items": [
1323
+ "Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. Enhancing chat language models by scaling high-quality instructional conversations. arXiv preprint arXiv:2305.14233, 2023.",
1324
+ "Sumanth Doddapaneni, Rahul Aralikatte, Gowtham Ramesh, Shreyansh Goyal, Mitesh M. Khapra, Anoop Kunchukuttan, and Pratyush Kumar. Towards leaving no indic language behind: Building monolingual corpora, benchmark and models for indic languages. Annual Meeting of the Association for Computational Linguistics, 2022. doi: 10.18653/v1/2023.acl-long.693.",
1325
+ "Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Alpacafarm: A simulation framework for methods that learn from human feedback, 2023.",
1326
+ "Jay Gala, Pranjal A Chitale, A K Raghavan, Varun Gumm, Sumanth Doddapaneni, Aswanth Kumar M, Janki Atul Nawale, Anupama Sujatha, Ratish Puduppully, Vivek Raghavan, Pratyush Kumar, Mitesh M Khapra, Raj Dabre, and Anoop Kunchukuttan. Indictrans2: Towards high-quality and accessible machine translation models for all 22 scheduled indian languages. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=vfT4YuzAYA.",
1327
+ "Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. The flores-101 evaluation benchmark for low-resource and multilingual machine translation. 2021.",
1328
+ "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ.",
1329
+ "Saghar Hosseini, Hamid Palangi, and Ahmed Hassan Awadallah. An empirical study of metrics to measure representational harms in pre-trained language models. arXiv preprint arXiv:2301.09211, 2023.",
1330
+ "Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9.",
1331
+ "Srinivasan Iyer, Xi Victoria Lin, Ramakanth Pasunuru, Todor Mihaylov, Daniel Simig, Ping Yu, Kurt Shuster, Tianlu Wang, Qing Liu, Punit Singh Koura, Xian Li, Brian O'Horo, Gabriel Pereyra, Jeff Wang, Christopher Dewan, Asli Celikyilmaz, Luke Zettlemoyer, and Ves Stoyanov. Optiml: Scaling language model instruction meta learning through the lens of generalization. arXiv preprint arXiv: 2212.12017, 2022.",
1332
+ "Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, et al. Mistral 7b. arXiv preprint arXiv: 2310.06825, 2023.",
1333
+ "Mario Michael Krell, Matej Kosec, Sergio P. Perez, and Andrew William Fitzgibbon. Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance, 2023. URL https://openreview.net/forum?id=ZAzSf9pzCm.",
1334
+ "Aman Kumar, Himani Shrotriya, Prachi Sahu, Amogh Mishra, Raj Dabre, Ratish Puduppully, Anoop Kunchukuttan, Mitesh M. Khapra, and Pratyush Kumar. IndicNLG benchmark: Multilingual datasets for diverse NLG tasks in Indic languages. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 5363-5394, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.360. URL https://aclanthology.org/2022.emnlp-main.360.",
1335
+ "Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi-Rui Tam, Keith Stevens, Abdullah Barhoum, Nguyen Minh Duc, Oliver Stanley, Richard Nagyfi, Shahul ES, Sameer Suri,"
1336
+ ],
1337
+ "bbox": [
1338
+ 171,
1339
+ 102,
1340
+ 825,
1341
+ 924
1342
+ ],
1343
+ "page_idx": 12
1344
+ },
1345
+ {
1346
+ "type": "header",
1347
+ "text": "Technical Report",
1348
+ "bbox": [
1349
+ 173,
1350
+ 32,
1351
+ 287,
1352
+ 47
1353
+ ],
1354
+ "page_idx": 12
1355
+ },
1356
+ {
1357
+ "type": "page_number",
1358
+ "text": "13",
1359
+ "bbox": [
1360
+ 490,
1361
+ 948,
1362
+ 506,
1363
+ 959
1364
+ ],
1365
+ "page_idx": 12
1366
+ },
1367
+ {
1368
+ "type": "list",
1369
+ "sub_type": "ref_text",
1370
+ "list_items": [
1371
+ "David Glushkov, Arnav Dantuluri, Andrew Maguire, Christoph Schuhmann, Huu Nguyen, and Alexander Mattick. Openassistant conversations - democratizing large language model alignment. arXiv preprint arXiv: 2304.07327, 2023.",
1372
+ "Haonan Li, Fajri Koto, Minghao Wu, Alham Fikri Aji, and Timothy Baldwin. Bacterian-x: Multilingual replicable instruction-following models with low-rank adaptation. arXiv preprint arXiv: 2305.15011, 2023.",
1373
+ "Stephanie Lin, Jacob Hilton, and Owain Evans. TruthfulQA: Measuring how models mimic human falsehoods. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 3214-3252, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.229. URL https://aclanthology.org/2022.acl-1.ong.229.",
1374
+ "S. Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing data and methods for effective instruction tuning. International Conference on Machine Learning, 2023. doi: 10.4855/50/arXiv.2301.13688.",
1375
+ "OpenAI. Gpt3.5, November 2022. URL https://platform.openai.com/docs/model_s/gpt-3-5.",
1376
+ "Maja Popovic. $\\mathrm{chrF}++$ : words helping character n-grams. In Proceedings of the Second Conference on Machine Translation, pp. 612-618, Copenhagen, Denmark, September 2017. Association for Computational Linguistics. doi: 10.18653/v1/W17-4770. URL https://aclanthology.org/w17-4770.",
1377
+ "Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pp. 8732-8740. AAAI Press, 2020. doi: 10.1609/AAAI.V34I05.6399. URL https://doi.org/10.1609/aaai.v34i05.6399.",
1378
+ "SarvamAI. Openhathi series: An approach to build bilingual llms frugally, December 2023. URL https://www.sarvam.ai/blog/announcing-openhathi-series.",
1379
+ "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.",
1380
+ "Hugo Touvron, Louis Martin, Kevin Stone, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv: 2307.09288, 2023.",
1381
+ "Lewis Tunstall, Edward Beeching, Nathan Lambert, Nazneen Rajani, Kashif Rasul, Younes Belkada, Shengyi Huang, Leandro von Werra, Clémentine Fourrier, Nathan Habib, Nathan Sarrazin, Omar Sanseviero, Alexander M. Rush, and Thomas Wolf. Zephyr: Direct distillation of lm alignment. arXiv preprint arXiv: 2310.16944, 2023.",
1382
+ "Xiangpeng Wei, Haoran Wei, Huan Lin, Tianhao Li, Pei Zhang, Xingzhang Ren, Mei Li, Yu Wan, Zhiwei Cao, Binbin Xie, Tianxiang Hu, Shangjie Li, Binyuan Hui, Bowen Yu, Dayiheng Liu, Baosong Yang, Fei Huang, and Jun Xie. Polym: An open source polyglot large language model. arXiv preprint arXiv: 2307.06018, 2023.",
1383
+ "Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. HellaSwag: Can a machine really finish your sentence? In Anna Korhonen, David Traum, and Lluis Márquez (eds.), Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 4791-4800, Florence, Italy, jul 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1472. URL https://aclanthology.org/P19-1472."
1384
+ ],
1385
+ "bbox": [
1386
+ 171,
1387
+ 103,
1388
+ 825,
1389
+ 924
1390
+ ],
1391
+ "page_idx": 13
1392
+ },
1393
+ {
1394
+ "type": "header",
1395
+ "text": "Technical Report",
1396
+ "bbox": [
1397
+ 173,
1398
+ 32,
1399
+ 287,
1400
+ 47
1401
+ ],
1402
+ "page_idx": 13
1403
+ },
1404
+ {
1405
+ "type": "page_number",
1406
+ "text": "14",
1407
+ "bbox": [
1408
+ 490,
1409
+ 948,
1410
+ 508,
1411
+ 959
1412
+ ],
1413
+ "page_idx": 13
1414
+ },
1415
+ {
1416
+ "type": "list",
1417
+ "sub_type": "ref_text",
1418
+ "list_items": [
1419
+ "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Tianle Li, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zhuohan Li, Zi Lin, Eric. P Xing, Joseph E. Gonzalez, Ion Stoica, and Hao Zhang. Lmsys-chat-1m: A large-scale real-world llm conversation dataset, 2023a.",
1420
+ "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv: 2306.05685, 2023b."
1421
+ ],
1422
+ "bbox": [
1423
+ 171,
1424
+ 102,
1425
+ 823,
1426
+ 210
1427
+ ],
1428
+ "page_idx": 14
1429
+ },
1430
+ {
1431
+ "type": "header",
1432
+ "text": "Technical Report",
1433
+ "bbox": [
1434
+ 173,
1435
+ 32,
1436
+ 287,
1437
+ 47
1438
+ ],
1439
+ "page_idx": 14
1440
+ },
1441
+ {
1442
+ "type": "page_number",
1443
+ "text": "15",
1444
+ "bbox": [
1445
+ 491,
1446
+ 946,
1447
+ 506,
1448
+ 959
1449
+ ],
1450
+ "page_idx": 14
1451
+ },
1452
+ {
1453
+ "type": "text",
1454
+ "text": "A EXAMPLES",
1455
+ "text_level": 1,
1456
+ "bbox": [
1457
+ 171,
1458
+ 102,
1459
+ 305,
1460
+ 118
1461
+ ],
1462
+ "page_idx": 15
1463
+ },
1464
+ {
1465
+ "type": "text",
1466
+ "text": "In this section, we show example generations from Airavata. Note that the model's outputs in examples #A5 and #A6 did not contain newlines between bullet points. They have been added here for readability.",
1467
+ "bbox": [
1468
+ 169,
1469
+ 133,
1470
+ 826,
1471
+ 176
1472
+ ],
1473
+ "page_idx": 15
1474
+ },
1475
+ {
1476
+ "type": "text",
1477
+ "text": "A.1 EXAMPLES WHERE THE AIRAVATA MODEL GENERATES GOOD OUTPUT",
1478
+ "text_level": 1,
1479
+ "bbox": [
1480
+ 171,
1481
+ 193,
1482
+ 705,
1483
+ 208
1484
+ ],
1485
+ "page_idx": 15
1486
+ },
1487
+ {
1488
+ "type": "text",
1489
+ "text": "Airavata excels in specific content generation tasks, such as soliciting mail drafts (#A1) and addressing general how-to inquiries (#A5, #A6). Furthermore, it demonstrates proficiency in adhering to instructions related to output structure (#A3 and #A5).",
1490
+ "bbox": [
1491
+ 169,
1492
+ 218,
1493
+ 823,
1494
+ 262
1495
+ ],
1496
+ "page_idx": 15
1497
+ },
1498
+ {
1499
+ "type": "text",
1500
+ "text": "Example #A1",
1501
+ "text_level": 1,
1502
+ "bbox": [
1503
+ 204,
1504
+ 279,
1505
+ 287,
1506
+ 292
1507
+ ],
1508
+ "page_idx": 15
1509
+ },
1510
+ {
1511
+ "type": "text",
1512
+ "text": "Prompt",
1513
+ "bbox": [
1514
+ 204,
1515
+ 306,
1516
+ 253,
1517
+ 319
1518
+ ],
1519
+ "page_idx": 15
1520
+ },
1521
+ {
1522
+ "type": "text",
1523
+ "text": "Jijjya gahak k oAp ane aodir me dere k aanu b h, uak ef k l eep k aok maki n k r t k s ka saamahana ka saamahanaon ho gayai",
1524
+ "bbox": [
1525
+ 200,
1526
+ 329,
1527
+ 790,
1528
+ 358
1529
+ ],
1530
+ "page_idx": 15
1531
+ },
1532
+ {
1533
+ "type": "text",
1534
+ "text": "Model Response",
1535
+ "bbox": [
1536
+ 202,
1537
+ 371,
1538
+ 310,
1539
+ 383
1540
+ ],
1541
+ "page_idx": 15
1542
+ },
1543
+ {
1544
+ "type": "text",
1545
+ "text": "[əaahekkkaanam],1",
1546
+ "bbox": [
1547
+ 204,
1548
+ 395,
1549
+ 323,
1550
+ 407
1551
+ ],
1552
+ "page_idx": 15
1553
+ },
1554
+ {
1555
+ "type": "text",
1556
+ "text": "Muzhe A 1",
1557
+ "bbox": [
1558
+ 204,
1559
+ 409,
1560
+ 790,
1561
+ 449
1562
+ ],
1563
+ "page_idx": 15
1564
+ },
1565
+ {
1566
+ "type": "text",
1567
+ "text": "M 1",
1568
+ "bbox": [
1569
+ 204,
1570
+ 449,
1571
+ 789,
1572
+ 474
1573
+ ],
1574
+ "page_idx": 15
1575
+ },
1576
+ {
1577
+ "type": "text",
1578
+ "text": "I 1",
1579
+ "bbox": [
1580
+ 204,
1581
+ 474,
1582
+ 789,
1583
+ 501
1584
+ ],
1585
+ "page_idx": 15
1586
+ },
1587
+ {
1588
+ "type": "text",
1589
+ "text": "Aapokek 8eay 4r samnag k e liy gthnyalad | m y aepke k sakk ka m k rne k lai tay r h | i maanadarri so, [aapok",
1590
+ "bbox": [
1591
+ 204,
1592
+ 501,
1593
+ 789,
1594
+ 529
1595
+ ],
1596
+ "page_idx": 15
1597
+ },
1598
+ {
1599
+ "type": "text",
1600
+ "text": "Example #A2",
1601
+ "text_level": 1,
1602
+ "bbox": [
1603
+ 204,
1604
+ 561,
1605
+ 289,
1606
+ 574
1607
+ ],
1608
+ "page_idx": 15
1609
+ },
1610
+ {
1611
+ "type": "text",
1612
+ "text": "Prompt",
1613
+ "bbox": [
1614
+ 204,
1615
+ 588,
1616
+ 251,
1617
+ 599
1618
+ ],
1619
+ "page_idx": 15
1620
+ },
1621
+ {
1622
+ "type": "text",
1623
+ "text": "Bharat 5 m 2017-10-14 00:00 PM",
1624
+ "bbox": [
1625
+ 202,
1626
+ 609,
1627
+ 785,
1628
+ 627
1629
+ ],
1630
+ "page_idx": 15
1631
+ },
1632
+ {
1633
+ "type": "text",
1634
+ "text": "Model Response",
1635
+ "bbox": [
1636
+ 202,
1637
+ 638,
1638
+ 310,
1639
+ 652
1640
+ ],
1641
+ "page_idx": 15
1642
+ },
1643
+ {
1644
+ "type": "text",
1645
+ "text": "\"hmm, yah eek dvlvchysy mshavall he! harrat me bueo kco saammanit vayknti manan jalata he aor ahmanotar paur uoua oki aor ruky kivay a jalata he jab koeikc kofin ngnoy glo nana hohata he yaa janaonkarre lono hi ohti 1 ensai iessoliey hckovik buo aok aia an anduunbvat puiquyos se pairrit kivay gag he aor cngn bueumuluy sanygatni manana jalata he iisomai, buo te saaah baltvotl karat sammay sammamanpukr kwhvahar kcarnan aor unenke pere ro kooe jnejso jnevinm dshararot kauopmaogkkrna maana jalata h",
1646
+ "bbox": [
1647
+ 202,
1648
+ 662,
1649
+ 790,
1650
+ 720
1651
+ ],
1652
+ "page_idx": 15
1653
+ },
1654
+ {
1655
+ "type": "text",
1656
+ "text": "yah yad raynna a mahmehluyu g h k bueo k o aakar lao gne k e liy Maagvdvckk aor sahaahkarke k uqpe me darez qa jala he , aor anh aahit kkrne y a aapammaninit kkrne k bajay uonkn k sahlal a h letne k kdoishk kcrnni chaihy! yad keisni bujugne ne apakko kukcsh aunu cih t kaha yaaaske yzvalhacar mokcoe galtatki, lo zachit pratiokya de nana andr koytki co oek se sghaalnna ma hmluyu",
1657
+ "bbox": [
1658
+ 202,
1659
+ 729,
1660
+ 790,
1661
+ 784
1662
+ ],
1663
+ "page_idx": 15
1664
+ },
1665
+ {
1666
+ "type": "header",
1667
+ "text": "Technical Report",
1668
+ "bbox": [
1669
+ 173,
1670
+ 32,
1671
+ 289,
1672
+ 47
1673
+ ],
1674
+ "page_idx": 15
1675
+ },
1676
+ {
1677
+ "type": "page_number",
1678
+ "text": "16",
1679
+ "bbox": [
1680
+ 490,
1681
+ 946,
1682
+ 509,
1683
+ 960
1684
+ ],
1685
+ "page_idx": 15
1686
+ },
1687
+ {
1688
+ "type": "text",
1689
+ "text": "Example #A3",
1690
+ "text_level": 1,
1691
+ "bbox": [
1692
+ 202,
1693
+ 106,
1694
+ 287,
1695
+ 119
1696
+ ],
1697
+ "page_idx": 16
1698
+ },
1699
+ {
1700
+ "type": "text",
1701
+ "text": "Prompt",
1702
+ "text_level": 1,
1703
+ "bbox": [
1704
+ 204,
1705
+ 132,
1706
+ 253,
1707
+ 146
1708
+ ],
1709
+ "page_idx": 16
1710
+ },
1711
+ {
1712
+ "type": "text",
1713
+ "text": "M 5",
1714
+ "bbox": [
1715
+ 202,
1716
+ 155,
1717
+ 580,
1718
+ 172
1719
+ ],
1720
+ "page_idx": 16
1721
+ },
1722
+ {
1723
+ "type": "text",
1724
+ "text": "Model Response",
1725
+ "text_level": 1,
1726
+ "bbox": [
1727
+ 202,
1728
+ 185,
1729
+ 308,
1730
+ 198
1731
+ ],
1732
+ "page_idx": 16
1733
+ },
1734
+ {
1735
+ "type": "list",
1736
+ "sub_type": "text",
1737
+ "list_items": [
1738
+ "1. 安普宁斯 莱哈希耶夫 哈克列维诺夫",
1739
+ "2. 亞普耶夫·拉赫亚耶夫和克洛波,「普拉赫亚耶夫·克希耶夫」等与伊兹霍夫列夫",
1740
+ "3. 3.4.5.6.7.8.9.10.11.12.13.14.15.16.17.18.19.20.21.22.23.24.25.26.27.28.29.30.31.32.33.34.35.36.37.38.39.40.41.42.43.44.45.46.47.48.49.50.51.52.53.54.55.56.57.58.59.60.61.62.63.64.65.66.67.68.69.70.71.72.73.74.75.76.77.78.79.80.81.82.83.84.85.86.87.88.89.90",
1741
+ "4. 亞普耶克阿雅基可郭可萨亚德明赫拉达",
1742
+ "5. 3"
1743
+ ],
1744
+ "bbox": [
1745
+ 205,
1746
+ 209,
1747
+ 488,
1748
+ 277
1749
+ ],
1750
+ "page_idx": 16
1751
+ },
1752
+ {
1753
+ "type": "text",
1754
+ "text": "Example #A4",
1755
+ "text_level": 1,
1756
+ "bbox": [
1757
+ 202,
1758
+ 311,
1759
+ 287,
1760
+ 325
1761
+ ],
1762
+ "page_idx": 16
1763
+ },
1764
+ {
1765
+ "type": "text",
1766
+ "text": "Prompt",
1767
+ "text_level": 1,
1768
+ "bbox": [
1769
+ 204,
1770
+ 339,
1771
+ 251,
1772
+ 352
1773
+ ],
1774
+ "page_idx": 16
1775
+ },
1776
+ {
1777
+ "type": "text",
1778
+ "text": "M' Aapaneon Samaay Pnaeehakn kaiiok kaike s uahar kakata ? Muzo aang aang budu d aand a and uankai y",
1779
+ "bbox": [
1780
+ 202,
1781
+ 362,
1782
+ 686,
1783
+ 378
1784
+ ],
1785
+ "page_idx": 16
1786
+ },
1787
+ {
1788
+ "type": "text",
1789
+ "text": "Model Response",
1790
+ "text_level": 1,
1791
+ "bbox": [
1792
+ 202,
1793
+ 391,
1794
+ 308,
1795
+ 404
1796
+ ],
1797
+ "page_idx": 16
1798
+ },
1799
+ {
1800
+ "type": "list",
1801
+ "sub_type": "text",
1802
+ "list_items": [
1803
+ "1. Aapnane kayai kao prashmikcata 4e: uun kaoyk co prashmikcata 4e jinhe puotarokne koi saabres Aichik Aovshakatla H e Aor us",
1804
+ "2. 8ek Aunusuui bi nana: Eek Aunusuui bi nana aei aor ues paar teke rere! yeh Aapokn OApneon saamay kai behltar pralxchnn karene anr Agan ne Ikghaikoi kpr aapat karrne me madud karega",
1805
+ "3. यान दिक्षत् रोत्ति काने अपिके पूर्थन को खँकर शोर और यपिके कासम पर यान को एविति करे",
1806
+ "4. 5eek le: Ninayimilit 5eek k eonene sre Aapokk o Rrnaa Kcarrne OoR Uyataadukk bane ruhno m Mada d mela",
1807
+ "5. AApunne AApunne kO PuRsKcUt kC: AApunne AApunne kO PuRsKcUt kC: AApunne AApunne kO Preririt rHnene O and AApunne Lkshy o Ko praput kKarne 6"
1808
+ ],
1809
+ "bbox": [
1810
+ 205,
1811
+ 415,
1812
+ 789,
1813
+ 521
1814
+ ],
1815
+ "page_idx": 16
1816
+ },
1817
+ {
1818
+ "type": "header",
1819
+ "text": "Technical Report",
1820
+ "bbox": [
1821
+ 173,
1822
+ 32,
1823
+ 289,
1824
+ 47
1825
+ ],
1826
+ "page_idx": 16
1827
+ },
1828
+ {
1829
+ "type": "page_number",
1830
+ "text": "17",
1831
+ "bbox": [
1832
+ 490,
1833
+ 946,
1834
+ 508,
1835
+ 959
1836
+ ],
1837
+ "page_idx": 16
1838
+ },
1839
+ {
1840
+ "type": "text",
1841
+ "text": "Example #A5",
1842
+ "text_level": 1,
1843
+ "bbox": [
1844
+ 204,
1845
+ 273,
1846
+ 287,
1847
+ 286
1848
+ ],
1849
+ "page_idx": 17
1850
+ },
1851
+ {
1852
+ "type": "text",
1853
+ "text": "Prompt",
1854
+ "bbox": [
1855
+ 204,
1856
+ 300,
1857
+ 251,
1858
+ 311
1859
+ ],
1860
+ "page_idx": 17
1861
+ },
1862
+ {
1863
+ "type": "text",
1864
+ "text": "Meneo abahot Jagah nioekr Re k Lepaia kay a par k h i sre rll a n hi Aya jisso muu o dipshan ho ra ha he m u",
1865
+ "bbox": [
1866
+ 202,
1867
+ 323,
1868
+ 790,
1869
+ 351
1870
+ ],
1871
+ "page_idx": 17
1872
+ },
1873
+ {
1874
+ "type": "text",
1875
+ "text": "Model Response",
1876
+ "text_level": 1,
1877
+ "bbox": [
1878
+ 204,
1879
+ 366,
1880
+ 308,
1881
+ 378
1882
+ ],
1883
+ "page_idx": 17
1884
+ },
1885
+ {
1886
+ "type": "text",
1887
+ "text": "yah h s u nak k d 1 k a aapok o aovsakad k a aamnana karana p a 20 raha 1 y aay aay r aon mahvcduup 1 k 4e 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1",
1888
+ "bbox": [
1889
+ 205,
1890
+ 388,
1891
+ 790,
1892
+ 417
1893
+ ],
1894
+ "page_idx": 17
1895
+ },
1896
+ {
1897
+ "type": "list",
1898
+ "sub_type": "text",
1899
+ "list_items": [
1900
+ "1. kivsiti vikiklaksak yahalahaahkar se batai kvr-kihmi maanansikk rakhayyepeshovar se batai kkrnna Aapvke Aabvshad kco prabhit kkrne ande hse kkm kranne ki dixhara m pahelaka kcdm ho saekrtala he! ve Aappki bi aabvana o kso samdaonr o ar uon parr kaabpu ane ke lieup euk k yagjbnanbgnane nme anapokki maadudk ckr sakotne",
1901
+ "2. Nijoyamitil vayayam: sharariyrik ganiitivichiy eodzifkni ndozicii, jra prakukirik mnnoads aabana ne vallr hote y 1 ayai t kek k 0eepananekevayayam jere chalnna, rerrnna yaa kuch hllkca kchvad bhi abvssadprkctatata khi bhavnna ka kcm kcrne me madud kkr sokklat",
1902
+ "3. Prukutit me saumay bhitatae: Prukutit me saumay bhitatae se t nannav ka kam karnrno and r kclvana k h aovnna ka kcbadva dne m madudmellu saokatni 1 Aap utahlne jia saksot e, Pcduln yaa kcr saksot e ya abangvani k a kam kcrke prukutit me apanri ruvi vikasisitkarakr saksot e",
1903
+ "4. 4",
1904
+ "5. 5",
1905
+ "6. Maiauokulneos Takaknienoks koi Auajmaa: cayan, gahri aas o ane k e aayaa y aogg sah it dihinna Maiauokulneos Taknaike Aarama kranne O and veatman n gn parr cyaan kckit kkrnr ne m madud kkr sakknti",
1906
+ "7. Aotam-TezBaal kA Othayasakr: PpAepnnt Nndi Leonna, sntuolilat Aahaar Zhaan aor Kxvshy Jiaovn ananay Rzne josii rsvb-TezBaal praohaoa o me shaimel hnoana aovshyok h! iusmae nan kcrnn, kvatab pao na y a Tlvi dezto u Naatla kkrna josoi choiote shaimel hio sakotni",
1907
+ "8. 8.",
1908
+ "9. Peshovkar Mavad Lef: yad Ad Aapokek Aevahsada k a Prabghun Karana buhit k dngata h, tio o parramshr prapat karene parr 1eckar k r Euk prakshikl mnnanisik k svalkhya pehovkar Aapokko mukacabalat lntn siai gne me madudkr saktata h aor Aapokko aokkak SakaikarLakmak mananisikkala vikciknilt k arer nme maudud k k rakrakta h i gad rake, aeevashad k do krarana aasana nahi h, letekn nahni Samaehn anr rannaniitayek ae sathy, Aapp pragati kkrakote h e orr beohitrar mahmukumkrk sarkotne h"
1909
+ ],
1910
+ "bbox": [
1911
+ 205,
1912
+ 417,
1913
+ 789,
1914
+ 746
1915
+ ],
1916
+ "page_idx": 17
1917
+ },
1918
+ {
1919
+ "type": "header",
1920
+ "text": "Technical Report",
1921
+ "bbox": [
1922
+ 173,
1923
+ 32,
1924
+ 289,
1925
+ 47
1926
+ ],
1927
+ "page_idx": 17
1928
+ },
1929
+ {
1930
+ "type": "page_number",
1931
+ "text": "18",
1932
+ "bbox": [
1933
+ 491,
1934
+ 946,
1935
+ 506,
1936
+ 959
1937
+ ],
1938
+ "page_idx": 17
1939
+ },
1940
+ {
1941
+ "type": "text",
1942
+ "text": "Example #A6",
1943
+ "text_level": 1,
1944
+ "bbox": [
1945
+ 204,
1946
+ 306,
1947
+ 287,
1948
+ 318
1949
+ ],
1950
+ "page_idx": 18
1951
+ },
1952
+ {
1953
+ "type": "text",
1954
+ "text": "Prompt",
1955
+ "text_level": 1,
1956
+ "bbox": [
1957
+ 204,
1958
+ 332,
1959
+ 251,
1960
+ 345
1961
+ ],
1962
+ "page_idx": 18
1963
+ },
1964
+ {
1965
+ "type": "text",
1966
+ "text": "Mn Ue Tn e n e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e 0",
1967
+ "bbox": [
1968
+ 202,
1969
+ 354,
1970
+ 790,
1971
+ 411
1972
+ ],
1973
+ "page_idx": 18
1974
+ },
1975
+ {
1976
+ "type": "text",
1977
+ "text": "Model Response",
1978
+ "text_level": 1,
1979
+ "bbox": [
1980
+ 204,
1981
+ 424,
1982
+ 310,
1983
+ 436
1984
+ ],
1985
+ "page_idx": 18
1986
+ },
1987
+ {
1988
+ "type": "text",
1989
+ "text": "Aapke prs k e 1e 7hnnnabad! bngluu j anen pnr Aapko sannny loanoi k a hao sieniokne me kucchunootiokkna k a saamnna Ppaq sckatla, letekin n cnilta mnt kara-madudc ke liyep yhahf eek charra-d-rc-eharra maargdshikka dti gai:",
1990
+ "bbox": [
1991
+ 205,
1992
+ 446,
1993
+ 790,
1994
+ 474
1995
+ ],
1996
+ "page_idx": 18
1997
+ },
1998
+ {
1999
+ "type": "text",
2000
+ "text": "Kraadu Suriarogne ne kaiy e Kdum a",
2001
+ "bbox": [
2002
+ 205,
2003
+ 474,
2004
+ 415,
2005
+ 488
2006
+ ],
2007
+ "page_idx": 18
2008
+ },
2009
+ {
2010
+ "type": "text",
2011
+ "text": "Kanafotek saarokkar a dara pradana n kia jai rne mukat konadk padayakm",
2012
+ "bbox": [
2013
+ 205,
2014
+ 488,
2015
+ 509,
2016
+ 501
2017
+ ],
2018
+ "page_idx": 18
2019
+ },
2020
+ {
2021
+ "type": "list",
2022
+ "sub_type": "text",
2023
+ "list_items": [
2024
+ "2. kranz 51000000000000000000000000000000000000000000000000000000000000000000000000000000000",
2025
+ "3. kkrnnd 100mamal, uhrarana rna aor shdavallv sioa knrne knrne e kprk n d 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 1",
2026
+ "4. kkrnue ykrae aar r sranrnnnaa ae bare me janaonne ke lioe kcnue kniltae gha pniirakai pndna shuk aar | Aay uutunu chanlalp arer kkrnue sahok yea vataiiaap snta ko deyane ka vitikcpu bi chun n sncote jinnonme knrnue bnaa",
2027
+ "5. kagadug gati, fiklpmn aarre rivv sha ka anannd let jinne hApay smaqda saekatle h! ayh Aapakno nrae shddo aer Aabmavakotyko kana umane oar unonkai vveoepang kkrane ne madoud karegaa",
2028
+ "6. 5e nik gntiivichoy a r aat anr vilai me knaa cka uayog a kare ne k aagaaas kare Udaharra kke lire, yadie apak paas kaipni shajana he, to pahel ve hah jia tse samay kcnz dme kuc zotte aakayxsh y a mabnadun an Aajmargal",
2029
+ "7. kranzug me kkrssi dokst y a parraray k sdoasy saaat k r! uohn e hazra sizxanoe aor unnsr svalalpuxne soe aap nie chiljse sioane anr suhargk arerke ae zovsaur prpaf k",
2030
+ "8. 安邦克萨克萨克都克萨普拉尼安克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克"
2031
+ ],
2032
+ "bbox": [
2033
+ 205,
2034
+ 502,
2035
+ 789,
2036
+ 712
2037
+ ],
2038
+ "page_idx": 18
2039
+ },
2040
+ {
2041
+ "type": "header",
2042
+ "text": "Technical Report",
2043
+ "bbox": [
2044
+ 173,
2045
+ 32,
2046
+ 289,
2047
+ 47
2048
+ ],
2049
+ "page_idx": 18
2050
+ },
2051
+ {
2052
+ "type": "page_number",
2053
+ "text": "19",
2054
+ "bbox": [
2055
+ 490,
2056
+ 946,
2057
+ 508,
2058
+ 959
2059
+ ],
2060
+ "page_idx": 18
2061
+ },
2062
+ {
2063
+ "type": "text",
2064
+ "text": "A.2 EXAMPLES WHERE AIRAVATA OUTPUT HAS ERRORS",
2065
+ "text_level": 1,
2066
+ "bbox": [
2067
+ 171,
2068
+ 103,
2069
+ 578,
2070
+ 118
2071
+ ],
2072
+ "page_idx": 19
2073
+ },
2074
+ {
2075
+ "type": "text",
2076
+ "text": "The model is prone to hallucinations and can present distorted facts as against the plot of the novel in #B1 or may fail to follow the instructions provided (#B2).",
2077
+ "bbox": [
2078
+ 169,
2079
+ 128,
2080
+ 823,
2081
+ 159
2082
+ ],
2083
+ "page_idx": 19
2084
+ },
2085
+ {
2086
+ "type": "text",
2087
+ "text": "Example #B1",
2088
+ "text_level": 1,
2089
+ "bbox": [
2090
+ 204,
2091
+ 176,
2092
+ 287,
2093
+ 189
2094
+ ],
2095
+ "page_idx": 19
2096
+ },
2097
+ {
2098
+ "type": "text",
2099
+ "text": "Prompt",
2100
+ "bbox": [
2101
+ 204,
2102
+ 203,
2103
+ 253,
2104
+ 215
2105
+ ],
2106
+ "page_idx": 19
2107
+ },
2108
+ {
2109
+ "type": "text",
2110
+ "text": "Ghurmawdar $\\text{贝}$ Bharati jni kai prasifukd uupnayarss \"gunahkai davat\"ke bae me batao and kahne kahnne k muy kiradar suh a o r chadr ke bae me saikshat vargnakre?",
2111
+ "bbox": [
2112
+ 200,
2113
+ 226,
2114
+ 789,
2115
+ 253
2116
+ ],
2117
+ "page_idx": 19
2118
+ },
2119
+ {
2120
+ "type": "text",
2121
+ "text": "Model Response",
2122
+ "bbox": [
2123
+ 202,
2124
+ 268,
2125
+ 310,
2126
+ 281
2127
+ ],
2128
+ "page_idx": 19
2129
+ },
2130
+ {
2131
+ "type": "text",
2132
+ "text": "gunahn a k a d e vatla t 1 18 10 15 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2",
2133
+ "bbox": [
2134
+ 200,
2135
+ 292,
2136
+ 792,
2137
+ 401
2138
+ ],
2139
+ "page_idx": 19
2140
+ },
2141
+ {
2142
+ "type": "text",
2143
+ "text": "Example #B2",
2144
+ "text_level": 1,
2145
+ "bbox": [
2146
+ 204,
2147
+ 434,
2148
+ 287,
2149
+ 446
2150
+ ],
2151
+ "page_idx": 19
2152
+ },
2153
+ {
2154
+ "type": "text",
2155
+ "text": "Prompt",
2156
+ "bbox": [
2157
+ 204,
2158
+ 460,
2159
+ 251,
2160
+ 473
2161
+ ],
2162
+ "page_idx": 19
2163
+ },
2164
+ {
2165
+ "type": "text",
2166
+ "text": "Mn mnnn nnnn nnnn nn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nann",
2167
+ "bbox": [
2168
+ 200,
2169
+ 483,
2170
+ 790,
2171
+ 513
2172
+ ],
2173
+ "page_idx": 19
2174
+ },
2175
+ {
2176
+ "type": "text",
2177
+ "text": "Model Response",
2178
+ "bbox": [
2179
+ 202,
2180
+ 525,
2181
+ 310,
2182
+ 539
2183
+ ],
2184
+ "page_idx": 19
2185
+ },
2186
+ {
2187
+ "type": "text",
2188
+ "text": "Mukalakl kalal aek prakar kai Amuul kalka he jia mamav sharir and 8i 8n ka Pratiniinilv karkotie h Aay eak bukmukai kall a kali rup h Jijsepo parrapereikk 8r sarmkaklatin ddoanl tareko se kiya jia sakkata, aer iokska oepag Aekssar cokla, Mntikcklal aor Pradshon jaeo vimnna maahymoa mo kya jala tla.",
2189
+ "bbox": [
2190
+ 200,
2191
+ 549,
2192
+ 790,
2193
+ 590
2194
+ ],
2195
+ "page_idx": 19
2196
+ },
2197
+ {
2198
+ "type": "header",
2199
+ "text": "Technical Report",
2200
+ "bbox": [
2201
+ 173,
2202
+ 32,
2203
+ 289,
2204
+ 47
2205
+ ],
2206
+ "page_idx": 19
2207
+ },
2208
+ {
2209
+ "type": "page_number",
2210
+ "text": "20",
2211
+ "bbox": [
2212
+ 488,
2213
+ 946,
2214
+ 509,
2215
+ 960
2216
+ ],
2217
+ "page_idx": 19
2218
+ }
2219
+ ]
2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15006/5ae61d62-33bc-41be-af65-9a8f9ec0f98c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd76f2e3249638fc1abb717ea471dbb5c3e60da90bc1c9d4253731615ce3bf25
3
+ size 2025573
2401.15xxx/2401.15006/full.md ADDED
@@ -0,0 +1,361 @@
 
1
+ # AIRAVATA: INTRODUCING HINDI INSTRUCTION-TUNED LLM
2
+
3
+ Jay Gala $^{1}$ Thanmay Jayakumar $^{1}$ Jaavid Aktar Husain $^{1,3}$ Aswanth Kumar $^{4}$
4
+
5
+ Mohammed Safi Ur Rahman Khan<sup>1</sup> Diptesh Kanojia<sup>5</sup> Ratish Puduppully<sup>6</sup>
6
+
7
+ Mitesh M. Khapra $^{1,2}$ Raj Dabre $^{7}$ Rudra Murthy $^{8}$ Anoop Kunchukuttan $^{1,2,9}$
8
+
9
+ $^{1}$ Nilekani Centre at AI4Bharat $^{2}$ IIT Madras $^{3}$ IIIT D&M Kancheepuram
10
+
11
+ $^{4}$ Flipkart $^{5}$ University of Surrey $^{6}$ A*STAR $^{7}$ NICT $^{8}$ IBM Research $^{9}$ Microsoft
12
+
13
+ https://ai4bharat.github.io/airavata
14
+
15
+ ![](images/eb4680e0728a77773011d0bbcc9936fc37dc85913dfe1b3274c98ff4489fd13d.jpg)
16
+ Figure 1: Image Courtesy: DALL-E 3 (Betker et al., 2023).
17
+
18
+ # 1 INTRODUCTION
19
+
20
+ The past year has witnessed tremendous interest and activity in the world of Large Language Models (LLMs). LLMs hold the potential to unlock exciting applications in artificial intelligence due to their ability to comprehend complex natural language instructions and excel in a broad spectrum of tasks involving language, knowledge, reasoning, and creative generation. To foster research, innovation, and widespread adoption, an open ecosystem is essential. We have observed significant advancements in this area with the launch of models like Llama 2 (Touvron et al., 2023) and Mistral (Jiang et al., 2023), as well as their instruction-tuned variants such as Llama 2 Chat (Touvron et al., 2023), Mistral-Instruct (Jiang et al., 2023), and Zephyr (Tunstall et al., 2023), among others. Major progress has also been made in developing datasets for pre-training such as RedPajama (Computer, 2023), instruction tuning (e.g., Alpaca (Taori et al., 2023), UltraChat (Ding et al., 2023), Dolly (Conover et al., 2023), OpenAssistant (Köpf et al., 2023), LMSYS-Chat (Zheng et al., 2023a)), and evaluation benchmarks (e.g., AlpacaEval (Dubois et al., 2023), MT-Bench (Zheng et al., 2023b)). However, most of these advancements have been predominantly centered around the English language.
21
+
22
+ There is limited support for Indian languages, which can be attributed to the incidental inclusion of some Indian language data that slipped through the data filters during the pre-training of these language models. However, the representation of data, the efficacy of tokenizers, and task performance for Indian languages are considerably behind that of English. The performance in Indian
23
+
24
+ languages, even on closed-source models such as ChatGPT (OpenAI, 2022), GPT-4 (Achiam et al., 2023), and others, is inferior compared to English (Ahuja et al., 2023). Therefore, there is an urgent need to develop a similar ecosystem of tools, models, and datasets for Indian languages to foster research and innovation. In pursuit of this objective, the recent collaboration with Sarvam AI led to the release of OpenHathi (SarvamAI, 2023), an open-source foundational model for Hindi, developed by extending Llama 2 (Touvron et al., 2023).
25
+
26
+ Today, we announce the next step - an initial release of "Airavata", an instruction-tuned model for Hindi built by fine-tuning OpenHathi (SarvamAI, 2023) on diverse Hindi instruction-tuning datasets to make it better suited for assistive tasks.
27
+
28
+ Along with the model, we also share the instruction tuning datasets<sup>1</sup> to enable further research for IndicLLMs. We rely on human-curated, license-friendly instruction-tuned datasets to build "Airavata". We do not use data generated from proprietary models like GPT-4 (Achiam et al., 2023), etc. We think this is a more sustainable way of building instruction-tuned models at scale for most Indic languages, where relying on distilled data from commercial models would increase costs and restrict their free usage in downstream applications due to licensing restrictions.
29
+
30
+ We also compile a collection of evaluation benchmarks $^2$ along with an evaluation framework to compare various LLMs for their abilities on diverse tasks when instructed in Hindi. Using this benchmark and human judgments, we compare different LLMs to quantify the current state of their Hindi capabilities. We conduct a detailed analysis of Airavata's performance on various Natural Language Understanding (NLU) and Natural Language Generation (NLG) tasks and find that the instruction fine-tuning helps align the model to various NLU tasks. There is significant potential for improvement in NLG tasks, which require the creation of larger, more diverse instruction datasets as well as innovations in aligning English model representations to Hindi representations to drive better cross-lingual transfer.
31
+
32
+ # 2 INSTRUCTION TUNING DATASET CREATION
33
+
34
+ High-quality instruction tuning datasets are important for the good performance of LLMs. However, there are few diverse datasets for Hindi. Following Wei et al. (2023), we rely on translating high-quality English-supervised instruction-tuning datasets into Hindi. We use IndicTrans2 (Gala et al., 2023), the state-of-the-art open-source MT model for Indian languages, for translation. Some previous works (Li et al., 2023; Wei et al., 2023) have used ChatGPT (OpenAI, 2022) to translate instructions and/or generate responses into Hindi to better use context during translation (IndicTrans2 and most MT models are sentence-level). However, this is not cost-effective, and the translation quality of ChatGPT (OpenAI, 2022) is lower than IndicTrans2 (Gala et al., 2023), and its generation quality in Hindi might not be up to the mark (Ahuja et al., 2023). A future avenue of work would be improving translation quality when document context is available.
35
+
36
+ We sample examples from different datasets listed in Table 1 to ensure balanced representations across all the tasks while fitting into our instruction tuning budget. We translate the instructions, inputs, and outputs into Hindi. This results in a total of $404\mathrm{k}$ examples spanning English and Hindi. The translated Hindi examples were filtered to retain high-quality examples. Specifically, examples were retained only when the $\mathrm{chrF}++$ score (Popovic, 2017) between the back-translated example and the corresponding English example was 50 or above. The final dataset used for instruction tuning contains $385\mathrm{k}$ examples. Table 1 shows the details of the final training dataset. The dataset is available on the HuggingFace Hub. $^3$
37
+
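+ The round-trip filter described above can be implemented with standard tooling. Below is a minimal sketch (not the exact pipeline) using the `sacrebleu` package, assuming the Hindi translations have already been back-translated into English (e.g., with IndicTrans2); the function and variable names are illustrative only:
+
+ ```python
+ # Minimal sketch of the chrF++ back-translation filter with a threshold of 50, as described above.
+ from sacrebleu.metrics import CHRF
+
+ chrf = CHRF(word_order=2)  # word_order=2 corresponds to chrF++
+
+ def keep_example(original_en: str, back_translated_en: str, threshold: float = 50.0) -> bool:
+     """Retain a translated example only if its back-translation stays close to the original English."""
+     score = chrf.sentence_score(back_translated_en, [original_en]).score
+     return score >= threshold
+ ```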
38
+ We also create two native Hindi Instruction datasets:
39
+
40
+ - wikiHow: wikiHow<sup>4</sup> is an online wiki-style platform that serves as a valuable resource for a diverse array of how-to articles spanning numerous topics. The articles on the platform are human-moderated, ensuring a high-quality standard. The questions users pose in these articles closely
41
+
42
+ $^{1}$ https://huggingface.co/datasets/ai4bharat/indic-instruct-data-v0.1
43
+ $^{2}$ https://huggingface.co/collections/ai4bharat/airavata-evaluation-suite-65b13b7b68165de71ba0b333
44
+ $^{3}$ https://huggingface.co/datasets/ai4bharat/indic-instruct-data-v0.1
45
+ $^{4}$ https://www.wikihow.com/Main-Page
46
+
47
+ <table><tr><td rowspan="2">Dataset</td><td colspan="2">Unfiltered</td><td colspan="2">Filtered</td></tr><tr><td>English</td><td>Hindi</td><td>English</td><td>Hindi</td></tr><tr><td>FLAN-v2 (Longpre et al., 2023)</td><td>67,463</td><td>67,463</td><td>67,463</td><td>65,228</td></tr><tr><td>Anthropic-HHH (Bai et al., 2022)</td><td>5,000</td><td>5,000</td><td>5,000</td><td>4,911</td></tr><tr><td>Dolly (Conover et al., 2023)</td><td>15,011</td><td>15,011</td><td>15,011</td><td>14,880</td></tr><tr><td>OpenAssistant (Köpf et al., 2023)</td><td>19,945</td><td>20,128</td><td>19,945</td><td>16,384</td></tr><tr><td>LMSYS-Chat (Zheng et al., 2023a)</td><td>50,000</td><td>50,000</td><td>50,000</td><td>37,422</td></tr><tr><td>WikiHow</td><td>20,400</td><td>6,055</td><td>20,400</td><td>6,055</td></tr><tr><td>Anudesh</td><td>5,234</td><td>7,577</td><td>5,234</td><td>7,577</td></tr><tr><td>Total</td><td>183,053</td><td>171,234</td><td>183,053</td><td>152,457</td></tr></table>
48
+
49
+ Table 1: Instruction Fine-tuning Training Dataset Details
50
+
51
+ align with potential use cases for this model, making it a rich resource for training models. Additionally, this might also help induce reasoning capabilities and generate logical step-by-step responses. We curate around 20k and 6k articles in English and Hindi, respectively, resulting in a total of around 27k articles. We currently formulate the data as a completion task, given either the question alone or the question along with a few initial steps. The dataset is released under the CC-0 license.
52
+
53
+ - Anudesh: Anudesh is a crowd-sourced collection of prompts accompanied by responses generated from the Llama 2 70B model (Touvron et al., 2023). Participants are provided with clear guidelines detailing the nature of the interaction required, including the specific language to be employed. These languages encompass a range that includes Indic languages, English, transliterated Indic, as well as a blend of Indic and English in a code-mixed format. Contributors craft their prompts in adherence to these directives and the specified language criteria. These prompts are then paired with the corresponding translated outputs from the Llama 2 70B model (Touvron et al., 2023). The dataset is released under the CC-BY-4.0 license. More details about the interactions will be released soon.
54
+
55
+ We provide a brief description of all the existing instruction datasets used in addition to the above (and corresponding licenses) below:
56
+
57
+ - FLAN-v2 (Longpre et al., 2023): A collection of NLP tasks that combines a number of existing NLP datasets with various data augmentations, introduced by Chung et al. (2022). We sample around 67K examples for our training mixture. The dataset is released under the Apache-2.0 license.
58
+ - Anthropic-HHH (Bai et al., 2022): A collection of human-collected preference data for aligning the models to be helpful and harmless. We sample 5K conversations from the "chosen" column for our training mixture. The dataset is released under the MIT license.
59
+ - Dolly (Conover et al., 2023): A corpus of more than 15K records generated by thousands of Databricks employees to enable LLMs to exhibit the magical interactivity of ChatGPT. The dataset is released under the CC-BY-SA-3.0 license.
60
+ - OpenAssistant (Köpf et al., 2023): A human-generated, human-annotated assistant-style conversation corpus consisting of 38K messages, resulting in over 3K conversation trees and around 20K conversations. The dataset is released under the Apache-2.0 license.
61
+ - LMSYS-Chat (Zheng et al., 2023a): A collection of 1M real-world conversations spanning 25 SOTA LLMs, similar to OpenAssistant (Köpf et al., 2023). We sample 50K conversations for our training mixture. The dataset is released under the LMSYS-Chat-1M Dataset License Agreement.<sup>5</sup>
62
+ - NMT (Gala et al., 2023): A multi-domain human-annotated dataset containing 50K bitext English-Hindi translation pairs from BPCC-Human (Gala et al., 2023) to enable better crosslingual transfer. The dataset is released under the CC-BY-4.0 license.
63
+
64
+ <table><tr><td>Hyper-Parameter</td><td>Value</td></tr><tr><td>LoRA Rank</td><td>16</td></tr><tr><td>LoRA alpha</td><td>32</td></tr><tr><td>LoRA Dropout</td><td>0.05</td></tr><tr><td>LoRA Target Modules</td><td>q_proj, v_proj, k_proj, gate_proj, up_proj, down_proj</td></tr><tr><td>Epochs</td><td>4</td></tr><tr><td>Learning rate</td><td>5e-4</td></tr><tr><td>Batch Size</td><td>128</td></tr><tr><td>Floating Point Precision</td><td>bfloat16</td></tr></table>
65
+
66
+ Table 2: Hyperparameters for Fine-tuning
67
+
68
+ # 3 SUPERVISED FINE-TUNING
69
+
70
+ We fine-tune the OpenHathi model using the above-compiled datasets. We perform parameter-efficient fine-tuning with LoRA (Hu et al., 2022). The hyperparameters used are listed in the Table 2.
71
+
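+ For reference, the following is a minimal sketch of how the Table 2 configuration maps onto the Hugging Face `peft` API; the base-model identifier is an assumption and should be replaced with the actual OpenHathi checkpoint:
+
+ ```python
+ # Minimal sketch: wrapping the base model with LoRA adapters using the Table 2 hyperparameters.
+ from transformers import AutoModelForCausalLM
+ from peft import LoraConfig, get_peft_model
+
+ base_model = AutoModelForCausalLM.from_pretrained("sarvamai/OpenHathi-7B-Hi-v0.1-Base")  # assumed checkpoint id
+ lora_config = LoraConfig(
+     r=16,                 # LoRA rank
+     lora_alpha=32,
+     lora_dropout=0.05,
+     target_modules=["q_proj", "v_proj", "k_proj", "gate_proj", "up_proj", "down_proj"],
+     task_type="CAUSAL_LM",
+ )
+ model = get_peft_model(base_model, lora_config)
+ model.print_trainable_parameters()  # only the LoRA parameters are trainable
+ ```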
72
+ During fine-tuning, the loss was computed only for the output tokens. We used the OpenInstruct framework<sup>6</sup> for fine-tuning, customizing it for our requirements (our custom version is available as IndicInstruct<sup>7</sup>). One fine-tuning example corresponds to one example in the dataset. However, this is suboptimal since many tokens are wasted as padding tokens. We plan to optimize this process by packing multiple dataset examples into a single fine-tuning example (Iyer et al., 2022; Krell et al., 2023).
73
+
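+ Computing the loss only on output tokens is typically implemented by masking the prompt positions in the labels so that the cross-entropy loss ignores them. A minimal, illustrative sketch (not the OpenInstruct implementation) is shown below:
+
+ ```python
+ # Minimal sketch: build a training example where prompt tokens are masked out of the loss with -100,
+ # the ignore_index used by PyTorch's cross-entropy and Hugging Face causal-LM models.
+ def build_example(prompt_ids: list, response_ids: list) -> dict:
+     input_ids = list(prompt_ids) + list(response_ids)
+     labels = [-100] * len(prompt_ids) + list(response_ids)
+     return {"input_ids": input_ids, "labels": labels}
+ ```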
74
+ # 3.1 FULL VS. LORA FINETUNING
75
+
76
+ There are two prevalent methods for instruction fine-tuning of LLMs: full fine-tuning and LoRA. Full fine-tuning (FFT) involves updating all the model parameters, whereas LoRA (Hu et al., 2022) fine-tuning involves introducing a small set of additional parameters and updating only them. We perform an ablation study examining both fine-tuning strategies, training two models: one with full fine-tuning and the other with LoRA fine-tuning. For this ablation study, we consider the FLAN v2 (Longpre et al., 2023) subset from our instruction dataset spanning English and Hindi. For our evaluation, we used a subset of NLU tasks in Hindi, along with BoolQ (Clark et al., 2019) and MMLU (Hendrycks et al., 2021) tasks in English, as development sets to decide between full fine-tuning and LoRA fine-tuning (Figure 2). We observed that the FFT model outperforms the OpenHathi base model (SarvamAI, 2023) on the IndicCopa (Doddapaneni et al., 2022) and IndicXParaphrase (Doddapaneni et al., 2022) tasks. However, the FFT model performed poorly on English tasks compared to both the base model and the LoRA fine-tuned model. The LoRA fine-tuned model demonstrated improvements over, or performance comparable to, the OpenHathi base model on both Hindi NLU and English tasks. As a result, we decided to proceed with LoRA fine-tuning for training our final model. We report results on the LoRA fine-tuned final model in the subsequent section.
77
+
78
+ # 3.2 MODEL SELECTION
79
+
80
+ We fine-tune the OpenHathi model for 4 epochs and save the model after each epoch. We evaluate the checkpoint of each epoch on the dev set (IndicSentiment, IndicCOPA, IndicXNLI, and IndicQA from IndicXTREME (Doddapaneni et al., 2022), and Flores devtest (Goyal et al., 2021; Costa-jussa et al., 2022)) and compare the average performance. We observe that the checkpoint of epoch 3 performs well on NLU tasks, while the checkpoint of epoch 4 performs well on NLG tasks. We perform checkpoint averaging, where we interpolate the weights of the above two checkpoints to obtain a model that performs well across both NLU and NLG tasks. We found the best interpolation factor to be around 0.6.
81
+
82
+ ![](images/998239512d6d5d19c4c7c13faaaffe796f694f348f9735c2b633498e3b0386f7.jpg)
83
+ Figure 2: Ablation experiment to understand the performance gaps between Full fine-tuning and LoRA fine-tuning across a mix of English and Hindi NLU tasks.
84
+
85
+ $$
86
+ \text{interpolated weights} = 0.6 \times \text{checkpoint}_{3} + (1 - 0.6) \times \text{checkpoint}_{4}
87
+ $$
88
+
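+ A minimal sketch of this checkpoint interpolation, assuming the two epoch checkpoints are available as PyTorch state dicts with identical keys (file names are illustrative):
+
+ ```python
+ # Minimal sketch: interpolate the epoch-3 and epoch-4 checkpoints with factor 0.6.
+ import torch
+
+ alpha = 0.6
+ ckpt3 = torch.load("checkpoint_epoch3.pt", map_location="cpu")
+ ckpt4 = torch.load("checkpoint_epoch4.pt", map_location="cpu")
+
+ interpolated = {k: alpha * ckpt3[k] + (1 - alpha) * ckpt4[k] for k in ckpt3}
+ torch.save(interpolated, "checkpoint_interpolated.pt")
+ ```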
89
+ # 4 EVALUATION ON NLP BENCHMARKS
90
+
91
+ We evaluate our model on standard NLU and NLG benchmarks, which include native Hindi test sets from IndicXTREME (Doddapaneni et al., 2022) and the Indic NLG Suite (Kumar et al., 2022). Further, to test the knowledge and reasoning capabilities of the model in Hindi, we translate English benchmarks such as MMLU (Hendrycks et al., 2021), Hellaswag (Zellers et al., 2019), ARC (Clark et al., 2018), Winogrande (Sakaguchi et al., 2020), and BoolQ (Clark et al., 2019) and use these to evaluate our final model. The IndicTrans2 (Gala et al., 2023) model was employed for translating the benchmarks. Although this is not a perfect evaluation setup, due to limited resources we rely on this method as a proxy indicator for assessing trends in the model's reasoning capabilities in Hindi. An important future direction would involve creating equivalent benchmarks in the native language instead of solely relying on translations.
92
+
93
+ # 4.1 RESULTS
94
+
95
+ Tables 3 to 6 show the comparison of Airavata with the base model (OpenHathi) and the translate-test baseline using the Llama 2 7B Chat model. In the translate-test approach, the Hindi input is translated into English using the IndicTrans2 model and is used as a prompt to the Llama 2 7B Chat model. We observe that Airavata significantly outperforms the OpenHathi model on most tasks, demonstrating that fine-tuning on the IndicInstruct dataset helps align the base model to a variety of tasks. Moreover, the performance of the translate-test approach with the Llama 2 7B Chat model has a lot of variance, while Airavata consistently performs well across tasks. OpenHathi and Airavata exhibit comparable performance in the translation task. OpenHathi benefits from its extensive training on parallel corpora, resulting in a highly proficient base model for the translation task. We observe mixed results on generation tasks, indicating the scope for further improvement of Airavata, especially in open-ended text generation capabilities. Table 4 shows the performance of Airavata and other models on English test sets and the respective machine-translated Hindi test set. We observe a 5-15 point performance gap between the English baseline and the respective Hindi
96
+
97
+ baseline across various tasks for both OpenHathi and Airavata models. This indicates limited cross-lingual transfer of English knowledge to Hindi, highlighting the need for future research to improve cross-lingual alignment to maximize knowledge transfer between English and Hindi.
98
+
99
+ <table><tr><td rowspan="2"></td><td colspan="3">0-Shot</td><td colspan="3">5-Shot</td></tr><tr><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td></tr><tr><td>IndicSentiment</td><td>72.89</td><td>97.85</td><td>95.81</td><td>96.59</td><td>98.43</td><td>97.01</td></tr><tr><td>IndicCopa</td><td>68.69</td><td>76.53</td><td>63.75</td><td>42.77</td><td>78.34</td><td>72.97</td></tr><tr><td>IndicXNLI</td><td>16.67</td><td>23.67</td><td>73.26</td><td>42.25</td><td>47.96</td><td>74.70</td></tr><tr><td>IndicXParaphrase</td><td>71.72</td><td>09.54</td><td>76.53</td><td>66.67</td><td>48.56</td><td>69.87</td></tr></table>
100
+
101
+ Table 3: F1 scores on Indic NLU and Commonsense Reasoning tasks
102
+
103
+ <table><tr><td rowspan="2"></td><td rowspan="2">Variant</td><td colspan="2">0-Shot</td><td colspan="2">5-Shot</td></tr><tr><td>OpenHathi</td><td>Airavata</td><td>OpenHathi</td><td>Airavata</td></tr><tr><td rowspan="2">MMLU</td><td>English</td><td>36.16</td><td>41.39</td><td>40.12</td><td>43.28</td></tr><tr><td>Hindi (Translated)</td><td>32.27</td><td>34.96</td><td>35.13</td><td>36.00</td></tr><tr><td rowspan="2">BoolQ</td><td>English</td><td>52.63</td><td>73.00</td><td>64.46</td><td>62.02</td></tr><tr><td>Hindi (Translated)</td><td>58.56</td><td>64.50</td><td>65.69</td><td>51.47</td></tr><tr><td rowspan="2">ARC Easy</td><td>English</td><td>57.28</td><td>70.50</td><td>62.12</td><td>71.04</td></tr><tr><td>Hindi (Translated)</td><td>44.28</td><td>54.00</td><td>49.87</td><td>54.84</td></tr><tr><td rowspan="2">Arc Challenge</td><td>English</td><td>39.85</td><td>45.90</td><td>46.25</td><td>48.29</td></tr><tr><td>Hindi (Translated)</td><td>32.68</td><td>35.92</td><td>36.60</td><td>36.26</td></tr><tr><td rowspan="2">Hella Swag</td><td>English</td><td>31.48</td><td>34.37</td><td>32.45</td><td>36.83</td></tr><tr><td>Hindi (Translated)</td><td>25.59</td><td>25.37</td><td>24.85</td><td>24.84</td></tr><tr><td>Winogrande</td><td>English</td><td>49.17</td><td>49.72</td><td>-</td><td>-</td></tr></table>
104
+
105
+ Table 4: Accuracy on English NLU and Commonsense Reasoning tasks and its translated variants
106
+
107
+ <table><tr><td rowspan="2"></td><td rowspan="2">Metric</td><td colspan="2">0-Shot</td><td colspan="2">5-shot</td></tr><tr><td>OpenHathi</td><td>Airavata</td><td>OpenHathi</td><td>Airavata</td></tr><tr><td rowspan="2">Flores</td><td>chrF++</td><td>55.41</td><td>54.82</td><td>54.98</td><td>54.24</td></tr><tr><td>BLEURT</td><td>0.7103</td><td>0.6970</td><td>0.7115</td><td>0.7084</td></tr><tr><td rowspan="2">IN22-Gen</td><td>chrF++</td><td>54.23</td><td>53.78</td><td>54.53</td><td>52.81</td></tr><tr><td>BLEURT</td><td>0.7156</td><td>0.7012</td><td>0.7181</td><td>0.7037</td></tr></table>
108
+
109
+ Table 5: chrF++ and BLEURT scores on the English-Hindi translation task
110
+
111
+ # 5 HUMAN EVALUATION
112
+
113
+ We assess Airavata using authentic prompts provided by real users, evaluating its performance across five distinct abilities outlined in Table 7.
114
+
115
+ We define a set of intents and domains of focus, which are then provided to users, along with clear instructions on the type of prompts they should construct. More details about the benchmark will be released soon.
116
+
117
+ Along with Airavata, we also evaluate the ChatGPT (OpenAI, 2022), GPT-4 (Achiam et al., 2023), and BactrianX-llama-7B (Li et al., 2023) models for the same abilities. BactrianX-llama-7B is an instruction fine-tuned model for Hindi, fine-tuned directly from the Llama base model on a multilingual instruction dataset. This multilingual instruction dataset consisted of machine-translated instructions
118
+
119
+ <table><tr><td rowspan="2"></td><td rowspan="2">Metric</td><td colspan="3">1-shot</td></tr><tr><td>OpenHathi</td><td>Llama 2 7B Chat (translate-test)</td><td>Airavata</td></tr><tr><td>Indic QA (No Context)</td><td>F1</td><td>17</td><td>4.58</td><td>21.01</td></tr><tr><td>Indic QA (With Context)</td><td>F1</td><td>20.69</td><td>19.59</td><td>37.69</td></tr><tr><td rowspan="2">Indic Headline</td><td>Rouge L</td><td>11.26</td><td>23.06</td><td>12.32</td></tr><tr><td>BLEURT</td><td>0.4682</td><td>0.4590</td><td>0.3793</td></tr><tr><td rowspan="2">IndicWikiBio</td><td>Rouge L</td><td>20.45</td><td>41.01</td><td>10.66</td></tr><tr><td>BLEURT</td><td>0.5185</td><td>0.6415</td><td>0.4279</td></tr></table>
120
+
121
+ Table 6: F1, Rouge L and BLEURT scores on Indic NLG tasks
122
+
123
+ <table><tr><td>AbilityName</td><td>Ability</td></tr><tr><td>Long</td><td>Ability to generate long-form text like writing essays, speeches, reports, etc.</td></tr><tr><td>Fact-Ops</td><td>Ability to give factual opinions and explanations like seeking recommendations, seeking advice, opinions, explanations, etc.</td></tr><tr><td>Content</td><td>Ability to make content accessible like summarizations, layman explanations, etc</td></tr><tr><td>Lang-Creativity</td><td>Ability to be creative in language like finding anagrams, rhyming words, vocabulary enhancement, etc</td></tr><tr><td>Culture</td><td>Ability to answer questions related to Indian Culture.</td></tr></table>
124
+
125
+ from Alpaca (Taori et al., 2023) and Dolly (Conover et al., 2023) datasets, followed by response generation from ChatGPT (OpenAI, 2022). Annotators were presented with a prompt and a randomly selected response from any of the models and were instructed to provide ratings based on the metrics outlined in Table 8.
126
+
127
+ In addition to the above metrics, we also ask the human evaluators to give a final score on a Likert scale of 1 to 5, reflecting their overall satisfaction with the response.
128
+
129
+ We sample a set of 50 prompts $^{8}$ covering various intents and domains (more details about the benchmark will be made available soon) and generate the responses from all three models. The prompt-response pairs were shuffled randomly and anonymized, ensuring no indication of the generating model, before being presented to the annotators for evaluation. Annotators were instructed to assess responses strictly adhering to the aforementioned rubrics. We report the various results in Figures 3 to 5.
130
+
131
+ Table 7: Description of abilities to test through human evaluation
132
+
133
+ <table><tr><td>Metric</td><td>Details</td><td>Range</td></tr><tr><td>IFA: Instruction Following Ability</td><td>This assesses the model&#x27;s ability to accurately and effectively follow the instructions provided in the prompt</td><td>0-2</td></tr><tr><td>CNS: Closeness to Native Speaker</td><td>This assesses how naturally and fluently the model&#x27;s responses align with the way a native Hindi speaker would express the same ideas.</td><td>0-2</td></tr><tr><td>CQ: Content Quality</td><td>This evaluates the response in terms of its factual accuracy, logical flow of ideas, and overall informational relevance.</td><td>0-2</td></tr></table>
134
+
135
+ Table 8: Rubrics for Human Evaluation
136
+
137
+ ![](images/f07c063993fe40e386f05c63df65599bba2a0b77988f181fed2f931340669119.jpg)
138
+ Figure 3: Average satisfaction scores for various models based on a Likert scale between 1 and 5 reported by Human annotators.
139
+
140
+ ![](images/1e3ded6752c6ac81488037efdade9003491b8974f7d64b4bf6441d265e70b391.jpg)
141
+ Figure 4: Human evaluation scores for assessing the instruction following and content generation abilities of the models based on the rubrics described in Table 8.
142
+
143
+ Our observations suggest that while Airavata still trails significantly behind GPT-4 in terms of its ability to follow instructions and the quality of its content, it performs relatively better when generating natural-sounding Hindi content compared to both GPT-4 and ChatGPT. Notably, Airavata outperforms the BactrianX-llama-7B model by a significant margin. This difference in performance can be attributed to a lack of vocabulary expansion in BactrianX-llama-7B to accommodate addi
144
+
145
+ ![](images/8c636034a48a07fbc7c96221dbfc14df8852b0d65871bbd08e2e77019801c865.jpg)
146
+ Figure 5: Fine-grained human evaluation of content generation abilities of the models described in Table 7.
147
+
148
+ tional Hindi tokens and its lack of continual pre-training in Hindi. Furthermore, BactrianX-llama-7B may have been trained on a lower-quality dataset for instruction tuning that was completely generated using ChatGPT. The OpenHathi model and Airavata, however, have addressed these shortcomings and show substantial improvements over the BactrianX-llama-7B model. We discuss these performance trends across various capabilities in more detail below.
149
+
150
+ The findings indicate that amongst all abilities, Airavata particularly excels in providing factual opinions and explanations, as demonstrated by the earlier examples. However, the model struggles with tasks demanding creative language usage, as our SFT data lacks components emphasizing creativity. When comparing GPT-4 and ChatGPT (GPT-3.5) performance, GPT-4 generally surpasses the latter in knowledge-intensive or creativity-demanding tasks. Nevertheless, it's noteworthy that ChatGPT either outperforms or is comparable in tasks emphasizing language generation, such as long-form content creation, offering factual opinions, and ensuring content accessibility.
151
+
152
+ We acknowledge that our current human evaluation is not robust and thorough due to the limited number of prompts in our set and each prompt and response pair being evaluated by only one annotator. However, our evaluation still offers valuable preliminary insights that will inform our efforts to improve the model. Expanding the instruction dataset to include content covering a diverse range of abilities can help improve the model's capabilities. At the same time, it is important to acknowledge that a significant portion of knowledge stems from English, which possesses the most extensive knowledge repository. Therefore, achieving better cross-lingual alignment between Hindi and English representations is essential for accurately answering factual questions and minimizing erroneous responses.
153
+
154
+ # 6 TOXICITY AND MISINFORMATION
155
+
156
+ We evaluate the Airavata, OpenHathi, and Llama2-7B models with publicly available benchmark datasets, in both 0-shot and 5-shot settings. Our evaluation provides insights into key dimensions for LM safety. Multilingual HateCheck (MHC) is a suite of functional tests for hate speech detection, and we use its Hindi subset (Das et al., 2022) to evaluate and compare the performance of all models. We translate the TruthfulQA, Implicit Hate, and a human-evaluated subset of the Toxigen dataset to Hindi. This subset of Toxigen has been denoised to retain instances that have annotation
157
+
158
+ <table><tr><td rowspan="2"></td><td rowspan="2">Variant</td><td colspan="3">0-Shot</td><td colspan="3">5-Shot</td></tr><tr><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td><td>OpenHathi</td><td>Llama2 7B Chat (translate-test)</td><td>Airavata</td></tr><tr><td>Multilingual HateCheck</td><td>Hindi</td><td>70.15</td><td>70.24</td><td>70.24</td><td>70.15</td><td>70.24</td><td>70.25</td></tr><tr><td rowspan="2">Implicit Hate</td><td>English</td><td>50.65</td><td>57.92</td><td>62.33</td><td>51.41</td><td>65.02</td><td>62.44</td></tr><tr><td>Hindi (Translated)</td><td>52.45</td><td>53.21</td><td>61.15</td><td>49.99</td><td>52.98</td><td>58.84</td></tr><tr><td rowspan="2">Toxigen (human evaluated)</td><td>English</td><td>44.91</td><td>83.35</td><td>78.63</td><td>42.71</td><td>66.34</td><td>72.24</td></tr><tr><td>Hindi (Translated)</td><td>47.75</td><td>83.97</td><td>78.56</td><td>42.83</td><td>73.20</td><td>74.80</td></tr><tr><td rowspan="2">TruthfulQA (averaged MC1 &amp; MC2)</td><td>English</td><td>30.72</td><td>37.25</td><td>33.60</td><td>30.72</td><td>37.25</td><td>33.64</td></tr><tr><td>Hindi (Translated)</td><td>34.31</td><td>35.66</td><td>35.32</td><td>34.31</td><td>35.66</td><td>35.32</td></tr></table>
159
+
160
+ Table 9: Accuracy on hate and toxicity identification, and answering factual questions.
161
+
162
+ agreement from all annotators (Hosseini et al., 2023). While the Implicit Hate dataset (Hosseini et al., 2023) helps evaluate the model performance on detecting the subtle and implicit forms of hate speech, human-evaluated Toxigen data contains instances that are directed towards various demographics. We evaluate the model performance on detection of toxicity in these three datasets, and their translated instances using the accuracy metric. Further, for evaluating the model's capability towards answering factual questions, we use the TruthfulQA dataset (Lin et al., 2022) which contains multiple choice questions which are factual and can mimic common human falsehoods.
163
+
164
+ Given the accuracy scores from our evaluation, in Table 9, Airavata is able to detect openly expressed hate in Hindi statements from MHC with an accuracy similar to the other two models, with similar performance in both 0- and 5-shot settings. On the more challenging instances which contain implicitly veiled hate speech, Airavata is able to identify hate with significantly better accuracy than the other two models within the translated Hindi instances. On the original Implicit Hate dataset, Llama2-7B seems to perform better, given a few examples. On the Translated Toxigen subset, Llama2-7B is able to detect targeted toxic instances against certain demographics with the highest accuracy among all three models. However, given a few examples, we observe a significant performance dip for Llama2-7B and Airavata outperforms it marginally. We observe similar performance on the original English dataset and note that Airavata is better at detecting targeted hate in Hindi, as compared to implicitly veiled hate speech. Its performance at detecting targeted hate is surprisingly better than detecting openly expressed hate speech from MHC. On the TruthfulQA dataset, in both 0- and 5-shot settings, Llama2-7B outperforms OpenHathi and Airavata. On the translated TruthfulQA data, a marginal dip in the performance can be observed which indicates that we need further investigation into the model's capability for generating misinformation.
165
+
166
+ Overall, while these results suggest that LLMs are able to identify toxicity and hateful speech, we think that further investigation is needed to evaluate toxicity and the presence of social biases within the content generated by LLMs. In the future, we plan to investigate additional existing benchmarks and novel evaluation measures to test LLMs for content safety and reliability.
167
+
168
+ # 7 RESOURCES
169
+
170
+ You can find all the details about the project in this section. We release the following resources to facilitate further research in instruction-tuning for Indian language LLMs.
171
+
172
+ - Airavata Github Repository (for fine-tuning and evaluation)
173
+ - Airavata SFT Dataset Collection: Huggingface
174
+ - Airavata Evaluation Dataset Collection: Huggingface
175
+
176
+ # 8 SUMMARY AND FUTURE OUTLOOK
177
+
178
+ We release Airavata, an open-source, instruction-tuned Hindi model demonstrating promising performance across various tasks compared to existing open-source models. We provide both datasets and models to facilitate further research in improving Hindi LLMs. This marks an initial stride towards developing high-quality, open-source LLMs for Indian languages that require multi-faceted
179
+
180
+ efforts spanning the creation of extensive pre-training datasets, diverse instruction-tuning datasets, and high-quality foundational models.
181
+
182
+ # 9 LIMITATIONS
183
+
184
+ Airavata, like other LLMs, faces common challenges such as potential hallucinations, resulting in the generation of fabricated information, and may struggle with accuracy in complex or specialized topics. There is also a risk of generating objectionable or biased content. Its understanding of cultural nuances and effectiveness in mixed-language contexts may be constrained. Furthermore, the model's performance is closely tied to the quality and scope of its training data, influencing its efficacy and reliability. It is crucial to note that this model is designed for research purposes and is not recommended for any production use-cases.
185
+
186
+ # REFERENCES
187
+
188
+ Josh Achiam, Steven Adler, Sandhini Agarwal, et al. Gpt-4 technical report. 2023. URL https://api.semanticscholar.org/CorpusID:257532815.
189
+ Kabir Ahuja, Harshita Diddee, Rishav Hada, Millicent Ochieng, Krithika Ramesh, Prachi Jain, Akshay Nambi, Tanuja Ganu, Sameer Segal, Mohamed Ahmed, Kalika Bali, and Sunayana Sitaram. MEGA: Multilingual evaluation of generative AI. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 4232-4267, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.258. URL https://aclanthology.org/2023.emnlp-main.258.
190
+ Yuntao Bai, Andy Jones, Kamal Ndousse, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv: 2204.05862, 2022.
191
+ James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, Wesam Manassra, Prafulla Dhariwal, Casey Chu, Yunxin Jiao, and Aditya Ramesh. Improving image generation with better captions, October 2023. URL https://cdn.openai.com/papers/dall-e-3.pdf.
192
+ Hyung Won Chung, Le Hou, Shayne Longpre, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv: 2210.11416, 2022.
193
+ Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. *BoolQ: Exploring the surprising difficulty of natural yes/no questions.* In Jill Burstein, Christy Doran, and Thamar Solorio (eds.), *Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies*, Volume 1 (Long and Short Papers), pp. 2924–2936, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1300. URL https://aclanthology.org/N19-1300.
194
+ Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv preprint arXiv: 1803.05457, 2018.
195
+ Together Computer. Redpajama: an open dataset for training large language models, October 2023. URL https://github.com/togethercomputer/RedPajama-Data.
196
+ Mike Conover, Matt Hayes, Ankit Mathur, Jianwei Xie, Jun Wan, Sam Shah, Ali Ghodsi, Patrick Wendell, Matei Zaharia, and Reynold Xin. Free dolly: Introducing the world's first truly open instruction-tuned llm, 2023. URL https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm.
197
+ Marta R. Costa-jussa, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation. arXiv preprint arXiv: 2207.04672, 2022.
198
+ Mithun Das, Punyajoy Saha, Binny Mathew, and Animesh Mukherjee. HateCheckHIn: Evaluating Hindi hate speech detection models. In Nicoletta Calzolari, Frédéric Béchet, Philippe Blache, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Jan Odijk, and Stelios Piperidis (eds.), Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5378-5387, Marseille, France, June 2022. European Language Resources Association. URL https://aclanthology.org/2022.lrec-1.575.
199
+
200
+ Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. Enhancing chat language models by scaling high-quality instructional conversations. arXiv preprint arXiv:2305.14233, 2023.
201
+ Sumanth Doddapaneni, Rahul Aralikatte, Gowtham Ramesh, Shreyansh Goyal, Mitesh M. Khapra, Anoop Kunchukuttan, and Pratyush Kumar. Towards leaving no indic language behind: Building monolingual corpora, benchmark and models for indic languages. Annual Meeting of the Association for Computational Linguistics, 2022. doi: 10.18653/v1/2023.acl-long.693.
202
+ Yann Dubois, Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Alpacafarm: A simulation framework for methods that learn from human feedback, 2023.
203
+ Jay Gala, Pranjal A Chitale, A K Raghavan, Varun Gumma, Sumanth Doddapaneni, Aswanth Kumar M, Janki Atul Nawale, Anupama Sujatha, Ratish Puduppully, Vivek Raghavan, Pratyush Kumar, Mitesh M Khapra, Raj Dabre, and Anoop Kunchukuttan. Indictrans2: Towards high-quality and accessible machine translation models for all 22 scheduled indian languages. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=vfT4YuzAYA.
204
+ Naman Goyal, Cynthia Gao, Vishrav Chaudhary, Peng-Jen Chen, Guillaume Wenzek, Da Ju, Sanjana Krishnan, Marc'Aurelio Ranzato, Francisco Guzmán, and Angela Fan. The flores-101 evaluation benchmark for low-resource and multilingual machine translation. 2021.
205
+ Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ.
206
+ Saghar Hosseini, Hamid Palangi, and Ahmed Hassan Awadallah. An empirical study of metrics to measure representational harms in pre-trained language models. arXiv preprint arXiv:2301.09211, 2023.
207
+ Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9.
208
+ Srinivasan Iyer, Xi Victoria Lin, Ramakanth Pasunuru, Todor Mihaylov, Daniel Simig, Ping Yu, Kurt Shuster, Tianlu Wang, Qing Liu, Punit Singh Koura, Xian Li, Brian O'Horo, Gabriel Pereyra, Jeff Wang, Christopher Dewan, Asli Celikyilmaz, Luke Zettlemoyer, and Ves Stoyanov. Opt-iml: Scaling language model instruction meta learning through the lens of generalization. arXiv preprint arXiv: 2212.12017, 2022.
209
+ Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, et al. Mistral 7b. arXiv preprint arXiv: 2310.06825, 2023.
210
+ Mario Michael Krell, Matej Kosec, Sergio P. Perez, and Andrew William Fitzgibbon. Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance, 2023. URL https://openreview.net/forum?id=ZAzSf9pzCm.
211
+ Aman Kumar, Himani Shrotriya, Prachi Sahu, Amogh Mishra, Raj Dabre, Ratish Puduppully, Anoop Kunchukuttan, Mitesh M. Khapra, and Pratyush Kumar. IndicNLG benchmark: Multilingual datasets for diverse NLG tasks in Indic languages. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 5363-5394, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.360. URL https://aclanthology.org/2022.emnlp-main.360.
212
+ Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi-Rui Tam, Keith Stevens, Abdullah Barhoum, Nguyen Minh Duc, Oliver Stanley, Richard Nagyfi, Shahul ES, Sameer Suri,
213
+
214
+ David Glushkov, Arnav Dantuluri, Andrew Maguire, Christoph Schuhmann, Huu Nguyen, and Alexander Mattick. Openassistant conversations - democratizing large language model alignment. arXiv preprint arXiv: 2304.07327, 2023.
215
+ Haonan Li, Fajri Koto, Minghao Wu, Alham Fikri Aji, and Timothy Baldwin. Bactrian-x: Multilingual replicable instruction-following models with low-rank adaptation. arXiv preprint arXiv: 2305.15011, 2023.
216
+ Stephanie Lin, Jacob Hilton, and Owain Evans. TruthfulQA: Measuring how models mimic human falsehoods. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 3214-3252, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.229. URL https://aclanthology.org/2022.acl-long.229.
217
+ S. Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing data and methods for effective instruction tuning. International Conference on Machine Learning, 2023. doi: 10.48550/arXiv.2301.13688.
218
+ OpenAI. GPT-3.5, November 2022. URL https://platform.openai.com/docs/models/gpt-3-5.
219
+ Maja Popovic. $\mathrm{chrF}++$ : words helping character n-grams. In Proceedings of the Second Conference on Machine Translation, pp. 612-618, Copenhagen, Denmark, September 2017. Association for Computational Linguistics. doi: 10.18653/v1/W17-4770. URL https://aclanthology.org/w17-4770.
220
+ Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pp. 8732-8740. AAAI Press, 2020. doi: 10.1609/AAAI.V34I05.6399. URL https://doi.org/10.1609/aaai.v34i05.6399.
221
+ SarvamAI. Openhathi series: An approach to build bilingual llms frugally, December 2023. URL https://www.sarvam.ai/blog/announcing-openhathi-series.
222
+ Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.
223
+ Hugo Touvron, Louis Martin, Kevin Stone, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv: 2307.09288, 2023.
224
+ Lewis Tunstall, Edward Beeching, Nathan Lambert, Nazneen Rajani, Kashif Rasul, Younes Belkada, Shengyi Huang, Leandro von Werra, Clémentine Fourrier, Nathan Habib, Nathan Sarrazin, Omar Sanseviero, Alexander M. Rush, and Thomas Wolf. Zephyr: Direct distillation of lm alignment. arXiv preprint arXiv: 2310.16944, 2023.
225
+ Xiangpeng Wei, Haoran Wei, Huan Lin, Tianhao Li, Pei Zhang, Xingzhang Ren, Mei Li, Yu Wan, Zhiwei Cao, Binbin Xie, Tianxiang Hu, Shangjie Li, Binyuan Hui, Bowen Yu, Dayiheng Liu, Baosong Yang, Fei Huang, and Jun Xie. Polylm: An open source polyglot large language model. arXiv preprint arXiv: 2307.06018, 2023.
226
+ Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. HellaSwag: Can a machine really finish your sentence? In Anna Korhonen, David Traum, and Lluis Márquez (eds.), Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 4791-4800, Florence, Italy, jul 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1472. URL https://aclanthology.org/P19-1472.
227
+
228
+ Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Tianle Li, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zhuohan Li, Zi Lin, Eric. P Xing, Joseph E. Gonzalez, Ion Stoica, and Hao Zhang. Lmsys-chat-1m: A large-scale real-world llm conversation dataset, 2023a.
229
+ Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv: 2306.05685, 2023b.
230
+
231
+ # A EXAMPLES
232
+
233
+ In this section, we show example generations from Airavata. Note that the model's outputs in examples #A5 and #A6 did not contain newlines between bullet points. They have been added here for readability.
234
+
235
+ # A.1 EXAMPLES WHERE THE AIRAVATA MODEL GENERATES GOOD OUTPUT
236
+
237
+ Airavata excels in specific content generation tasks, such as drafting emails on request (#A1) and addressing general how-to inquiries (#A5, #A6). Furthermore, it demonstrates proficiency in adhering to instructions related to output structure (#A3 and #A5).
238
+
239
+ # Example #A1
240
+
241
+ Prompt
242
+
243
+ Jijjya gahak k oAp ane aodir me dere k aanu b h, uak ef k l eep k aok maki n k r t k s ka saamahana ka saamahanaon ho gayai
244
+
245
+ Model Response
246
+
247
+ [əaahekkkaanam],1
248
+
249
+ Muzhe A 1
250
+
251
+ M 1
252
+
253
+ I 1
254
+
255
+ Aapokek 8eay 4r samnag k e liy gthnyalad | m y aepke k sakk ka m k rne k lai tay r h | i maanadarri so, [aapok
256
+
257
+ # Example #A2
258
+
259
+ Prompt
260
+
261
+ Bharat 5 m 2017-10-14 00:00 PM
262
+
263
+ Model Response
264
+
265
+ "hmm, yah eek dvlvchysy mshavall he! harrat me bueo kco saammanit vayknti manan jalata he aor ahmanotar paur uoua oki aor ruky kivay a jalata he jab koeikc kofin ngnoy glo nana hohata he yaa janaonkarre lono hi ohti 1 ensai iessoliey hckovik buo aok aia an anduunbvat puiquyos se pairrit kivay gag he aor cngn bueumuluy sanygatni manana jalata he iisomai, buo te saaah baltvotl karat sammay sammamanpukr kwhvahar kcarnan aor unenke pere ro kooe jnejso jnevinm dshararot kauopmaogkkrna maana jalata h
266
+
267
+ yah yad raynna a mahmehluyu g h k bueo k o aakar lao gne k e liy Maagvdvckk aor sahaahkarke k uqpe me darez qa jala he , aor anh aahit kkrne y a aapammaninit kkrne k bajay uonkn k sahlal a h letne k kdoishk kcrnni chaihy! yad keisni bujugne ne apakko kukcsh aunu cih t kaha yaaaske yzvalhacar mokcoe galtatki, lo zachit pratiokya de nana andr koytki co oek se sghaalnna ma hmluyu
268
+
269
+ # Example #A3
270
+
271
+ # Prompt
272
+
273
+ M 5
274
+
275
+ # Model Response
276
+
277
+ 1. 安普宁斯 莱哈希耶夫 哈克列维诺夫
278
+ 2. 亞普耶夫·拉赫亚耶夫和克洛波,「普拉赫亚耶夫·克希耶夫」等与伊兹霍夫列夫
279
+ 3. 3.4.5.6.7.8.9.10.11.12.13.14.15.16.17.18.19.20.21.22.23.24.25.26.27.28.29.30.31.32.33.34.35.36.37.38.39.40.41.42.43.44.45.46.47.48.49.50.51.52.53.54.55.56.57.58.59.60.61.62.63.64.65.66.67.68.69.70.71.72.73.74.75.76.77.78.79.80.81.82.83.84.85.86.87.88.89.90
280
+ 4. 亞普耶克阿雅基可郭可萨亚德明赫拉达
281
+ 5. 3
282
+
283
+ # Example #A4
284
+
285
+ # Prompt
286
+
287
+ M' Aapaneon Samaay Pnaeehakn kaiiok kaike s uahar kakata ? Muzo aang aang budu d aand a and uankai y
288
+
289
+ # Model Response
290
+
291
+ 1. Aapnane kayai kao prashmikcata 4e: uun kaoyk co prashmikcata 4e jinhe puotarokne koi saabres Aichik Aovshakatla H e Aor us
292
+ 2. 8ek Aunusuui bi nana: Eek Aunusuui bi nana aei aor ues paar teke rere! yeh Aapokn OApneon saamay kai behltar pralxchnn karene anr Agan ne Ikghaikoi kpr aapat karrne me madud karega
293
+ 3. यान दिक्षत् रोत्ति काने अपिके पूर्थन को खँकर शोर और यपिके कासम पर यान को एविति करे
294
+ 4. 5eek le: Ninayimilit 5eek k eonene sre Aapokk o Rrnaa Kcarrne OoR Uyataadukk bane ruhno m Mada d mela
295
+ 5. AApunne AApunne kO PuRsKcUt kC: AApunne AApunne kO PuRsKcUt kC: AApunne AApunne kO Preririt rHnene O and AApunne Lkshy o Ko praput kKarne 6
296
+
297
+ # Example #A5
298
+
299
+ Prompt
300
+
301
+ Meneo abahot Jagah nioekr Re k Lepaia kay a par k h i sre rll a n hi Aya jisso muu o dipshan ho ra ha he m u
302
+
303
+ # Model Response
304
+
305
+ yah h s u nak k d 1 k a aapok o aovsakad k a aamnana karana p a 20 raha 1 y aay aay r aon mahvcduup 1 k 4e 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
306
+
307
+ 1. kivsiti vikiklaksak yahalahaahkar se batai kvr-kihmi maanansikk rakhayyepeshovar se batai kkrnna Aapvke Aabvshad kco prabhit kkrne ande hse kkm kranne ki dixhara m pahelaka kcdm ho saekrtala he! ve Aappki bi aabvana o kso samdaonr o ar uon parr kaabpu ane ke lieup euk k yagjbnanbgnane nme anapokki maadudk ckr sakotne
308
+ 2. Nijoyamitil vayayam: sharariyrik ganiitivichiy eodzifkni ndozicii, jra prakukirik mnnoads aabana ne vallr hote y 1 ayai t kek k 0eepananekevayayam jere chalnna, rerrnna yaa kuch hllkca kchvad bhi abvssadprkctatata khi bhavnna ka kcm kcrne me madud kkr sokklat
309
+ 3. Prukutit me saumay bhitatae: Prukutit me saumay bhitatae se t nannav ka kam karnrno and r kclvana k h aovnna ka kcbadva dne m madudmellu saokatni 1 Aap utahlne jia saksot e, Pcduln yaa kcr saksot e ya abangvani k a kam kcrke prukutit me apanri ruvi vikasisitkarakr saksot e
310
+ 4. 4
311
+ 5. 5
312
+ 6. Maiauokulneos Takaknienoks koi Auajmaa: cayan, gahri aas o ane k e aayaa y aogg sah it dihinna Maiauokulneos Taknaike Aarama kranne O and veatman n gn parr cyaan kckit kkrnr ne m madud kkr sakknti
313
+ 7. Aotam-TezBaal kA Othayasakr: PpAepnnt Nndi Leonna, sntuolilat Aahaar Zhaan aor Kxvshy Jiaovn ananay Rzne josii rsvb-TezBaal praohaoa o me shaimel hnoana aovshyok h! iusmae nan kcrnn, kvatab pao na y a Tlvi dezto u Naatla kkrna josoi choiote shaimel hio sakotni
314
+ 8. 8.
315
+ 9. Peshovkar Mavad Lef: yad Ad Aapokek Aevahsada k a Prabghun Karana buhit k dngata h, tio o parramshr prapat karene parr 1eckar k r Euk prakshikl mnnanisik k svalkhya pehovkar Aapokko mukacabalat lntn siai gne me madudkr saktata h aor Aapokko aokkak SakaikarLakmak mananisikkala vikciknilt k arer nme maudud k k rakrakta h i gad rake, aeevashad k do krarana aasana nahi h, letekn nahni Samaehn anr rannaniitayek ae sathy, Aapp pragati kkrakote h e orr beohitrar mahmukumkrk sarkotne h
316
+
317
+ # Example #A6
318
+
319
+ # Prompt
320
+
321
+ Mn Ue Tn e n e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e e 0
322
+
323
+ # Model Response
324
+
325
+ Aapke prs k e 1e 7hnnnabad! bngluu j anen pnr Aapko sannny loanoi k a hao sieniokne me kucchunootiokkna k a saamnna Ppaq sckatla, letekin n cnilta mnt kara-madudc ke liyep yhahf eek charra-d-rc-eharra maargdshikka dti gai:
326
+
327
+ Kraadu Suriarogne ne kaiy e Kdum a
328
+
329
+ Kanafotek saarokkar a dara pradana n kia jai rne mukat konadk padayakm
330
+
331
+ 2. kranz 51000000000000000000000000000000000000000000000000000000000000000000000000000000000
332
+ 3. kkrnnd 100mamal, uhrarana rna aor shdavallv sioa knrne knrne e kprk n d 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 2000r o 1
333
+ 4. kkrnue ykrae aar r sranrnnnaa ae bare me janaonne ke lioe kcnue kniltae gha pniirakai pndna shuk aar | Aay uutunu chanlalp arer kkrnue sahok yea vataiiaap snta ko deyane ka vitikcpu bi chun n sncote jinnonme knrnue bnaa
334
+ 5. kagadug gati, fiklpmn aarre rivv sha ka anannd let jinne hApay smaqda saekatle h! ayh Aapakno nrae shddo aer Aabmavakotyko kana umane oar unonkai vveoepang kkrane ne madoud karegaa
335
+ 6. 5e nik gntiivichoy a r aat anr vilai me knaa cka uayog a kare ne k aagaaas kare Udaharra kke lire, yadie apak paas kaipni shajana he, to pahel ve hah jia tse samay kcnz dme kuc zotte aakayxsh y a mabnadun an Aajmargal
336
+ 7. kranzug me kkrssi dokst y a parraray k sdoasy saaat k r! uohn e hazra sizxanoe aor unnsr svalalpuxne soe aap nie chiljse sioane anr suhargk arerke ae zovsaur prpaf k
337
+ 8. 安邦克萨克萨克都克萨普拉尼安克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克克萨克
338
+
339
+ # A.2 EXAMPLES WHERE AIRAVATA OUTPUT HAS ERRORS
340
+
341
+ The model is prone to hallucinations: it can present distorted facts, as with the plot of the novel in #B1, or it may fail to follow the instructions provided (#B2).
342
+
343
+ # Example #B1
344
+
345
+ Prompt
346
+
347
+ Tell me about Dharamvir Bharati's famous novel "Gunahon Ka Devta" and briefly describe its main characters, Sudha and Chander.
348
+
349
+ Model Response
350
+
351
+ gunahn a k a d e vatla t 1 18 10 15 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2
352
+
353
+ # Example #B2
354
+
355
+ Prompt
356
+
357
+ Mn mnnn nnnn nnnn nn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nn nnnn nann
358
+
359
+ Model Response
360
+
361
+ Mukalakl kalal aek prakar kai Amuul kalka he jia mamav sharir and 8i 8n ka Pratiniinilv karkotie h Aay eak bukmukai kall a kali rup h Jijsepo parrapereikk 8r sarmkaklatin ddoanl tareko se kiya jia sakkata, aer iokska oepag Aekssar cokla, Mntikcklal aor Pradshon jaeo vimnna maahymoa mo kya jala tla.
2401.15xxx/2401.15006/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1027a64d5d12cc2c5c0ddfdd855e7d7d62a5525ff3555612c1b957090b9a2926
3
+ size 527757
2401.15xxx/2401.15006/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15024/74a0cb30-2b06-4a8e-94be-fd2157d0e9b8_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:574a970607cf33127155987aefd27e99a2198662e92c54fcbd85c88a75fce6fb
3
+ size 563458
2401.15xxx/2401.15024/full.md ADDED
@@ -0,0 +1,428 @@
 
1
+ # SLICEGPT: COMPRESS LARGE LANGUAGE MODELS BY DELETING ROWS AND COLUMNS
2
+
3
+ Saleh Ashkboos†*
4
+
5
+ ETH Zurich
6
+
7
+ saleh.ashkboos@inf.ethz.ch
8
+
9
+ Maximilian L. Croci†
10
+
11
+ Microsoft Research
12
+
13
+ t-mcroci@microsoft.com
14
+
15
+ Marcelo Gennari do Nascimento
16
+
17
+ Microsoft
18
+
19
+ marceloge@microsoft.com
20
+
21
+ Torsten Hoefler
22
+
23
+ ETH Zurich
24
+
25
+ torsten.hoefler@inf.ethz.ch
26
+
27
+ James Hensman
28
+
29
+ Microsoft Research
30
+
31
+ jameshensman@microsoft.com
32
+
33
+ # ABSTRACT
34
+
35
+ Large language models have become the cornerstone of natural language processing, but their use comes with substantial costs in terms of compute and memory resources. Sparsification provides a solution to alleviate these resource constraints, and recent works have shown that trained models can be sparsified post-hoc. Existing sparsification techniques face challenges as they need additional data structures and offer constrained speedup with current hardware. In this paper we present SliceGPT, a new post-training sparsification scheme which replaces each weight matrix with a smaller (dense) matrix, reducing the embedding dimension of the network. Through extensive experimentation we show that SliceGPT can remove up to $25\%$ of the model parameters (including embeddings) for LLAMA-2 70B, OPT 66B and Phi-2 models while maintaining $99\%$ , $99\%$ and $90\%$ zero-shot task performance of the dense model respectively. Our sliced models run on fewer GPUs and run faster without any additional code optimization: on 24GB consumer GPUs we reduce the total compute for inference on LLAMA-2 70B to $64\%$ of that of the dense model; on 40GB A100 GPUs we reduce it to $66\%$ . We offer a new insight, computational invariance in transformer networks, which enables SliceGPT and we hope it will inspire and enable future avenues to reduce memory and computation demands for pre-trained models. Code is available at: https://github.com/microsoft/TransformerCompression.
36
+
37
+ # 1 INTRODUCTION
38
+
39
+ Large language models (LLMs) are neural networks with billions of parameters, trained on trillions of tokens (Zhao et al., 2023). The cost of training an LLM has caused a shift to re-using pre-trained models for multiple tasks, the foundation model paradigm. The size of LLMs makes deploying a pre-trained model an expensive undertaking. Many models require multiple GPUs to be able to compute a prediction, and because the models are autoregressive, multiple forward passes of the neural network are needed to generate text responses. It is therefore of widespread interest to reduce the computational requirements of these models, usually performed via post-training techniques referred to as model compression.
40
+
41
+ A majority of model compression techniques fall into one of four categories: distillation, tensor decomposition (which includes low-rank factorization), pruning and quantization (Hoefler et al., 2021; Gholami et al., 2021; Zhu et al., 2023; Gupta & Agrawal, 2021). In this work we focus on pruning,
42
+
43
+ ![](images/62655f1643657e635d57694a630db2cd5e4723c67ef9d90c8f17bd5a0a14bba7.jpg)
+ ![](images/017cf8095c17c525eed652ae6e46fab3cc8c95fc93f7d4dc49f483f13d60250b.jpg)
+ ![](images/373562dd4f2ce1ca019deb1a3c89e0c7d68684916760064c2b2867b1629e759e.jpg)
+ ![](images/207dd90c40e03faf315123fa44060312dde7f06db9331af783dfd1c2b90e1a8e.jpg)
+ ![](images/3bdf44cba1e30b0dafb94e49f6e1e9410d99f5fb838fe86379b71c0922b778ef.jpg)
+ ![](images/5282cbc82fd288fdfb465fcd1d4f50ed020969c08fcd36ae15c7b022bec4ef26.jpg)
+
+ Figure 1: Matrix multiplication of the signal $\mathbf{X}$ and a weight matrix $\mathbf{W}$ under different types of sparsity. Left (unstructured sparsity): some elements of $\mathbf{W}$ are zero, and $\mathbf{X}$ is dense. Middle (2:4 structured sparsity): each block of four weight matrix entries contains two zeros, and $\mathbf{X}$ is dense. Right (SliceGPT slicing, ours): after introducing transformation $\mathbf{Q}$, all the sparsity is arranged to the bottom rows of $\mathbf{W}$ and the corresponding columns of $\mathbf{X}$ are removed.
58
+
59
+ though we hope that our methodology may influence future work on other areas. Whilst pruning methods have been around for some time, many approaches require recovery fine-tuning (RFT) after pruning to maintain performance, making the overall process an expensive and hard-to-scale task. With SliceGPT we compress large models using a single GPU in just a few hours and maintain competitive performance on generation and downstream tasks even without RFT.
60
+
61
+ Pruning methods work by setting some elements of the weight matrices in an LLM to zero, and (optionally) updating the surrounding elements of the matrix to compensate. The result is a sparse pattern which means that some floating point operations can be skipped in the matrix multiplications required in the forward pass of the neural network. The relative speedup of the operations depends on the level of sparsity and the sparsity pattern: more structured sparsity is associated with more computational gain. In contrast to other pruning methods, SliceGPT prunes away (slices off!) entire rows or columns of the weight matrices. Before slicing, we perform a single transformation of the network which leaves the predictions invariant, but allows the slicing to have only a small effect.
62
+
63
+ The result is that weight matrices are smaller, and the signals passed between blocks of the neural network are smaller too: we reduce the embedding dimension of the neural network.
64
+
65
+ Figure 1 compares our approach with existing sparsity methods. Our contributions are as follows:
66
+
67
+ 1. We introduce the idea of computational invariance: we show that we can apply orthogonal-matrix transformations to each weight matrix in a transformer without changing the model.
68
+ 2. We use this to edit each block in a transformer architecture, such that we are projecting the signal matrix between blocks onto its own principal components. We remove columns or rows of the transformed weight matrices to reduce the model size. We call the transformation and removal of weights SliceGPT.
69
+ 3. We conduct multiple experiments on OPT (Zhang et al., 2022) and LLAMA-2 (Touvron et al., 2023) LLMs, demonstrating that SliceGPT is able to compress these models by up to $30\%$ with superior perplexity to the state of the art 2:4 scheme. On downstream tasks we additionally experiment with Phi-2 and show that all models can be sliced by up to $30\%$ while maintaining $>90\%$ of the dense performance.
70
+
71
+ # 2 BACKGROUND
72
+
73
+ In this section, we first describe some necessary background on transformer architectures, which allows us to introduce notation which we will use to prove our main results. Then we describe related work on sparsification for compressing such architectures.
74
+
75
+ # 2.1 TRANSFORMER NETWORKS
76
+
77
+ Transformer networks (Vaswani et al., 2017) are a class of neural networks that have been shown to be effective at a wide range of tasks including language modeling. The transformer architecture is composed of a series of layers, each of which is composed of a multi-head self-attention block followed by a feed-forward network block. Between each block, there is a LayerNorm (Ba et al., 2016) (or RMSNorm (Zhang & Sennrich, 2019)) block. Figure 2 illustrates part of a transformer network: an attention block connected to a Feed Forward Network (FFN) block through a LayerNorm block, with residual connections. The following describes the operations of each component (ignoring dropout, which is not applied post-training).
78
+
79
+ Embeddings Let $D$ be the embedding dimension of our transformer, $N$ be the sequence length. The transformer model takes as input a sequence of token IDs and position IDs, and uses them to index the embedding matrices, producing the initial signal $\mathbf{X}$ with shape $N\times D$ . In what follows we consider, without loss of generality, a single embedding matrix $\mathbf{W}_{\mathrm{embd}}$ indexed by input sequence $\boldsymbol{s}$ .
80
+
81
+ LayerNorm After embeddings, the signal matrix is passed through a LayerNorm operation, which subtracts the mean from each row of the matrix, divides the row by its standard deviation, rescales (columnwise), and adds an offset. We write the LayerNorm block as
82
+
83
+ $$
84
+ \operatorname{LayerNorm}(\mathbf{X}) = \operatorname{RMSNorm}(\mathbf{X}\mathbf{M})\,\operatorname{diag}(\boldsymbol{\alpha})\sqrt{D} + \mathbf{1}_{N}\boldsymbol{\beta}^{\top} \tag{1}
85
+ $$
86
+
87
+ where $\mathrm{RMSNorm}(\mathbf{X})$ applies $\pmb{x}\gets \pmb {x} / \| \pmb {x}\|$ to each row of $\mathbf{X}$ . The vector parameter $\alpha$ and offset (vector) parameter $\beta$ are learned independently at each LayerNorm instance. The constant matrix $\mathbf{M} = \mathbf{I} - \frac{1}{D}\mathbf{1}\mathbf{1}^{\top}$ is a $D\times D$ matrix which subtracts the mean from each row of $\mathbf{X}$ .
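+
+ As a quick sanity check of Equation 1, the following NumPy sketch (ours, not from the paper) compares a standard LayerNorm against the RMSNorm-based reformulation, ignoring LayerNorm's numerical-stability epsilon.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ N, D = 4, 8
+ X = rng.standard_normal((N, D))
+ alpha = rng.standard_normal(D)   # per-feature scale
+ beta = rng.standard_normal(D)    # per-feature offset
+
+ # Standard LayerNorm, applied row-wise (epsilon omitted).
+ mu = X.mean(axis=1, keepdims=True)
+ layernorm = (X - mu) / np.sqrt(X.var(axis=1, keepdims=True)) * alpha + beta
+
+ # Equation 1: RMSNorm(X M) diag(alpha) sqrt(D) + beta.
+ M = np.eye(D) - np.ones((D, D)) / D               # mean-subtraction matrix
+ XM = X @ M
+ reformulated = XM / np.linalg.norm(XM, axis=1, keepdims=True) * np.sqrt(D) * alpha + beta
+
+ assert np.allclose(layernorm, reformulated)
+ ```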
88
+
89
+ Attention Blocks The attention block has four matrices: $\mathbf{W}_k, \mathbf{W}_q, \mathbf{W}_v$ and $\mathbf{W}_o$, each of dimension $D \times D$. The input signal arriving into the block is projected into the Key $(\mathbf{X}\mathbf{W}_k)$, Query $(\mathbf{X}\mathbf{W}_q)$, and Value $(\mathbf{X}\mathbf{W}_v)$ matrices, which are then split into multiple heads. A nonlinear operation is applied at each head before the signals are combined and multiplied by the output weight matrix $\mathbf{W}_o$. Since the first three weight matrices are applied separately to the inputs, we can concatenate them and perform a single matrix multiplication (denoted by the white box around these matrices in Figure 2). We can consider the concatenation of these matrices to be a single linear layer, which we denote $\mathbf{W}_{\mathrm{in}}$. We also refer to the output matrix as $\mathbf{W}_{\mathrm{out}}$. We treat the attention block as $\sigma (\mathbf{X}\mathbf{W}_{\mathrm{in}} + \mathbf{b}_{\mathrm{in}})\mathbf{W}_{\mathrm{out}} + \mathbf{b}_{\mathrm{out}}$, where $\sigma$ represents the multi-head attention operation.
90
+
91
+ FFN Blocks The other type of block that appears in transformer architectures is a Feed Forward Network (FFN) block. In many cases, this is a Multi-layer Perceptron (MLP), which consists of a linear layer $\mathbf{W}_1$ , followed by an element-wise operation $\sigma$ , followed by a second linear layer: $\sigma(\mathbf{X}\mathbf{W}_1 + \boldsymbol{b}_1)\mathbf{W}_2 + \boldsymbol{b}_2$ . Some architectures have adopted the gated format, where an additional matrix is used, and the operation is $\left(\sigma(\mathbf{X}\mathbf{W}_1 + \boldsymbol{b}_1) \circ (\mathbf{X}\mathbf{W}_2)\right)\mathbf{W}_3$ , where $\circ$ is an element-wise product. Much like the first three linear layers in the attention module, we can consider the concatenation of $\mathbf{W}_1$ and $\mathbf{W}_2$ to be a single linear operation, and denote it $\mathbf{W}_{\mathrm{in}}$ . We can therefore denote the operation of MLP or gated FFN layers as $\sigma(\mathbf{X}\mathbf{W}_{\mathrm{in}})\mathbf{W}_{\mathrm{out}}$ , where $\sigma$ takes a different meaning to that in an attention.
92
+
93
+ Language Modelling (LM) Head All of the transformer networks to which we apply SliceGPT in this paper have a decoder-only structure following (Radford et al., 2018): after multiple layers applying alternating attention and FFN blocks, a head block computes logits which are used to compute the loss during training and token prediction on deployment. The head operation is $\mathbf{X}\mathbf{W}_{\mathrm{head}} + \mathbf{b}_{\mathrm{head}}$ , where $\mathbf{X}$ is the output of the last transformer block.
94
+
95
+ Forward pass Once the model is trained and all of the parameters are set, the computations required in a transformer network to produce predictions involve passing signal matrices from one block to the next until the head node is reached. Since we are able to define both FFN and attention blocks in the form $\sigma (\mathbf{X}\mathbf{W}_{\mathrm{in}} + \boldsymbol {b}_{\mathrm{in}})\mathbf{W}_{\mathrm{out}} + \boldsymbol {b}_{\mathrm{out}}$ , where we understand that $\sigma$ represents either a point-wise or multi-head-attention nonlinearity, we are able to describe the forward pass using Algorithm 1.
96
+
97
+ Algorithm 1 The forward pass of a transformer network
98
+ Require: $\{\mathbf{W}_{\mathrm{in}}^{\ell},\mathbf{b}_{\mathrm{in}}^{\ell},\mathbf{W}_{\mathrm{out}}^{\ell},\mathbf{b}_{\mathrm{out}}^{\ell}\}_{\ell = 1}^{L}$ // weights and biases of FFN and attention blocks
99
+ Require: $\{\sigma_{\ell}\}_{\ell = 1}^{L}$ // nonlinearity associated with each block
100
+ Require: $\{\mathrm{Norm}_{\ell}\}_{\ell = 0}^{L}$ // LayerNorm or RMSNorm instances to perform between blocks
101
+ Require: $\mathbf{W}_{\mathrm{embd}},\mathbf{W}_{\mathrm{head}},\mathbf{b}_{\mathrm{head}}$ // embedding and head matrices
102
+ Require: $s$ // input sequence
103
+ 1: $\mathbf{X}\gets \mathbf{W}_{\mathrm{embd}}[s,:]$ // index embeddings
104
+ 2: $\mathbf{X}\gets \mathrm{Norm}_0(\mathbf{X})$ // normalize
105
+ 3: for $\ell = 1\dots L$ do
106
+ 4: $\mathbf{Z}\gets \sigma_{\ell}(\mathbf{X}\mathbf{W}_{\mathrm{in}}^{\ell} + \mathbf{b}_{\mathrm{in}}^{\ell})\mathbf{W}_{\mathrm{out}}^{\ell} + \mathbf{b}_{\mathrm{out}}^{\ell}$ // apply FFN or attention
107
+ 5: $\mathbf{X}\gets \mathrm{Norm}_{\ell}(\mathbf{X} + \mathbf{Z})$ // normalize and apply residual connection
108
+ 6: end for
109
+ 7: return $\mathbf{X}\mathbf{W}_{\mathrm{head}} + \mathbf{b}_{\mathrm{head}}$ // apply model head
110
+
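+ To make Algorithm 1 concrete, here is a minimal NumPy sketch (ours, not the paper's code) with toy dimensions; a point-wise ReLU stands in for both the FFN and multi-head-attention nonlinearities $\sigma$, and RMSNorm is used as the normalization.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ D, H, V, L, N = 8, 16, 32, 3, 5   # embed dim, hidden dim, vocab size, layers, sequence length
+
+ def rmsnorm(X):
+     return X / np.linalg.norm(X, axis=1, keepdims=True)
+
+ sigma = lambda Z: np.maximum(Z, 0.0)  # stand-in for the block nonlinearity
+
+ W_embd = rng.standard_normal((V, D))
+ W_head, b_head = rng.standard_normal((D, V)), np.zeros(V)
+ blocks = [(rng.standard_normal((D, H)), np.zeros(H),
+            rng.standard_normal((H, D)), np.zeros(D)) for _ in range(L)]
+
+ s = rng.integers(0, V, size=N)        # input token IDs
+ X = W_embd[s, :]                      # line 1: index embeddings
+ X = rmsnorm(X)                        # line 2: normalize
+ for W_in, b_in, W_out, b_out in blocks:
+     Z = sigma(X @ W_in + b_in) @ W_out + b_out   # line 4: FFN or attention block
+     X = rmsnorm(X + Z)                           # line 5: residual connection + normalize
+ logits = X @ W_head + b_head          # line 7: apply model head
+ print(logits.shape)                   # (N, V)
+ ```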
111
+ # 2.2 RELATED WORK
112
+
113
+ In the simplest setting, one can employ magnitude-based sparsification, which involves setting the smallest weights in the model to zero (Han et al., 2016; Zhu & Gupta, 2017; Gale et al., 2019). Although magnitude sparsification is scalable, its application to LLMs gives too strong a degradation in performance (Frantar & Alistarh, 2023). Optimal Brain Surgeon (OBS) (Hassibi et al., 1993; LeCun et al., 1989), a more sophisticated method, systematically removes weights that have the least impact on the loss function. The method compensates for the error introduced by weight removal by updating the un-pruned weights using the inverse of the Hessian matrix. Unfortunately, OBS is impractical for models with a few million parameters due to the need to calculate and store the inverse of the Hessian matrix. To address the computational limitation posed by OBS, recent research has explored two approaches: approximating the inverse of the Hessian matrix such as WoodFisher (Singh & Alistarh, 2020) or applying it separately to each layer such as in Optimal Brain Compression (OBC, Frantar & Alistarh, 2022), known as layer-wise pruning. While these techniques have proven effective for medium-sized networks, they are not practical for large language models, where individual layer weight matrices typically contain more than $10^{8}$ parameters.
114
+
115
+ GPTQ (Frantar et al., 2022) has solved this issue by quantizing (representing the parameter using lower precision) the weight matrix of LLMs using a column-by-column scheme and updating all not-yet-quantized weights in the next columns.
116
+
117
+ SparseGPT (Frantar & Alistarh, 2023) applied the same idea for pruning and sparsifies the LLMs using unstructured and semi-structured pruning, and Sun et al. (2023) simplified the idea by using only the diagonal of the Hessian. Since achieving end-to-end speed improvements through unstructured pruning is a demanding task, they also attempted a similar technique to induce sparsity with semi-structured patterns like 2:4 and 4:8 (Mishra et al., 2021). However, implementing such structures does not maintain the accuracy of the model.
118
+
119
+ ![](images/6cf96d51045c5b918f8d4685c41fa2427e3a3567f26ef8eb06266e709cbe3666.jpg)
120
+ Figure 2: A single layer in a transformer network. The signals (inputs) arising from the previous blocks of the networks arrive at the bottom of the figure, before being passed through attention, LayerNorm, and FFN. The attention and FFN blocks both have input and output linear operations (blue) which we denote in the text as $\mathbf{W}_{\mathrm{in}}, \mathbf{W}_{\mathrm{out}}$ . The linear operations of LayerNorm $\mathbf{M}$ and $\mathrm{diag}(\alpha)$ are highlighted. This and subsequent figures do not show biases.
121
+
122
+ Another approach to compression is low-rank approximation, where each weight matrix is replaced with the product of two matrices with a smaller inner dimension, usually followed by a fine-tuning step (Hu et al., 2021; Mahabadi et al., 2021; Noach & Goldberg, 2020; Tukan et al., 2020). To achieve compression, the inner dimension must be smaller than half of the original dimension. In contrast, our method replaces each weight matrix with a single smaller one, reducing the embedding dimension without the need for fine-tuning.
123
+
124
+ We propose to delete rows and columns of weight matrices, which is similar to pruning of filters and channels in the convnet literature. There, sparsity-inducing regularization is added to batch-norm factors (Liu et al., 2017) or network structures (Huang & Wang, 2018), and the network is trained or fine-tuned, resulting in the pruning of channels or parts of the network. Perhaps the most analogous methods to ours are ThiNet (Luo et al., 2017; He et al., 2017), which apply linear operations between layers (as will we), interleaved with more fine-tuning with regularization. In this literature, the model sizes are typically several orders of magnitude smaller than in LLMs, for example the VGG16 network has 138M parameters, comparable with the very smallest OPT model that we consider. The huge size of LLMs makes methods that involve extensive fine-tuning unappealing, especially when outer-loops are needed to select regularization parameters.
125
+
126
+ Recently, some works have been proposed that apply structured pruning to LLMs, followed by continued training (or fine-tuning) to recover the performance that is lost. For example LLM-pruner (Ma et al., 2023a) removes connected structures from an LLM before further training. Contemporarily with our work, LLM Surgeon (van der Ouderaa et al., 2023) interweaves recovery fine-tuning with pruning. We provide results for SliceGPT as a single-shot method and with post-slicing recovery fine-tuning.
127
+
128
+ # 3 SLICEGPT
129
+
130
+ Our SliceGPT method relies on a computational invariance that is inherent in the transformer architecture. By this, we mean that it is possible to apply an orthogonal transformation to the output of one component, so long as it is undone in the next. Our key insight is that the RMSNorm operation which is performed between blocks of the network does not affect the transformation: the operations commute. In this section, we first describe how the invariance occurs in RMSNorm-connected transformer networks, then we note how networks trained with LayerNorm connections can be converted to RMSNorm. Next, we describe our method to compute transformations at each layer using Principal Component Analysis (PCA), such that the signal between blocks is projected onto its principal components. Finally, we describe how deleting the minor principal components corresponds to slicing away rows or columns of the modified network.
131
+
132
+ # 3.1 COMPUTATIONAL INVARIANCE IN TRANSFORMER NETWORKS
133
+
134
+ Let $\mathbf{Q}$ denote an orthogonal matrix: we have $\mathbf{Q}^{\top}\mathbf{Q} = \mathbf{Q}\mathbf{Q}^{\top} = \mathbf{I}$ . Note that multiplying a vector $\mathbf{x}$ by $\mathbf{Q}$ does not change the norm of the vector, since $\| \mathbf{Q}\mathbf{x}\| = \sqrt{\mathbf{x}^{\top}\mathbf{Q}^{\top}\mathbf{Q}\mathbf{x}} = \sqrt{\mathbf{x}^{\top}\mathbf{x}} = \| \mathbf{x}\|$ . In this work, the dimensions of $\mathbf{Q}$ will always match the embedding dimension of the transformer $D$ .
135
+
136
+ Suppose that $\mathbf{X}_{\ell}$ is the output of one block of the transformer, which is then processed by RMSNorm, and then inputted to the subsequent block as $\mathrm{RMSnorm}(\mathbf{X}_{\ell})$ . If we insert linear layers with the orthogonal matrix $\mathbf{Q}$ before $\mathrm{RMSnorm}$ and $\mathbf{Q}^{\top}$ after $\mathrm{RMSnorm}$ , the network remains unchanged, since each row of the signal matrix is multiplied by $\mathbf{Q}$ , normalized and multiplied by $\mathbf{Q}^{\top}$ . We have
137
+
138
+ $$
139
+ \operatorname{RMSNorm}\left(\mathbf{X}_{\ell}\mathbf{Q}\right)\mathbf{Q}^{\top} = \operatorname{RMSNorm}\left(\mathbf{X}_{\ell}\right). \tag{2}
140
+ $$
141
+
142
+ A proof of this relation appears in Appendix A.1. Now, since each attention or FFN block of the network has a linear operation on both the input and output, we can absorb the additional operations $\mathbf{Q}$ into the linear layers of the blocks. Since the network contains residual connections, we must also apply $\mathbf{Q}$ to the output of all previous layers (all the way back to the embedding) and to all subsequent layers (all the way up to the LM Head).
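+
+ Equation 2 is also easy to verify numerically; the following sketch (an illustration, not the paper's code) draws a random orthogonal $\mathbf{Q}$ via a QR decomposition and checks the identity.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ N, D = 5, 8
+ X = rng.standard_normal((N, D))
+ Q = np.linalg.qr(rng.standard_normal((D, D)))[0]   # random orthogonal matrix
+
+ def rmsnorm(X):
+     return X / np.linalg.norm(X, axis=1, keepdims=True)
+
+ assert np.allclose(rmsnorm(X @ Q) @ Q.T, rmsnorm(X))   # Equation 2
+ ```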
143
+
144
+ An invariant function is one for which a transformation to the input does not result in a change to the output. In our case, we can apply any orthogonal transformation $\mathbf{Q}$ to the weights of the transformer without changing the result, so the computation can be performed in any transformed state. We refer to this as a computational invariance, and define it in the following theorem.
145
+
146
+ Theorem 1. Let $\mathbf{W}_{in}^{\ell}$ and $\mathbf{W}_{out}^{\ell}$ be the weight matrices of the linear layers of the $\ell$ -th block of an RMSNorm-connected transformer network, and $\mathbf{b}_{in}^{\ell}, \mathbf{b}_{out}^{\ell}$ be the corresponding biases, if any, and let $\mathbf{W}_{embd}$ and $\mathbf{W}_{head}$ be the embedding and head matrices. Let $\mathbf{Q}$ be an orthogonal matrix of dimension $D$ . Then the following network is equivalent to the original transformer network:
147
+
148
+ $$
149
+ \tilde{\mathbf{W}}_{\mathrm{embd}} = \mathbf{W}_{\mathrm{embd}}\mathbf{Q}, \tag{3}
150
+ $$
151
+
152
+ $$
153
+ \tilde{\mathbf{W}}_{\mathrm{in}}^{\ell} = \mathbf{Q}^{\top}\mathbf{W}_{\mathrm{in}}^{\ell}, \tag{4}
154
+ $$
155
+
156
+ $$
157
+ \tilde{\mathbf{W}}_{\mathrm{out}}^{\ell} = \mathbf{W}_{\mathrm{out}}^{\ell}\mathbf{Q}, \tag{5}
158
+ $$
159
+
160
+ $$
161
+ \tilde{\boldsymbol{b}}_{\mathrm{out}}^{\ell} = \mathbf{Q}^{\top}\boldsymbol{b}_{\mathrm{out}}^{\ell}, \tag{6}
162
+ $$
163
+
164
+ $$
165
+ \tilde{\mathbf{W}}_{\mathrm{head}} = \mathbf{Q}^{\top}\mathbf{W}_{\mathrm{head}}. \tag{7}
166
+ $$
167
+
168
+ The input and head biases are copied: $\tilde{\pmb{b}}_{in}^{\ell} = \pmb{b}_{in}^{\ell}$ , $\tilde{\pmb{b}}_{head} = \pmb{b}_{head}$ .
169
+
170
+ Proof. We can show that the transformed network computes the same results as the original by stepping through Algorithm 1. Suppose that on line 1, the original network has computed $\mathbf{X}$ , then the modified network has computed $\tilde{\mathbf{X}} = \mathbf{XQ}$ , using Equation 3. Applying RMSNorm on line 2, we see that the operation of the two networks matches: by Equation 2 we have $\mathrm{RMSnorm}(\tilde{\mathbf{X}}) = \mathrm{RMSnorm}(\mathbf{XQ}) = \mathrm{RMSnorm}(\mathbf{X})\mathbf{Q}$ . Applying the nonlinearity on line 4, we see that $\tilde{\mathbf{X}}\tilde{\mathbf{W}}_{\mathrm{in}}^{\ell} = \mathbf{X}\mathbf{W}_{\mathrm{in}}^{\ell}$ , using Equation 4 and it follows that $\tilde{\mathbf{Z}} = \mathbf{ZQ}$ . On line 5 the residual connection means we have $(\tilde{\mathbf{X}} + \tilde{\mathbf{Z}}) = (\mathbf{X} + \mathbf{Z})\mathbf{Q}$ , and applying RMSnorm results in assignment of $\tilde{\mathbf{X}} = \mathbf{XQ}$ . This follows through to the end of the loop. Finally, on line 7, the transformations are undone as $\mathbf{X}\mathbf{W}_{\mathrm{head}} = \tilde{\mathbf{X}}\tilde{\mathbf{W}}_{\mathrm{head}}$ using Equation 7.
171
+
172
+ # 3.2 LAYERNORM TRANSFORMERS CAN BE CONVERTED TO RMSNORM
173
+
174
+ The computational invariance of the transformer network applies only to RMSNorm-connected networks. Before working on those with LayerNorm, we convert the network to RMSNorm by absorbing the linear blocks of LayerNorm into the adjacent blocks. Figure 3 shows such a transformation on the transformer network (see Figure 2). In each block, we multiply the output matrix $\mathbf{W}_{\mathrm{out}}$ by the mean-subtraction matrix $\mathbf{M}$ , which accounts for the mean subtraction that would happen in the subsequent LayerNorm. The input matrices $\mathbf{W}_{\mathrm{in}}$ are pre-multiplied by the scales of the preceding LayerNorm blocks. The embedding matrix $\mathbf{W}_{\mathrm{embd}}$ must be mean-subtracted, and $\mathbf{W}_{\mathrm{head}}$ must be re-scaled by the last LayerNorm scales. This is a straightforward change in the order of operations and does not affect the network output.
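+
+ The absorption step can be sketched for a single block as follows (a minimal illustration with our own variable names, biases omitted); the $\sqrt{D}$ constant comes from Equation 1.
+
+ ```python
+ import numpy as np
+
+ # Sketch of folding LayerNorm's linear pieces into neighbouring weights,
+ # so that only an RMSNorm remains between blocks.
+ rng = np.random.default_rng(0)
+ D, H, V = 8, 16, 32
+ W_in = rng.standard_normal((D, H))     # input weights of this block
+ W_out = rng.standard_normal((H, D))    # output weights of this block
+ W_embd = rng.standard_normal((V, D))
+ alpha_prev = rng.standard_normal(D)    # scales of the LayerNorm preceding this block
+
+ M = np.eye(D) - np.ones((D, D)) / D    # mean-subtraction matrix from Equation 1
+
+ W_in_new = np.sqrt(D) * np.diag(alpha_prev) @ W_in   # absorb diag(alpha) * sqrt(D)
+ W_out_new = W_out @ M                  # account for the next LayerNorm's mean subtraction
+ W_embd_new = W_embd @ M                # mean-subtract the embedding matrix
+ # The head matrix would similarly be re-scaled by the last LayerNorm's scales.
+ ```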
175
+
176
+ # 3.3 A TRANSFORMATION PER BLOCK
177
+
178
+ Now that every LayerNorm in the transformer has been converted to RMSNorm, we can select any $\mathbf{Q}$ to modify the model. Our initial plan was to collect signals from the model, construct an orthogonal matrix using those signals and to delete parts of the network. We quickly saw that the signals at different blocks of the network were not aligned, and that we would need to apply a different orthogonal matrix at each block, $\mathbf{Q}_{\ell}$ .
179
+
180
+ Allowing the orthogonal matrix used in each block to differ can be shown to leave the model
181
+
182
+ ![](images/a73d897d47e8d7ceac37c4a089b31c52821f03a4ee1b00b5769cd0bdf419bb5a.jpg)
183
+ Figure 3: Converting a transformer network from LayerNorm to RMSNorm: the scale matrix $\mathrm{diag}(\alpha)$ is absorbed into the subsequent matrix $\mathbf{W}_{\mathrm{in}}$ . Figure shows the block in combined colors. We use $(\alpha)$ for brevity. The mean-subtraction matrix $\mathbf{M}$ is applied to each matrix $\mathbf{W}_{\mathrm{out}}$ . Layer-norm becomes RMSNorm, up to a constant $\sqrt{D}$ (not shown). Here, the scaling $(\alpha^{\prime})$ comes from the previous block.
184
+
185
+ ![](images/1bf17bce2137b60b0c9aa95d84c224a41064fa609c47494056b9bb10edf1e67f.jpg)
186
+ Figure 4: With the network converted to RMSNorm (see Figure 3), we apply the computational-invariance idea. The input weight matrices $\mathrm{diag}(\alpha)\mathbf{W}_{\mathrm{in}}$ are pre-multiplied by $\mathbf{Q}^{\top}$ . The output matrices $\mathbf{W}_{\mathrm{out}}\mathbf{M}$ are post-multiplied by $\mathbf{Q}$ . In the skip-connection, a new linear layer is added $\mathbf{Q}_{\ell}^{\top}\mathbf{Q}_{\ell +1}$ . After these modifications, the matrices can be sliced (hatched areas).
187
+
188
+ unchanged using the same proof as Theorem 1, with the exception of line 5 of Algorithm 1. Here we see that the residual connection and the output of the block must have the same rotation. To fix this, we modify the residual connection by applying the linear transformation $\mathbf{Q}_{\ell -1}^{\top}\mathbf{Q}_{\ell}$ to the residual. Figure 4 shows how different rotations can be applied to different blocks with the additional linear operation in the residual connection. Unlike the modifications to the weight matrices, these additional operations cannot be pre-computed and add a small $(D\times D)$ overhead to the model. Nonetheless, they are needed to allow slicing the model (Section 3.4) and we see real speedup overall (Section 4).
189
+
190
+ To compute the matrices $\mathbf{Q}_{\ell}$ , we use PCA. We select a calibration dataset from the training set, run it through the model (after converting LayerNorm operations into RMSNorm), and extract the orthogonal matrix of the layer. We use the output of the transformed network to calculate the orthogonal matrices of the next layers. More precisely, if $\mathbf{X}_{\ell,i}$ is the output of the $\ell^{\mathrm{th}}$ RMSNorm block for the $i^{\mathrm{th}}$ sequence in the calibration dataset, we compute
191
+
192
+ $$
193
+ \mathbf{C}_{\ell} = \sum_{i}\mathbf{X}_{\ell,i}^{\top}\mathbf{X}_{\ell,i} \tag{8}
194
+ $$
195
+
196
+ and set $\mathbf{Q}_{\ell}$ to be the eigenvectors of $\mathbf{C}_{\ell}$, sorted by decreasing eigenvalues.
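+
+ A direct way to realize this computation is sketched below (our illustration, not the released code): accumulate $\mathbf{C}_{\ell}$ over the calibration outputs in double precision (see Section 4) and sort the eigenvectors by decreasing eigenvalue.
+
+ ```python
+ import torch
+
+ def compute_Q(calibration_outputs):
+     """calibration_outputs: list of (N_i x D) outputs of the l-th RMSNorm block."""
+     D = calibration_outputs[0].shape[1]
+     C = torch.zeros(D, D, dtype=torch.float64)
+     for X in calibration_outputs:
+         X = X.double()
+         C += X.T @ X                                 # Equation 8
+     eigvals, eigvecs = torch.linalg.eigh(C)          # eigenvalues in ascending order
+     order = torch.argsort(eigvals, descending=True)  # sort by decreasing eigenvalue
+     return eigvecs[:, order]                         # Q_l, columns = principal directions
+
+ # Example with random stand-in activations:
+ Q = compute_Q([torch.randn(16, 8) for _ in range(4)])
+ print(Q.shape)  # torch.Size([8, 8])
+ ```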
197
+
198
+ # 3.4 SLICING
199
+
200
+ The goal of Principal Component Analysis is usually to take a data matrix $\mathbf{X}$ and compute a lower dimensional representation $\mathbf{Z}$ , and an approximate reconstruction $\tilde{\mathbf{X}}$ :
201
+
202
+ $$
203
+ \mathbf{Z} = \mathbf{X}\mathbf{Q}\mathbf{D}, \quad \tilde{\mathbf{X}} = \mathbf{Z}\mathbf{D}^{\top}\mathbf{Q}^{\top}. \tag{9}
204
+ $$
205
+
206
+ where $\mathbf{Q}$ is the eigenvectors of $\mathbf{X}^{\top}\mathbf{X}$ , and $\mathbf{D}$ is a $D\times D_{\mathrm{small}}$ deletion matrix (containing $D_{\mathrm{small}}$ columns of the $D\times D$ identity matrix), which removes some of the columns of the matrix to the left. The reconstruction is $L_{2}$ optimal, in the sense that $\mathbf{Q}\mathbf{D}$ is a linear mapping that minimizes $\| \mathbf{X} - \tilde{\mathbf{X}}\|^2$ .
207
+
208
+ When we apply PCA to the signal matrix $\mathbf{X}$ between blocks, we never materialize the $N\times D$ signal matrix, but we apply the deletion matrix $\mathbf{D}$ to the operations preceding and succeeding the construction of that matrix, which have already been multiplied by $\mathbf{Q}$ in the above. We delete rows of $\mathbf{W}_{\mathrm{in}}$ and columns of $\mathbf{W}_{\mathrm{out}}$ and $\mathbf{W}_{\mathrm{embd}}$ . We also delete both rows and columns of the matrix $\mathbf{Q}_{\ell -1}^{\top}\mathbf{Q}_{\ell}$ that we have inserted into the residual connection (see Figure 4).
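+
+ Putting Equations 4, 5 and 9 together, the slicing step for one block reduces to a few matrix products and slices, sketched here with illustrative names (a random orthogonal matrix stands in for the PCA basis):
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ D, H, D_small = 8, 16, 6
+ W_in = rng.standard_normal((D, H))
+ W_out = rng.standard_normal((H, D))
+ Q = np.linalg.qr(rng.standard_normal((D, D)))[0]       # stand-in for Q_l (columns sorted by eigenvalue)
+ Q_prev = np.linalg.qr(rng.standard_normal((D, D)))[0]  # stand-in for Q_{l-1}
+
+ W_in_sliced = (Q.T @ W_in)[:D_small, :]     # delete rows of the rotated W_in
+ W_out_sliced = (W_out @ Q)[:, :D_small]     # delete columns of the rotated W_out
+ residual_adapter = (Q_prev.T @ Q)[:D_small, :D_small]  # sliced in both rows and columns
+ print(W_in_sliced.shape, W_out_sliced.shape, residual_adapter.shape)
+ ```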
209
+
210
+ # 4 EXPERIMENTAL VALIDATION
211
+
212
+ Setup We use Hugging Face Transformers (Wolf et al., 2019) to implement our code with PyTorch (Paszke et al., 2019). The computation of $\mathbf{Q}$ is performed on a single H100 GPU with 80GB of memory, taking approximately 3.5 hours to complete for the LLAMA-2 70B model. During the PCA calculation, we use double precision for computing the eigenvectors of the covariance matrix. We find that using single precision for eigenvector calculations in PyTorch leads to a discrepancy in the final accuracy, as detailed in Appendix A.2.
213
+
214
+ We experiment with two different calibration sets: the WikiText-2 training dataset (Merity et al., 2016) and the Alpaca training dataset (Taori et al., 2023). An ablation study on the calibration set size and sequence length is presented in Appendix A.3.
215
+
216
+ Models, Tasks, and GPUs We run our experiments on the OPT (Zhang et al., 2022) and LLAMA-2 (Touvron et al., 2023) model families, and additionally evaluate Phi-2 in our zero-shot task experiments. We exclude OPT 175B, as it is outperformed by smaller LLAMA-2 models. Nonetheless, we anticipate that this larger model would yield improved results, as larger models typically offer more promising opportunities for compression (see Section 4.1). We evaluate our scheme on both language generation and popular zero-shot tasks. To demonstrate the comprehensive speedup achieved by SliceGPT we use Quadro RTX6000 GPUs with 24GB of memory as a representative example of consumer-level GPUs, and 40GB A100s and 80GB H100s to provide datacenter-level benchmarks.
217
+
218
+ Baseline Setup We initially planned to compare our results against a scheme that pruned columns (or rows) with the smallest norm but found that this baseline was very poor, with the WikiText-2 perplexity of the model soaring into the 1000s after pruning just a few columns. Instead, we compare SliceGPT against SparseGPT (Frantar & Alistarh, 2023) employing a 2:4 sparsity ratio, as this is the only sparsity scheme which achieves speedup (Mishra et al., 2021).
219
+
220
+ # 4.1 RESULTS
221
+
222
+ Generation Task We begin by showcasing our findings using the WikiText-2 dataset. In this context, we evaluate the performance of both the OPT and LLAMA-2 model families across different sizes when using this dataset for slicing. Table 1 shows the perplexity obtained at various slicing levels. SliceGPT exhibits superior performance when applied to OPT models compared to LLAMA-2 models, which matches our intuition from the spectrum analysis of those models (see Appendix A.4 for our discussion). The performance of SliceGPT improves as the model size increases. Comparing SliceGPT with SparseGPT, we see that SparseGPT 2:4 performs worse than SliceGPT with $25\%$ slicing in all LLAMA-2 models. For OPT, we see that $30\%$ sliced models beat 2:4 sparsity for all model sizes except 2.7B.
223
+
224
+ Table 1: OPT and Llama-2 perplexity results on WikiText2. The calibration set size and sequence length are 1024 and 2048, respectively.
225
+
226
+ <table><tr><td rowspan="2">Method</td><td colspan="7">OPT</td><td colspan="3">LLAMA-2</td></tr><tr><td>125M</td><td>1.3B</td><td>2.7B</td><td>6.7B</td><td>13B</td><td>30B</td><td>66B</td><td>7B</td><td>13B</td><td>70B</td></tr><tr><td>Dense</td><td>27.64</td><td>14.61</td><td>12.46</td><td>10.85</td><td>10.12</td><td>9.56</td><td>9.33</td><td>5.47</td><td>4.88</td><td>3.32</td></tr><tr><td>SparseGPT 2:4</td><td>45.07</td><td>29.61</td><td>14.90</td><td>13.00</td><td>11.80</td><td>10.53</td><td>10.22</td><td>8.69</td><td>7.07</td><td>4.98</td></tr><tr><td>SliceGPT (10%)</td><td>29.34</td><td>15.10</td><td>12.75</td><td>10.92</td><td>10.27</td><td>9.65</td><td>9.43</td><td>5.89</td><td>5.21</td><td>3.69</td></tr><tr><td>SliceGPT (20%)</td><td>34.26</td><td>16.43</td><td>13.73</td><td>11.48</td><td>10.66</td><td>9.87</td><td>9.57</td><td>6.64</td><td>5.81</td><td>4.25</td></tr><tr><td>SliceGPT (25%)</td><td>37.74</td><td>17.46</td><td>14.56</td><td>11.90</td><td>10.94</td><td>10.04</td><td>9.68</td><td>7.24</td><td>6.30</td><td>4.60</td></tr><tr><td>SliceGPT (30%)</td><td>43.98</td><td>19.09</td><td>15.83</td><td>12.51</td><td>11.33</td><td>10.27</td><td>9.85</td><td>8.12</td><td>6.99</td><td>5.05</td></tr></table>
227
+
228
+ Zero-shot Tasks We assess SliceGPT across five well-known zero-shot tasks: PIQA (Bisk et al., 2020); WinoGrande (Sakaguchi et al., 2021); HellaSwag (Zellers et al., 2019); ARC-e and ARC-c (Clark et al., 2018). Following similar work (Frantar & Alistarh, 2023; Dettmers et al., 2022; Frantar et al., 2022; Dettmers et al., 2023), we use the LM Evaluation Harness (Gao et al., 2021) with default parameters in our evaluations.
229
+
230
+ Figure 5 shows the average scores achieved by the sliced models across these tasks. The top row of the plot shows the mean accuracy when WikiText-2 is used for calibration, and the bottom row shows the accuracy when Alpaca is used for calibration. We observe a similar pattern to the generation task in the results: the OPT models are more amenable to compression than the LLAMA-2 models, and the reduction in accuracy is less pronounced in the larger models. Here we also include the Phi-2 model: we see that sliced versions of the Phi-2 model are comparable with sliced versions of the LLAMA-2 7B model. The largest OPT and LLAMA-2 models can be compressed very effectively, with just a few percentage points loss when removing $30\%$ of the 66B OPT model.
231
+
232
+ We additionally experiment here with recovery fine-tuning (RFT). We apply a small amount of RFT to sliced LLAMA-2 and Phi-2 models using LoRA (Hu et al., 2021), following the idea from Ma
233
+
234
+ et al. (2023a). For models sliced with WikiText-2 we use approximately 1k sequences, for those sliced with the Alpaca dataset we use 5k. For all RFT we use lora_r = 32, lora_alpha = 10 and sequence length 1024, and use defaults for all other hyperparameters in the Hugging Face PEFT package (Mangrulkar et al., 2022).
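+
+ For reference, a minimal sketch of such a LoRA setup with the Hugging Face PEFT package is shown below; gpt2 is only a stand-in for a sliced model, and the fine-tuning loop itself is omitted.
+
+ ```python
+ from transformers import AutoModelForCausalLM
+ from peft import LoraConfig, get_peft_model
+
+ # "gpt2" is only a placeholder base model; in practice this would be a model
+ # already compressed with SliceGPT.
+ model = AutoModelForCausalLM.from_pretrained("gpt2")
+ lora_config = LoraConfig(r=32, lora_alpha=10, task_type="CAUSAL_LM")
+ model = get_peft_model(model, lora_config)
+ model.print_trainable_parameters()
+ # Fine-tuning (sequence length 1024, roughly 1k WikiText-2 or 5k Alpaca sequences)
+ # would then proceed with a standard Hugging Face training loop, keeping PEFT defaults.
+ ```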
235
+
236
+ Figure 6 shows the results. We see a marked difference between RFT on WikiText-2 and Alpaca datasets, with the Alpaca dataset giving much higher performing models. We attribute this difference to the similarity between Alpaca and the benchmark tasks. For the largest LLAMA-2 70B model sliced at $30\%$ , with RFT on Alpaca we are able to achieve an average accuracy of $74.3\%$ , compared to $76.6\%$ on the dense model. The sliced model has approximately 51.6B parameters and considerably improved throughput as we demonstrate later.
237
+
238
+ We see that Phi-2 is not able to recover the drop in accuracy from slicing using only the WikiText-2 dataset, but using Alpaca we are able to recover several percentage points. The average accuracy of Phi-2 with $25\%$ slicing and RFT is $65.2\%$ , compared to $72.2\%$ with the dense model. The sliced model has approximately 2.2B parameters and retains $90.3\%$ of the accuracy of the 2.8B model. This shows that even small LMs can benefit from post-training pruning. Tables of accuracies across each task are provided in Appendix A.5.
239
+
240
+ ![](images/28816c79b962e6bf438610da7dac6e9bc7f9b266a984b0b7b442eb622b8d3710.jpg)
241
+ Figure 5: Mean zero-shot accuracy on OPT, LLAMA-2 and Phi-2 across multiple tasks after slicing with the WikiText-2 (top) and Alpaca (bottom) datasets for calibration.
242
+
243
+ ![](images/5cbde1da372dd6f76ae5622257b5b96e7661a928f34af224ab8746c52ae141a7.jpg)
244
+ Figure 6: Mean zero-shot accuracy on Llama-2 and Phi-2 across multiple tasks after slicing and recovery fine-tuning (RFT). Left: WikiText-2 used for calibration and RFT. Right: Alpaca used for calibration and RFT. Despite an extensive search, we were not able to find RFT parameters that enabled improved performance in the OPT models.
245
+
246
+ ![](images/6328353a432fcd77aaba844942fda591dd84fea68dcef08f11db039029edde56.jpg)
247
+
248
+ Benchmarking Throughput Unlike conventional sparsity methods, which introduce sparsity in $\mathbf{W}_{\mathrm{in}}$ and $\mathbf{W}_{\mathrm{out}}$, SliceGPT also introduces (structured) sparsity in $\mathbf{X}$: entire columns of $\mathbf{X}$ are sliced off, reducing the embedding dimension. This reduces both the compute (in flops) and the data movement in our compressed model.
249
+
250
+ The token throughput of models sliced at $25\%$ and $50\%$ is compared to the dense model on 80GB H100 GPUs. We set the sequence length to 128 and find the maximum throughput by doubling the batch size until the GPUs run out of memory or the throughput drops off. The $25\%$ sliced models achieve up to $1.55 \times$ throughput improvement over the dense model. At $50\%$ slicing the largest models require only one GPU instead of two, with large increases in throughput: $3.13 \times$ and $1.87 \times$. This means that for a fixed number of GPUs, these models achieve $6.26 \times$ and $3.75 \times$ the throughput of a dense model. We note that the WikiText-2 perplexity of SliceGPT at $50\%$ is worse than SparseGPT 2:4, but the throughput is much higher than could be achieved with a sparse method that does not slice $\mathbf{X}$. For models of size 13B, the throughput gain from increasing the batch size is less pronounced because the models take up little of the GPU memory. On consumer-grade GPUs (with less memory) the throughput gains for these smaller models are likely to be larger.
251
+
252
+ Inference Time Next we study the end-to-end runtime of a model compressed with SliceGPT. Table 2 compares the time of generating a single token in OPT 66B and LLAMA-2 70B models on Quadro RTX6000 and A100 GPUs. We observe a speedup of $16 - 17\%$ on RTX6000 GPUs when employing $25\%$ slicing, and $11 - 13\%$ on A100s. We reduce the number of GPUs used in both cases, providing energy and cost savings relative to deployment of the dense model. For LLAMA-2 70B, the compute required using RTX6000 GPUs is reduced to $64\%$, from 1764 GPU-ms to 1075 GPU-ms per token. We attribute this improvement to our approach of substituting weight matrices with smaller ones and using dense kernels in our compressed models, which is infeasible with other pruning schemes.
253
+
254
+ Table 2: Average per-token inference time of SliceGPT when generating sequences of length 128 (with batch size of 1). In each case, we show the time taken in ms, the number of GPUs required and the total compute in GPU-ms.
255
+
256
+ <table><tr><td>GPU Type</td><td>Slicing</td><td colspan="2">OPT 66B</td><td colspan="2">LLAMA-2 70B</td></tr><tr><td rowspan="2">A100 (40GB)</td><td>Dense</td><td>114ms on 4 GPUs</td><td>456 GPU-ms</td><td>125ms on 4 GPUs</td><td>500 GPU-ms</td></tr><tr><td>25%</td><td>102ms on 3 GPUs</td><td>306 GPU-ms</td><td>110ms on 3 GPUs</td><td>330 GPU-ms</td></tr><tr><td rowspan="2">Quadro RTX6000 (24GB)</td><td>Dense</td><td>237ms on 6 GPUs</td><td>1422 GPU-ms</td><td>252ms on 7 GPUs</td><td>1764 GPU-ms</td></tr><tr><td>25%</td><td>204ms on 5 GPUs</td><td>1020 GPU-ms</td><td>215ms on 5 GPUs</td><td>1075 GPU-ms</td></tr></table>
257
+
258
+ End-to-end performance gains are not feasible with our baseline SparseGPT 2:4 at the time of writing. Instead, we compare SliceGPT with SparseGPT 2:4 by comparing the relative timing of each operation involved in a transformer layer. We find that SliceGPT (25%) is competitive with SparseGPT (2:4) in terms of speedup and perplexity for large models. For further details see Appendix A.7.
259
+
260
+ Compute cost All LLAMA-2, OPT and Phi-2 models can be sliced on a single GPU in 1 to 3 hours. With recovery fine-tuning we compress all LMs in 1 to 5 hours total, as shown in Table 3.
261
+
262
+ Table 3: Compute cost of slicing $30\%$ with SliceGPT and performing recovery fine-tuning using the Alpaca dataset. Here we use a calibration set size of 1024 for LLAMA-2 models and 2048 for Phi-2, and calibration sequence length 2048 in all cases.
263
+
264
+ <table><tr><td rowspan="2">Model</td><td colspan="2">SliceGPT 30%</td><td colspan="2">Recovery fine-tuning</td><td rowspan="2">Total</td></tr><tr><td>Time</td><td>GPUs</td><td>Time</td><td>GPUs</td></tr><tr><td>LLAMA-2 7B</td><td>0h44m</td><td>1xH100 80GB</td><td>0h23m</td><td>1xH100 80GB</td><td>1h07m</td></tr><tr><td>LLAMA-2 13B</td><td>1h08m</td><td>1xH100 80GB</td><td>0h44m</td><td>1xH100 80GB</td><td>1h52m</td></tr><tr><td>LLAMA-2 70B</td><td>3h31m</td><td>1xH100 80GB</td><td>1h35m</td><td>4xH100 80GB</td><td>5h06m</td></tr><tr><td>Phi-2</td><td>0h49m</td><td>1xV100 32GB</td><td>1h59m</td><td>1xV100 32GB</td><td>2h48m</td></tr></table>
265
+
266
+ # 5 CONCLUSION AND FUTURE WORK
267
+
268
+ We have introduced SliceGPT, which enables structured pruning of large language models. We reduce the cost of inference of Llama-2 70B on 40GB A100 GPUs to $66\%$ of that of the dense model without any additional code optimization, requiring fewer GPUs (from 4 to 3) while maintaining better held-out perplexity than SparseGPT 2:4. On 24GB RTX6000 GPUs, the cost of inference is reduced to $64\%$, requiring 2 fewer GPUs (from 7 to 5). On zero-shot downstream tasks, slicing OPT 66B, Llama-2 70B and Phi-2 at $25\%$ maintains $99\%$, $96\%$ and $87\%$ of the dense performance. With recovery fine-tuning, $25\%$-sliced Llama-2 70B and Phi-2 increase to $99\%$ and $90\%$ respectively.
269
+
270
+ Opportunities remain to build on our method. Smaller but dense LMs perform better than LMs with 13B parameters or less pruned to similar sizes, though we do not expect this to remain the case for long. Our pruned models have more parameters than those pruned with SparseGPT but our method allows for larger batch sizes to be loaded into GPU memory, and has no overhead for sparsity structure: perhaps a combined method could obtain the best of both. Other methods of computing $\mathbf{Q}$ could improve the results. To further decrease the inference time and GPU count, complementary methods including quantization (Xiao et al., 2023; Dettmers et al., 2022; Ashkboos et al., 2023; Dettmers et al., 2023; Frantar et al., 2022), and structural pruning (e.g. Ma et al., 2023b) could be used.
271
+
272
+ We hope that our observation of computational invariance can help future research in improving the efficiency of deep learning models, and perhaps inspire new theoretical insights.
273
+
274
+ # ACKNOWLEDGEMENTS
275
+
276
+ We thank Dmitry Kats, Pashmina Cameron, Pavel Myshkov and Liana Mikaelyan for their invaluable contributions to the source code. We additionally thank Pashmina Cameron for her helpful feedback when reviewing early versions of the paper.
277
+
278
+ # REFERENCES
279
+
280
+ Saleh Ashkboos, Ilia Markov, Elias Frantar, Tingxuan Zhong, Xincheng Wang, Jie Ren, Torsten Hoefler, and Dan Alistarh. Towards end-to-end 4-bit inference on generative large language models. arXiv preprint arXiv:2310.09259, 2023.
281
+ Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016.
282
+ Yonatan Bisk, Rowan Zellers, Ronan Le Bras, Jianfeng Gao, and Yejin Choi. PIQA: Reasoning about physical commonsense in natural language. In Thirty-Fourth AAAI Conference on Artificial Intelligence, 2020.
283
+ Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. ArXiv, abs/1803.05457, 2018. URL https://api.semanticscholar.org/CorpusID:3922816.
284
+ Tim Dettmers, Mike Lewis, Younes Belkada, and Luke Zettlemoyer. LLM. int8(): 8-bit matrix multiplication for transformers at scale. arXiv preprint arXiv:2208.07339, 2022.
285
+ Tim Dettmers, Ruslan Svirschevski, Vage Egiazarian, Denis Kuznedelev, Elias Frantar, Saleh Ashkboos, Alexander Borzunov, Torsten Hoefler, and Dan Alistarh. Spqr: A sparse-quantized representation for near-lossless LLM weight compression. arXiv preprint arXiv:2306.03078, 2023.
286
+ Elias Frantar and Dan Alistarh. Optimal brain compression: A framework for accurate post-training quantization and pruning. Advances in Neural Information Processing Systems, 35:4475-4488, 2022.
287
+ Elias Frantar and Dan Alistarh. SparseGPT: Massive language models can be accurately pruned in one-shot. 2023.
288
+
289
+ Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. GPTQ: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323, 2022.
290
+ Trevor Gale, Erich Elsen, and Sara Hooker. The state of sparsity in deep neural networks, 2019.
291
+ Leo Gao, Jonathan Tow, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Kyle McDonell, Niklas Muennighoff, et al. A framework for few-shot language model evaluation. Version v0.0.1. Sept, 2021.
292
+ Amir Gholami, Sehoon Kim, Zhen Dong, Zhewei Yao, Michael W. Mahoney, and Kurt Keutzer. A survey of quantization methods for efficient neural network inference. CoRR, abs/2103.13630, 2021. URL https://arxiv.org/abs/2103.13630.
293
+ Manish Gupta and Puneet Agrawal. Compression of deep learning models for text: A survey, 2021.
294
+ Song Han, Huizi Mao, and William J. Dally. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding, 2016.
295
+ Babak Hassibi, David G Stork, and Gregory J Wolff. Optimal brain surgeon and general network pruning. In IEEE international conference on neural networks, pp. 293-299. IEEE, 1993.
296
+ Yihui He, Xiangyu Zhang, and Jian Sun. Channel pruning for accelerating very deep neural networks. In Proceedings of the IEEE international conference on computer vision, pp. 1389-1397, 2017.
297
+ Torsten Hoefler, Dan Alistarh, Tal Ben-Nun, Nikoli Dryden, and Alexandra Peste. Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks. CoRR, abs/2102.00554, 2021. URL https://arxiv.org/abs/2102.00554.
298
+ Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models, 2021.
299
+ Zehao Huang and Naiyan Wang. Data-driven sparse structure selection for deep neural networks. In Proceedings of the European conference on computer vision (ECCV), pp. 304-320, 2018.
300
+ Yann LeCun, John Denker, and Sara Solla. Optimal brain damage. Advances in neural information processing systems, 2, 1989.
301
+ Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan, and Changshui Zhang. Learning efficient convolutional networks through network slimming. In Proceedings of the IEEE international conference on computer vision, pp. 2736-2744, 2017.
302
+ Jian-Hao Luo, Jianxin Wu, and Weiyao Lin. Thinet: A filter level pruning method for deep neural network compression. In Proceedings of the IEEE international conference on computer vision, pp. 5058-5066, 2017.
303
+ Xinyin Ma, Gongfan Fang, and Xinchao Wang. Llm-pruner: On the structural pruning of large language models. arXiv preprint arXiv:2305.11627, 2023a. URL https://arxiv.org/pdf/2305.11627.pdf.
304
+ Xinyin Ma, Gongfan Fang, and Xinchao Wang. LLM-pruner: On the structural pruning of large language models, 2023b.
305
+ Rabeeh Karimi Mahabadi, James Henderson, and Sebastian Ruder. Compacter: Efficient low-rank hypercomplex adapter layers, 2021.
306
+ Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, Sayak Paul, and Benjamin Bossan. Peft: State-of-the-art parameter-efficient fine-tuning methods. https://github.com/huggingface/peft, 2022.
307
+ Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016.
308
+ Asit Mishra, Jorge Albericio Latorre, Jeff Pool, Darko Stosic, Dusan Stosic, Ganesh Venkatesh, Chong Yu, and Paulius Micikevicius. Accelerating sparse deep neural networks. arXiv preprint arXiv:2104.08378, 2021.
309
+
310
+ Matan Ben Noach and Yoav Goldberg. Compressing pre-trained language models by matrix decomposition. In Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing, pp. 884-889, Suzhou, China, December 2020. Association for Computational Linguistics. URL https://aclanthology.org/2020.aacl-main.88.
311
+ Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. PyTorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019.
312
+ Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018.
313
+ Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. Communications of the ACM, 64(9):99-106, 2021.
314
+ Sidak Pal Singh and Dan Alistarh. Woodfisher: Efficient second-order approximation for neural network compression. Advances in Neural Information Processing Systems, 33:18098-18109, 2020.
315
+ Mingjie Sun, Zhuang Liu, Anna Bair, and J Zico Kolter. A simple and effective pruning approach for large language models. arXiv preprint arXiv:2306.11695, 2023.
316
+ Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.
317
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models, 2023.
318
+ Murad Tukan, Alaa Maalouf, Matan Weksler, and Dan Feldman. Compressed deep networks: Goodbye SVD, hello robust low-rank approximation. arXiv preprint arXiv:2009.05647, 2020.
319
+ Tycho FA van der Ouderaa, Markus Nagel, Mart van Baalen, Yuki M Asano, and Tijmen Blankevoort. The llm surgeon. arXiv preprint arXiv:2312.17244, 2023.
320
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.
321
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierrick Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. HuggingFace's Transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771, 2019.
322
+ Guangxuan Xiao, Ji Lin, Mickael Seznec, Hao Wu, Julien Demouth, and Song Han. Smoothquant: Accurate and efficient post-training quantization for large language models. In International Conference on Machine Learning, pp. 38087-38099. PMLR, 2023.
323
+ Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? arXiv preprint arXiv:1905.07830, 2019.
324
+
325
+ Biao Zhang and Rico Sennrich. Root mean square layer normalization. Advances in Neural Information Processing Systems, 32, 2019.
326
+ Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068, 2022.
327
+ Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. A survey of large language models. arXiv preprint arXiv:2303.18223, 2023.
328
+ Michael Zhu and Suyog Gupta. To prune, or not to prune: exploring the efficacy of pruning for model compression, 2017.
329
+ Xunyu Zhu, Jian Li, Yong Liu, Can Ma, and Weiping Wang. A survey on model compression for large language models. arXiv preprint arXiv:2308.07633, 2023.
330
+
331
+ # A APPENDIX
332
+
333
+ # A.1 PROOF OF EQUATION 2
334
+
335
+ An orthogonal matrix $\mathbf{Q}$ is a square matrix that satisfies the relation $\mathbf{Q}^{\top}\mathbf{Q} = \mathbf{Q}\mathbf{Q}^{\top} = \mathbf{I}$ . The norm of a vector is the square-root of the sum of squares of the elements: $\| \boldsymbol{x}\| = \sqrt{\sum_{i}\boldsymbol{x}_{i}^{2}} = \sqrt{\boldsymbol{x}^{\top}\boldsymbol{x}}$ . Multiplying a vector by $\mathbf{Q}$ does not change the norm since $\| \mathbf{Q}\mathbf{x}\| = \sqrt{\boldsymbol{x}^{\top}\mathbf{Q}^{\top}\mathbf{Q}\boldsymbol{x}} = \| \boldsymbol{x}\|$ .
336
+
337
+ The RMSNorm operation divides each row of the input matrix $\mathbf{X}$ by its norm. By the basic rules of linear algebra, if $\pmb{x}$ is a row of $\mathbf{X}$, then $\mathbf{Q}^{\top}\pmb{x}$ is the corresponding row of $\mathbf{XQ}$. Since multiplication by $\mathbf{Q}$ preserves norms, applying RMSNorm to $\mathbf{XQ}$ maps this row to $\frac{1}{\|\pmb{x}\|}\mathbf{Q}^{\top}\pmb{x}$. Multiplying the result on the right by $\mathbf{Q}^{\top}$ then maps the row to $\frac{1}{\|\pmb{x}\|}\mathbf{QQ}^{\top}\pmb{x} = \frac{1}{\|\pmb{x}\|}\pmb{x}$, which is exactly the corresponding row of $\operatorname{RMSNorm}(\mathbf{X})$. Thus we have the relation
338
+
339
+ $$
340
+ \operatorname{RMSNorm}(\mathbf{XQ})\,\mathbf{Q}^{\top} = \operatorname{RMSNorm}(\mathbf{X}). \tag{10}
341
+ $$
342
+
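+ To make the argument concrete, the following minimal PyTorch sketch (ours, not from the paper; names are illustrative) builds a random orthogonal matrix via a QR decomposition and checks the identity numerically, using the same definition of RMSNorm as the proof above (each row divided by its norm, scale parameters omitted).
+
+ ```python
+ import torch
+
+ def rms_norm(x: torch.Tensor) -> torch.Tensor:
+     # Divide each row by its norm, as in the proof above (learnable scales omitted).
+     return x / x.norm(dim=-1, keepdim=True)
+
+ torch.manual_seed(0)
+ d = 64
+ X = torch.randn(8, d, dtype=torch.float64)
+
+ # A random orthogonal matrix from the QR decomposition of a Gaussian matrix.
+ Q, _ = torch.linalg.qr(torch.randn(d, d, dtype=torch.float64))
+
+ lhs = rms_norm(X @ Q) @ Q.T  # RMSNorm(XQ) Q^T
+ rhs = rms_norm(X)            # RMSNorm(X)
+ print(torch.allclose(lhs, rhs, atol=1e-10))  # True
+ ```
+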
343
+ # A.2 SINGLE PRECISION EIGENVALUE CALCULATION
344
+
345
+ As noted in Section 4, we perform the PCA computation in double precision to mitigate numerical errors that can arise when computing the orthogonal matrices in SliceGPT. It is nevertheless interesting to investigate how using lower precision for the PCA calculation affects the final accuracy.
346
+
347
+ Table 4 shows the perplexity of all our models when we apply FP32 PCA in our scheme. The accuracy of the larger models can be noticeably affected by numerical errors in the PCA calculation (compare, for example, the LLAMA-2 70B rows with Table 5). We use PyTorch (torch.linalg) to compute the eigenvectors and eigenvalues.
348
+
349
+ Table 4: OPT and LLAMA-2 perplexity results on WikiText2 using FP32 PCA calculation. The calibration set size and sequence length are 128 and 2048, respectively.
350
+
351
+ <table><tr><td rowspan="2">Method</td><td colspan="7">OPT</td><td colspan="3">LLAMA-2</td></tr><tr><td>125M</td><td>1.3B</td><td>2.7B</td><td>6.7B</td><td>13B</td><td>30B</td><td>66B</td><td>7B</td><td>13B</td><td>70B</td></tr><tr><td>Dense</td><td>27.64</td><td>14.61</td><td>12.46</td><td>10.85</td><td>10.12</td><td>9.56</td><td>9.33</td><td>5.47</td><td>4.88</td><td>3.32</td></tr><tr><td>SparseGPT 2:4</td><td>45.07</td><td>29.61</td><td>14.90</td><td>13.00</td><td>11.80</td><td>10.53</td><td>10.22</td><td>8.69</td><td>7.07</td><td>4.98</td></tr><tr><td>SliceGPT 10%</td><td>29.48</td><td>15.15</td><td>12.83</td><td>11.05</td><td>10.28</td><td>9.68</td><td>9.45</td><td>6.51</td><td>5.64</td><td>4.20</td></tr><tr><td>SliceGPT 20%</td><td>34.12</td><td>16.51</td><td>13.87</td><td>11.64</td><td>10.73</td><td>9.94</td><td>9.80</td><td>7.30</td><td>6.07</td><td>5.82</td></tr><tr><td>SliceGPT 25%</td><td>38.25</td><td>17.67</td><td>14.78</td><td>12.14</td><td>11.08</td><td>10.15</td><td>9.81</td><td>8.52</td><td>6.65</td><td>7.01</td></tr><tr><td>SliceGPT 30%</td><td>44.17</td><td>19.33</td><td>16.20</td><td>12.82</td><td>11.53</td><td>10.43</td><td>9.99</td><td>10.41</td><td>7.49</td><td>8.75</td></tr></table>
352
+
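+ As a rough illustration of the precision issue (our own sketch, not the released SliceGPT code; shapes and names are illustrative), the snippet below decomposes the same covariance-style matrix in FP32 and FP64 with torch.linalg.eigh and measures how far each eigenvector matrix deviates from orthogonality.
+
+ ```python
+ import torch
+
+ torch.manual_seed(0)
+ d, n = 1024, 4096  # stand-ins for the hidden size and number of calibration vectors
+ X64 = torch.randn(n, d, dtype=torch.float64)
+ cov64 = X64.T @ X64              # PCA input matrix in double precision
+ cov32 = cov64.to(torch.float32)  # the same matrix cast to single precision
+
+ _, vecs64 = torch.linalg.eigh(cov64)
+ _, vecs32 = torch.linalg.eigh(cov32)
+
+ eye = torch.eye(d, dtype=torch.float64)
+ err64 = (vecs64.T @ vecs64 - eye).abs().max().item()
+ err32 = (vecs32.double().T @ vecs32.double() - eye).abs().max().item()
+ print(f"max deviation from orthogonality  fp64: {err64:.1e}  fp32: {err32:.1e}")
+ ```
+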
353
+ # A.3 SENSITIVITY TO THE CALIBRATION SET SIZE AND SEQUENCE LENGTH
354
+
355
+ We present an ablation study to examine the role of the WikiText-2 calibration set. We focus on the generation task with $25\%$ sparsity using OPT 6.7B and LLAMA-2 7B models.
356
+
357
+ ![](images/190dc57d53c0e1bc9d3b4dca93b6d5c57f0aeb38e8eb46e45be3f9dae6bced67.jpg)
358
+ Figure 7: The effect of the calibration set size and sequence length on perplexity of WikiText2.
359
+
360
+ ![](images/f45812c105e9da8e12c88c0776c639c848e47fb6e7e6eb4bd343c79a46c92c92.jpg)
361
+
362
+ Figure 7 (left) shows the result of varying the size of the calibration set on the perplexity: sample sizes of at least 128 provide sensible choices for our calibration set. Next we explore the effect of using different sequence lengths $N$ in the calibration set. Given a fixed number of samples $B$, the PCA input matrix is computed from $NB$ embedding vectors, so the tradeoff between a larger $B$ and a larger $N$ is of interest. Figure 7 (right) shows the results of varying the calibration sequence length from 128 to 4096: we conclude that a larger sequence length results in better perplexity.
363
+
364
+ Using these insights, we use a calibration set size of 1024 and a sequence length of 2048 in our main experiments (Table 1). In Table 5 below we evaluate the perplexity of the OPT and LLAMA-2 models on WikiText-2 with a smaller calibration set, which confirms that decreasing the calibration set size degrades perplexity across all models and sizes.
365
+
366
+ Table 5: OPT and Llama-2 perplexity results on WikiText2. The calibration set size and sequence length are 128 and 2048, respectively.
367
+
368
+ <table><tr><td rowspan="2">Method</td><td colspan="7">OPT</td><td colspan="3">LLAMA-2</td></tr><tr><td>125M</td><td>1.3B</td><td>2.7B</td><td>6.7B</td><td>13B</td><td>30B</td><td>66B</td><td>7B</td><td>13B</td><td>70B</td></tr><tr><td>Dense</td><td>27.64</td><td>14.61</td><td>12.46</td><td>10.85</td><td>10.12</td><td>9.56</td><td>9.33</td><td>5.47</td><td>4.88</td><td>3.32</td></tr><tr><td>SparseGPT 2:4</td><td>45.07</td><td>29.61</td><td>14.90</td><td>13.00</td><td>11.80</td><td>10.53</td><td>10.22</td><td>8.69</td><td>7.07</td><td>4.98</td></tr><tr><td>SliceGPT (10%)</td><td>29.33</td><td>15.15</td><td>12.82</td><td>11.00</td><td>10.30</td><td>9.66</td><td>9.43</td><td>5.96</td><td>5.29</td><td>3.78</td></tr><tr><td>SliceGPT (20%)</td><td>34.53</td><td>16.58</td><td>13.89</td><td>11.62</td><td>10.75</td><td>9.91</td><td>9.61</td><td>6.86</td><td>6.04</td><td>4.46</td></tr><tr><td>SliceGPT (25%)</td><td>38.13</td><td>17.78</td><td>14.84</td><td>12.12</td><td>11.08</td><td>10.10</td><td>9.76</td><td>7.56</td><td>6.61</td><td>4.89</td></tr><tr><td>SliceGPT (30%)</td><td>44.61</td><td>19.61</td><td>16.30</td><td>12.81</td><td>11.55</td><td>10.32</td><td>9.95</td><td>8.64</td><td>7.44</td><td>5.42</td></tr></table>
369
+
370
+ # A.4 SPECTRUM ANALYSIS OF LLAMA-2 AND OPT MODELS
371
+
372
+ Figure 8 below shows the eigenvalue distribution for the OPT 6.7B and LLAMA-2 7B models. Although both models have a comparable parameter count, the LLAMA-2 model has a more tightly compressed embedding spectrum: no dominant principal components carry significantly more information than the rest, which makes pruning these components more challenging.
373
+
374
+ ![](images/68bdf4cb0542680be911722cb6d2465764174030a3d03287166ff684be3b4d99.jpg)
375
376
+
377
+ ![](images/9d95bfebb6e3f935ad6de1067d0bd8337b6bde4903959a546d9702609cd102ed.jpg)
378
+ Figure 8: Normalized (by maximum) spectrum of the MLP inputs (log scale) using 64 samples. Except for the first layer in the LLAMA-2 model, the eigenvalue distributions for both models show faster decay in early layers compared to later ones. This suggests that a greater amount of slicing could be applied after the orthogonal transformation in these early layers.
379
+
380
+ We can use the above insights to slice different layers by different amounts. Instead of specifying the slicing level upfront, we set the fraction of the total variance to discard during each PCA calculation, which determines the number of rows and columns to slice off from each matrix. For each model, we run three experiments with varying target variances to obtain a total reduction in network size close to $25\%$.
381
+
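+ A minimal sketch of this selection rule (our illustration, not the paper's exact code): given the eigenvalues from a layer's PCA, keep the smallest number of leading components whose cumulative variance reaches the target fraction, and slice off the rest.
+
+ ```python
+ import torch
+
+ def components_to_keep(eigenvalues: torch.Tensor, discard_fraction: float) -> int:
+     """Number of leading principal components to keep so that at most
+     `discard_fraction` of the total variance is discarded."""
+     vals, _ = torch.sort(eigenvalues, descending=True)
+     cumulative = torch.cumsum(vals, dim=0) / vals.sum()
+     # Smallest k such that the first k components capture >= 1 - discard_fraction.
+     keep = int((cumulative < 1.0 - discard_fraction).sum().item()) + 1
+     return min(keep, vals.numel())
+
+ # A faster-decaying spectrum (as in early layers) allows more slicing for the
+ # same variance budget than a flat one.
+ fast = torch.tensor([10.0, 3.0, 1.0, 0.3, 0.1, 0.05, 0.02, 0.01])
+ flat = torch.tensor([2.0, 1.9, 1.8, 1.8, 1.7, 1.7, 1.6, 1.6])
+ print(components_to_keep(fast, 0.05), components_to_keep(flat, 0.05))  # 3 8
+ ```
+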
382
+ The results are shown in Table 6 below. Varying the slicing level by layer improves the WikiText-2 perplexity of the OPT models, but has the opposite effect on the LLAMA-2 models.
383
+
384
+ Table 6: Evaluating the effects of varying slicing level by layer. The calibration set size is 128 and the sequence length is the maximum for each model.
385
+
386
+ <table><tr><td>Model</td><td>WikiText-2 PPL (25% constant slicing)</td><td>WikiText-2 PPL (varying slicing by layer)</td><td>Improvement</td></tr><tr><td>OPT 6.7B</td><td>12.10</td><td>11.94, 24.7% total slicing</td><td>0.16</td></tr><tr><td>OPT 13B</td><td>11.04</td><td>10.76, 24.2% total slicing</td><td>0.28</td></tr><tr><td>OPT 30B</td><td>10.13</td><td>9.95, 24.8% total slicing</td><td>0.18</td></tr><tr><td>OPT 66B</td><td>9.75</td><td>9.63, 24.1% total slicing</td><td>0.12</td></tr><tr><td>LLAMA-2 7B</td><td>6.84</td><td>7.63, 24.1% total slicing</td><td>-0.79</td></tr><tr><td>LLAMA-2 13B</td><td>6.00</td><td>6.17, 23.3% total slicing</td><td>-0.17</td></tr><tr><td>LLAMA-2 70B</td><td>4.44</td><td>4.63, 25.5% total slicing</td><td>-0.19</td></tr></table>
387
+
388
+ # A.5 DETAILED ZERO-SHOT RESULTS
389
+
390
+ In this section, we provide the detailed results of the zero-shot tasks we presented in the paper.
391
+
392
+ Table 7: Downstream zero-shot task performance of OPT, LLAMA-2 and Phi-2 models when slicing using the WikiText2 dataset.
393
+
394
+ <table><tr><td>Model</td><td>Slicing</td><td>PIQA</td><td>WinoGrande</td><td>HellaSwag</td><td>ARC-e</td><td>ARC-c</td><td>Avg. Score</td></tr><tr><td rowspan="4">OPT 1.3B</td><td>Dense</td><td>72.42</td><td>59.27</td><td>53.72</td><td>50.97</td><td>29.52</td><td>53.18</td></tr><tr><td>20%</td><td>65.34</td><td>54.85</td><td>45.39</td><td>46.04</td><td>26.96</td><td>47.72</td></tr><tr><td>25%</td><td>62.30</td><td>53.83</td><td>42.91</td><td>45.45</td><td>27.22</td><td>46.34</td></tr><tr><td>30%</td><td>60.77</td><td>54.70</td><td>39.81</td><td>43.90</td><td>25.77</td><td>44.99</td></tr><tr><td rowspan="4">OPT 2.7B</td><td>Dense</td><td>74.81</td><td>61.01</td><td>60.58</td><td>54.42</td><td>31.14</td><td>56.39</td></tr><tr><td>20%</td><td>68.23</td><td>57.93</td><td>51.38</td><td>51.81</td><td>28.50</td><td>51.57</td></tr><tr><td>25%</td><td>65.29</td><td>57.22</td><td>47.85</td><td>49.79</td><td>27.99</td><td>49.63</td></tr><tr><td>30%</td><td>62.35</td><td>57.22</td><td>44.18</td><td>46.72</td><td>27.05</td><td>47.50</td></tr><tr><td rowspan="4">OPT 6.7B</td><td>Dense</td><td>76.39</td><td>65.19</td><td>67.16</td><td>60.14</td><td>34.64</td><td>60.70</td></tr><tr><td>20%</td><td>72.74</td><td>61.09</td><td>61.04</td><td>55.89</td><td>30.80</td><td>56.31</td></tr><tr><td>25%</td><td>70.35</td><td>60.62</td><td>58.15</td><td>52.78</td><td>29.52</td><td>54.28</td></tr><tr><td>30%</td><td>68.61</td><td>60.69</td><td>54.56</td><td>52.15</td><td>29.01</td><td>53.00</td></tr><tr><td rowspan="4">OPT 13B</td><td>Dense</td><td>76.82</td><td>64.80</td><td>69.81</td><td>61.87</td><td>35.67</td><td>61.79</td></tr><tr><td>20%</td><td>74.48</td><td>64.96</td><td>65.42</td><td>60.90</td><td>35.24</td><td>60.20</td></tr><tr><td>25%</td><td>73.67</td><td>64.25</td><td>63.28</td><td>60.52</td><td>34.64</td><td>59.27</td></tr><tr><td>30%</td><td>71.82</td><td>62.90</td><td>60.66</td><td>58.80</td><td>32.94</td><td>57.42</td></tr><tr><td rowspan="4">OPT 30B</td><td>Dense</td><td>78.07</td><td>68.19</td><td>72.27</td><td>65.24</td><td>38.23</td><td>64.40</td></tr><tr><td>20%</td><td>76.50</td><td>66.61</td><td>70.61</td><td>64.18</td><td>35.75</td><td>62.73</td></tr><tr><td>25%</td><td>75.30</td><td>66.61</td><td>69.42</td><td>63.55</td><td>35.67</td><td>62.11</td></tr><tr><td>30%</td><td>74.97</td><td>65.04</td><td>68.15</td><td>63.55</td><td>34.64</td><td>61.27</td></tr><tr><td rowspan="4">OPT 66B</td><td>Dense</td><td>79.82</td><td>68.90</td><td>74.85</td><td>67.21</td><td>40.02</td><td>66.16</td></tr><tr><td>20%</td><td>78.73</td><td>67.88</td><td>73.79</td><td>68.81</td><td>39.51</td><td>65.74</td></tr><tr><td>25%</td><td>78.40</td><td>67.09</td><td>73.33</td><td>67.89</td><td>39.16</td><td>65.17</td></tr><tr><td>30%</td><td>77.42</td><td>66.30</td><td>72.62</td><td>66.90</td><td>37.97</td><td>64.24</td></tr><tr><td rowspan="4">LLAMA-2 7B</td><td>Dense</td><td>79.11</td><td>69.06</td><td>75.99</td><td>74.58</td><td>46.25</td><td>69.00</td></tr><tr><td>20%</td><td>69.42</td><td>65.11</td><td>59.04</td><td>59.76</td><td>37.54</td><td>58.18</td></tr><tr><td>25%</td><td>66.87</td><td>63.38</td><td>54.16</td><td>58.46</td><td>34.56</td><td>55.48</td></tr><tr><td>30%</td><td>63.55</td><td>61.33</td><td>49.62</td><td>51.77</td><td>31.23</td><td>51.50</td></tr><tr><td rowspan="4">LLAMA-2 
13B</td><td>Dense</td><td>80.47</td><td>72.22</td><td>79.39</td><td>77.48</td><td>49.23</td><td>71.76</td></tr><tr><td>20%</td><td>71.87</td><td>69.38</td><td>63.04</td><td>69.87</td><td>43.09</td><td>63.45</td></tr><tr><td>25%</td><td>68.55</td><td>67.48</td><td>58.10</td><td>62.50</td><td>37.88</td><td>58.90</td></tr><tr><td>30%</td><td>66.10</td><td>65.11</td><td>52.69</td><td>56.82</td><td>35.07</td><td>55.16</td></tr><tr><td rowspan="4">LLAMA-2 70B</td><td>Dense</td><td>82.70</td><td>77.98</td><td>83.84</td><td>80.98</td><td>57.34</td><td>76.57</td></tr><tr><td>20%</td><td>76.61</td><td>76.40</td><td>72.98</td><td>80.51</td><td>55.20</td><td>72.34</td></tr><tr><td>25%</td><td>74.92</td><td>75.37</td><td>68.84</td><td>77.90</td><td>51.71</td><td>69.75</td></tr><tr><td>30%</td><td>72.31</td><td>73.56</td><td>63.69</td><td>73.40</td><td>47.61</td><td>66.11</td></tr><tr><td rowspan="4">Phi-2</td><td>Dense</td><td>79.11</td><td>75.77</td><td>73.83</td><td>78.32</td><td>54.18</td><td>72.24</td></tr><tr><td>20%</td><td>71.87</td><td>67.80</td><td>57.76</td><td>58.00</td><td>35.32</td><td>58.15</td></tr><tr><td>25%</td><td>69.21</td><td>65.35</td><td>52.40</td><td>53.70</td><td>31.66</td><td>54.46</td></tr><tr><td>30%</td><td>65.94</td><td>63.14</td><td>47.56</td><td>53.03</td><td>30.29</td><td>51.99</td></tr></table>
395
+
396
+ Table 8: Downstream zero-shot task performance of OPT, LLAMA-2 and Phi-2 models when slicing using the Alpaca dataset.
397
+
398
+ <table><tr><td>Model</td><td>Slicing</td><td>PIQA</td><td>WinoGrande</td><td>HellaSwag</td><td>ARC-e</td><td>ARC-c</td><td>Avg. Score</td></tr><tr><td rowspan="4">OPT 1.3B</td><td>Dense</td><td>72.42</td><td>59.27</td><td>53.72</td><td>50.97</td><td>29.52</td><td>53.18</td></tr><tr><td>20%</td><td>69.91</td><td>55.49</td><td>47.88</td><td>49.66</td><td>27.05</td><td>50.00</td></tr><tr><td>25%</td><td>69.37</td><td>55.72</td><td>45.82</td><td>48.70</td><td>26.62</td><td>49.25</td></tr><tr><td>30%</td><td>68.55</td><td>55.33</td><td>43.92</td><td>47.26</td><td>26.45</td><td>48.30</td></tr><tr><td rowspan="4">OPT 2.7B</td><td>Dense</td><td>74.81</td><td>61.01</td><td>60.58</td><td>54.42</td><td>31.14</td><td>56.39</td></tr><tr><td>20%</td><td>71.87</td><td>58.09</td><td>54.98</td><td>54.04</td><td>29.44</td><td>53.68</td></tr><tr><td>25%</td><td>70.95</td><td>58.09</td><td>52.62</td><td>53.03</td><td>29.61</td><td>52.86</td></tr><tr><td>30%</td><td>69.64</td><td>56.43</td><td>49.45</td><td>51.81</td><td>28.33</td><td>51.13</td></tr><tr><td rowspan="4">OPT 6.7B</td><td>Dense</td><td>76.39</td><td>65.19</td><td>67.16</td><td>60.14</td><td>34.64</td><td>60.70</td></tr><tr><td>20%</td><td>74.54</td><td>62.67</td><td>62.84</td><td>59.18</td><td>33.36</td><td>58.52</td></tr><tr><td>25%</td><td>73.78</td><td>62.59</td><td>60.99</td><td>59.01</td><td>33.70</td><td>58.01</td></tr><tr><td>30%</td><td>73.34</td><td>61.80</td><td>58.93</td><td>58.33</td><td>32.85</td><td>57.05</td></tr><tr><td rowspan="4">OPT 13B</td><td>Dense</td><td>76.82</td><td>64.80</td><td>69.81</td><td>61.87</td><td>35.67</td><td>61.79</td></tr><tr><td>20%</td><td>76.01</td><td>65.19</td><td>66.15</td><td>61.57</td><td>34.73</td><td>60.73</td></tr><tr><td>25%</td><td>74.65</td><td>64.64</td><td>65.02</td><td>60.65</td><td>35.07</td><td>60.00</td></tr><tr><td>30%</td><td>74.86</td><td>63.46</td><td>63.16</td><td>61.36</td><td>34.56</td><td>59.48</td></tr><tr><td rowspan="4">OPT 30B</td><td>Dense</td><td>78.07</td><td>68.19</td><td>72.27</td><td>65.24</td><td>38.23</td><td>64.40</td></tr><tr><td>20%</td><td>78.35</td><td>66.61</td><td>70.64</td><td>65.19</td><td>37.46</td><td>63.65</td></tr><tr><td>25%</td><td>77.48</td><td>65.82</td><td>69.58</td><td>65.91</td><td>37.63</td><td>63.28</td></tr><tr><td>30%</td><td>76.93</td><td>64.96</td><td>68.66</td><td>65.70</td><td>37.12</td><td>62.67</td></tr><tr><td rowspan="4">OPT 66B</td><td>Dense</td><td>79.82</td><td>68.90</td><td>74.85</td><td>67.21</td><td>40.02</td><td>66.16</td></tr><tr><td>20%</td><td>79.49</td><td>68.19</td><td>73.69</td><td>67.26</td><td>39.25</td><td>65.58</td></tr><tr><td>25%</td><td>79.11</td><td>68.35</td><td>73.30</td><td>67.00</td><td>38.74</td><td>65.30</td></tr><tr><td>30%</td><td>79.05</td><td>68.75</td><td>72.62</td><td>66.29</td><td>38.31</td><td>65.00</td></tr><tr><td rowspan="4">LLAMA-2 7B</td><td>Dense</td><td>79.11</td><td>69.06</td><td>75.99</td><td>74.58</td><td>46.25</td><td>69.00</td></tr><tr><td>20%</td><td>76.50</td><td>65.51</td><td>65.20</td><td>69.99</td><td>41.21</td><td>63.68</td></tr><tr><td>25%</td><td>74.21</td><td>64.01</td><td>60.55</td><td>66.88</td><td>38.91</td><td>60.91</td></tr><tr><td>30%</td><td>72.25</td><td>59.83</td><td>55.86</td><td>63.93</td><td>37.80</td><td>57.93</td></tr><tr><td rowspan="4">LLAMA-2 
13B</td><td>Dense</td><td>80.47</td><td>72.22</td><td>79.39</td><td>77.48</td><td>49.23</td><td>71.76</td></tr><tr><td>20%</td><td>77.97</td><td>68.90</td><td>69.64</td><td>74.71</td><td>45.99</td><td>67.44</td></tr><tr><td>25%</td><td>76.88</td><td>67.40</td><td>65.85</td><td>72.52</td><td>44.54</td><td>65.44</td></tr><tr><td>30%</td><td>74.10</td><td>65.82</td><td>60.91</td><td>68.43</td><td>42.41</td><td>62.34</td></tr><tr><td rowspan="4">LLAMA-2 70B</td><td>Dense</td><td>82.70</td><td>77.98</td><td>83.84</td><td>80.98</td><td>57.34</td><td>76.57</td></tr><tr><td>20%</td><td>81.99</td><td>76.87</td><td>78.93</td><td>80.26</td><td>54.10</td><td>74.43</td></tr><tr><td>25%</td><td>80.69</td><td>77.98</td><td>76.97</td><td>79.67</td><td>52.65</td><td>73.59</td></tr><tr><td>30%</td><td>79.33</td><td>77.27</td><td>73.11</td><td>77.44</td><td>51.19</td><td>71.67</td></tr><tr><td rowspan="4">Phi-2</td><td>Dense</td><td>79.11</td><td>75.77</td><td>73.83</td><td>78.32</td><td>54.18</td><td>72.24</td></tr><tr><td>20%</td><td>76.17</td><td>68.75</td><td>61.95</td><td>72.18</td><td>45.48</td><td>64.90</td></tr><tr><td>25%</td><td>75.68</td><td>64.88</td><td>58.19</td><td>70.41</td><td>43.43</td><td>62.52</td></tr><tr><td>30%</td><td>74.05</td><td>62.12</td><td>53.31</td><td>67.26</td><td>39.42</td><td>63.47</td></tr></table>
399
+
400
+ Table 9: Downstream zero-shot task performance of LLAMA-2 and Phi-2 models when slicing and recovery fine-tuning using the WikiText2 dataset.
401
+
402
+ <table><tr><td>Model</td><td>Slicing</td><td>PIQA</td><td>WinoGrande</td><td>HellaSwag</td><td>ARC-e</td><td>ARC-c</td><td>Avg. Score</td></tr><tr><td rowspan="4">LLAMA-2 7B</td><td>Dense</td><td>79.11</td><td>69.06</td><td>75.99</td><td>74.58</td><td>46.25</td><td>69.00</td></tr><tr><td>20%</td><td>69.86</td><td>64.72</td><td>61.07</td><td>54.25</td><td>36.43</td><td>57.27</td></tr><tr><td>25%</td><td>69.26</td><td>64.96</td><td>58.65</td><td>52.36</td><td>35.75</td><td>56.20</td></tr><tr><td>30%</td><td>67.41</td><td>63.22</td><td>55.65</td><td>50.76</td><td>34.13</td><td>54.23</td></tr><tr><td rowspan="4">LLAMA-2 13B</td><td>Dense</td><td>80.47</td><td>72.22</td><td>79.39</td><td>77.48</td><td>49.23</td><td>71.76</td></tr><tr><td>20%</td><td>74.10</td><td>68.51</td><td>66.94</td><td>70.54</td><td>43.77</td><td>64.77</td></tr><tr><td>25%</td><td>71.27</td><td>68.98</td><td>64.12</td><td>63.76</td><td>40.87</td><td>61.80</td></tr><tr><td>30%</td><td>69.64</td><td>66.85</td><td>59.93</td><td>59.55</td><td>38.65</td><td>58.93</td></tr><tr><td rowspan="4">LLAMA-2 70B</td><td>Dense</td><td>82.70</td><td>77.98</td><td>83.84</td><td>80.98</td><td>57.34</td><td>76.57</td></tr><tr><td>20%</td><td>77.86</td><td>76.16</td><td>72.91</td><td>81.27</td><td>55.89</td><td>72.82</td></tr><tr><td>25%</td><td>76.71</td><td>73.72</td><td>71.41</td><td>79.88</td><td>54.69</td><td>71.28</td></tr><tr><td>30%</td><td>75.14</td><td>73.56</td><td>69.91</td><td>74.79</td><td>51.71</td><td>69.02</td></tr><tr><td rowspan="4">Phi-2</td><td>Dense</td><td>79.11</td><td>75.77</td><td>73.83</td><td>78.32</td><td>54.18</td><td>72.24</td></tr><tr><td>20%</td><td>71.27</td><td>67.17</td><td>54.86</td><td>56.61</td><td>38.91</td><td>57.76</td></tr><tr><td>25%</td><td>69.91</td><td>65.19</td><td>52.48</td><td>52.78</td><td>35.49</td><td>55.17</td></tr><tr><td>30%</td><td>66.16</td><td>63.54</td><td>49.72</td><td>46.38</td><td>32.68</td><td>51.70</td></tr></table>
403
+
404
+ Table 10: Downstream zero-shot task performance of LLAMA-2 and Phi-2 models when slicing and recovery fine-tuning using the Alpaca dataset.
405
+
406
+ <table><tr><td>Model</td><td>Slicing</td><td>PIQA</td><td>WinoGrande</td><td>HellaSwag</td><td>ARC-e</td><td>ARC-c</td><td>Avg. Score</td></tr><tr><td rowspan="4">LLAMA-2 7B</td><td>Dense</td><td>79.11</td><td>69.06</td><td>75.99</td><td>74.58</td><td>46.25</td><td>69.00</td></tr><tr><td>20%</td><td>76.55</td><td>65.59</td><td>68.26</td><td>71.84</td><td>45.05</td><td>65.46</td></tr><tr><td>25%</td><td>75.79</td><td>63.22</td><td>65.12</td><td>68.22</td><td>42.83</td><td>63.04</td></tr><tr><td>30%</td><td>74.59</td><td>61.64</td><td>63.06</td><td>66.54</td><td>40.87</td><td>61.34</td></tr><tr><td rowspan="4">LLAMA-2 13B</td><td>Dense</td><td>80.47</td><td>72.22</td><td>79.39</td><td>77.48</td><td>49.23</td><td>71.76</td></tr><tr><td>20%</td><td>79.27</td><td>68.27</td><td>73.21</td><td>74.37</td><td>49.83</td><td>68.99</td></tr><tr><td>25%</td><td>78.84</td><td>67.64</td><td>71.21</td><td>73.57</td><td>49.66</td><td>68.18</td></tr><tr><td>30%</td><td>76.11</td><td>68.03</td><td>68.58</td><td>71.42</td><td>47.10</td><td>66.35</td></tr><tr><td rowspan="4">LLAMA-2 70B</td><td>Dense</td><td>82.70</td><td>77.98</td><td>83.84</td><td>80.98</td><td>57.34</td><td>76.57</td></tr><tr><td>20%</td><td>81.94</td><td>77.74</td><td>79.39</td><td>81.57</td><td>58.45</td><td>75.82</td></tr><tr><td>25%</td><td>81.88</td><td>77.11</td><td>79.04</td><td>81.36</td><td>58.70</td><td>75.62</td></tr><tr><td>30%</td><td>80.30</td><td>75.85</td><td>77.13</td><td>80.05</td><td>58.19</td><td>74.30</td></tr><tr><td rowspan="4">Phi-2</td><td>Dense</td><td>79.11</td><td>75.77</td><td>73.83</td><td>78.32</td><td>54.18</td><td>72.24</td></tr><tr><td>20%</td><td>77.42</td><td>72.14</td><td>65.33</td><td>74.20</td><td>49.91</td><td>67.80</td></tr><tr><td>25%</td><td>76.17</td><td>68.75</td><td>63.39</td><td>70.45</td><td>47.44</td><td>65.24</td></tr><tr><td>30%</td><td>75.24</td><td>65.59</td><td>60.10</td><td>70.16</td><td>46.25</td><td>63.47</td></tr></table>
407
+
408
+ # A.6 BENCHMARKING THROUGHPUT EXPERIMENT
409
+
410
+ Table 11: Benchmarking throughput for OPT and LLAMA-2 models on 80GB H100 GPUs. We set the sequence length to 128 and find the maximum throughput by doubling the batch size until the GPUs run out of memory or the throughput drops off.
411
+
412
+ <table><tr><td>Model</td><td>Slicing</td><td>GPUs</td><td>Batchsize</td><td>Tokens/s</td></tr><tr><td rowspan="3">OPT 13B</td><td>Dense</td><td>1</td><td>512</td><td>2518</td></tr><tr><td>25%</td><td>1</td><td>512</td><td>2846 (1.13×)</td></tr><tr><td>50%</td><td>1</td><td>512</td><td>3071 (1.22×)</td></tr><tr><td rowspan="3">OPT 66B</td><td>Dense</td><td>2</td><td>16</td><td>141</td></tr><tr><td>25%</td><td>2</td><td>16</td><td>152 (1.08×)</td></tr><tr><td>50%</td><td>1</td><td>32</td><td>441 (6.26×)</td></tr><tr><td rowspan="3">LLAMA-2 13B</td><td>Dense</td><td>1</td><td>512</td><td>2707</td></tr><tr><td>25%</td><td>1</td><td>512</td><td>2878 (1.06×)</td></tr><tr><td>50%</td><td>1</td><td>512</td><td>3122 (1.15×)</td></tr><tr><td rowspan="3">LLAMA-2 70B</td><td>Dense</td><td>2</td><td>128</td><td>541</td></tr><tr><td>25%</td><td>2</td><td>256</td><td>839 (1.55×)</td></tr><tr><td>50%</td><td>1</td><td>128</td><td>1014 (3.75×)</td></tr></table>
413
+
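+ The batch-size search described in the caption of Table 11 can be sketched as follows (a minimal illustration of the procedure, not the benchmarking harness itself; `measure` is a user-supplied function returning tokens/s for a given batch size):
+
+ ```python
+ from typing import Callable, Tuple
+
+ import torch
+
+ def find_max_throughput(measure: Callable[[int], float],
+                         start_batch: int = 1) -> Tuple[int, float]:
+     """Double the batch size until the GPU runs out of memory or throughput
+     stops improving; `measure(batch_size)` returns throughput in tokens/s."""
+     best_bs, best_tps, bs = start_batch, 0.0, start_batch
+     while True:
+         try:
+             tps = measure(bs)
+         except torch.cuda.OutOfMemoryError:
+             break
+         if tps <= best_tps:
+             break
+         best_bs, best_tps = bs, tps
+         bs *= 2
+     return best_bs, best_tps
+
+ # Toy stand-in for a real measurement: throughput saturates at batch size 512.
+ print(find_max_throughput(lambda bs: min(bs, 512) * 5.0))  # (512, 2560.0)
+ ```
+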
414
+ # A.7 BENCHMARKING INFERENCE TIME OF SLICEGPT AGAINST SPARSEGPT
415
+
416
+ We use the cuSPARSELt 0.5 library to run sparse matrix multiplications on an 80 GB A100 GPU, replicating the sizes of the matrix-matrix multiplications in three different-sized LLAMA-2 models. We use PyTorch to run the corresponding matrix multiplications for the dense equivalent and for SliceGPT (which is also a straightforward dense matmul, just smaller). We choose a sequence length of 2048, take the matrix sizes from the HuggingFace config files, and report the median runtime over $10^{3}$ runs.
417
+
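+ A minimal sketch of the dense-side timing methodology (our own code with illustrative shapes; the cuSPARSELt setup for the sparse case is not reproduced here):
+
+ ```python
+ import time
+
+ import torch
+
+ def median_matmul_ms(m: int, k: int, n: int, runs: int = 1000) -> float:
+     """Median wall-clock time in ms of an (m, k) @ (k, n) matmul."""
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     dtype = torch.float16 if device == "cuda" else torch.float32
+     a = torch.randn(m, k, device=device, dtype=dtype)
+     b = torch.randn(k, n, device=device, dtype=dtype)
+     times = []
+     for _ in range(runs):
+         if device == "cuda":
+             torch.cuda.synchronize()
+         t0 = time.perf_counter()
+         a @ b
+         if device == "cuda":
+             torch.cuda.synchronize()
+         times.append((time.perf_counter() - t0) * 1000.0)
+     return sorted(times)[len(times) // 2]
+
+ # E.g. the LLaMA-2 7B up projection at sequence length 2048 (4096 -> 11008):
+ # median_matmul_ms(2048, 4096, 11008)
+ ```
+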
418
+ Each LLAMA-2 layer contains a gated FFN with one up projection, one down projection, and one gate projection. In attention, the model architecture means that the query matrix multiplication can be a different size from the key and value matrix multiplications. The following table shows the time taken in ms to run each matrix multiplication in the model, plus a "total" time and a relative speedup.
419
+
420
+ Table 12: Results of timing the matrix multiplications involved in each layer of LLAMA-2 models. For larger models, SliceGPT (25%) gives the same speedup as SparseGPT 2:4 but with better WikiText-2 perplexity. For smaller models SparseGPT 2:4 provides better speedup albeit at worse perplexity. Slicing at $50\%$ trades off perplexity for even greater speedups.
421
+
422
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Method</td><td rowspan="2">PPL</td><td colspan="5">Operation (ms)</td><td rowspan="2">Total in ms (speedup)</td></tr><tr><td>Down Proj</td><td>Up/Gate Proj</td><td>K,V</td><td>Q</td><td>Out</td></tr><tr><td rowspan="4">LLAMA-2 7B</td><td>Dense</td><td>5.47</td><td>0.89</td><td>0.87</td><td>0.34</td><td>0.34</td><td>0.34</td><td>3.99</td></tr><tr><td>SparseGPT 2:4</td><td>8.69</td><td>0.56</td><td>0.61</td><td>0.23</td><td>0.23</td><td>0.23</td><td>2.70 (1.48×)</td></tr><tr><td>SliceGPT (25%)</td><td>7.24</td><td>0.67</td><td>0.64</td><td>0.26</td><td>0.25</td><td>0.27</td><td>2.99 (1.33×)</td></tr><tr><td>SliceGPT (50%)</td><td>17.17</td><td>0.46</td><td>0.44</td><td>0.18</td><td>0.18</td><td>0.18</td><td>2.06 (1.94×)</td></tr><tr><td rowspan="4">LLAMA-2 13B</td><td>Dense</td><td>4.88</td><td>1.29</td><td>1.28</td><td>0.52</td><td>0.52</td><td>0.52</td><td>5.93</td></tr><tr><td>SparseGPT 2:4</td><td>7.07</td><td>0.81</td><td>0.95</td><td>0.31</td><td>0.31</td><td>0.31</td><td>3.95 (1.50×)</td></tr><tr><td>SliceGPT (25%)</td><td>6.30</td><td>1.03</td><td>0.98</td><td>0.39</td><td>0.39</td><td>0.41</td><td>4.57 (1.30×)</td></tr><tr><td>SliceGPT (50%)</td><td>13.71</td><td>0.68</td><td>0.67</td><td>0.26</td><td>0.27</td><td>0.30</td><td>3.11 (1.91×)</td></tr><tr><td rowspan="4">LLAMA-2 70B</td><td>Dense</td><td>3.32</td><td>4.63</td><td>4.27</td><td>0.21</td><td>1.27</td><td>1.27</td><td>16.13</td></tr><tr><td>SparseGPT 2:4</td><td>4.98</td><td>2.87</td><td>3.69</td><td>0.14</td><td>0.84</td><td>0.83</td><td>12.20 (1.32×)</td></tr><tr><td>SliceGPT (25%)</td><td>4.60</td><td>3.4</td><td>3.26</td><td>0.16</td><td>0.96</td><td>1.00</td><td>12.20 (1.32×)</td></tr><tr><td>SliceGPT (50%)</td><td>8.86</td><td>2.28</td><td>2.34</td><td>0.15</td><td>0.69</td><td>0.68</td><td>8.63 (1.87×)</td></tr></table>
423
+
424
+ We also benchmarked the OPT architecture in the same way. In this case, the matrix multiplications associated with Key, Value, Query and Out are all the same size, and there are just two matrix multiplications in the MLP section (FC1 and FC2).
425
+
426
+ Table 13: Results of timing the matrix multiplications involved in each layer of OPT models. For larger models, SliceGPT (25%) gives slightly better speedup than SparseGPT 2:4, and with better WikiText-2 perplexity. For smaller models SparseGPT 2:4 provides better speedup albeit at worse perplexity. Slicing at $50\%$ trades off perplexity for even greater speedups.
427
+
428
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Method</td><td rowspan="2">PPL</td><td colspan="3">Operation (ms)</td><td rowspan="2">Total in ms (speedup)</td></tr><tr><td>FC2</td><td>FC1</td><td>K,V,Q,Out</td></tr><tr><td rowspan="4">OPT 13B</td><td>Dense</td><td>10.12</td><td>1.89</td><td>1.89</td><td>0.52</td><td>7.75</td></tr><tr><td>SparseGPT 2:4</td><td>11.80</td><td>1.18</td><td>1.50</td><td>0.31</td><td>5.42 (1.43×)</td></tr><tr><td>SliceGPT (25%)</td><td>10.94</td><td>1.50</td><td>1.45</td><td>0.38</td><td>5.92 (1.31×)</td></tr><tr><td>SliceGPT (50%)</td><td>15.39</td><td>0.96</td><td>0.99</td><td>0.26</td><td>3.98 (1.95×)</td></tr><tr><td rowspan="4">OPT 30B</td><td>Dense</td><td>9.56</td><td>10.29</td><td>1.28</td><td>0.52</td><td>5.93</td></tr><tr><td>SparseGPT 2:4</td><td>10.53</td><td>0.81</td><td>0.95</td><td>0.31</td><td>3.95 (1.50×)</td></tr><tr><td>SliceGPT (25%)</td><td>10.04</td><td>1.03</td><td>0.98</td><td>0.39</td><td>4.55 (1.30×)</td></tr><tr><td>SliceGPT (50%)</td><td>12.47</td><td>0.68</td><td>0.67</td><td>0.26</td><td>3.06 (1.94×)</td></tr><tr><td rowspan="4">OPT 66B</td><td>Dense</td><td>9.33</td><td>4.63</td><td>4.27</td><td>0.21</td><td>14.01</td></tr><tr><td>SparseGPT 2:4</td><td>10.22</td><td>2.87</td><td>3.69</td><td>0.14</td><td>10.81 (1.30×)</td></tr><tr><td>SliceGPT (25%)</td><td>9.68</td><td>3.40</td><td>3.26</td><td>0.16</td><td>10.56 (1.33×)</td></tr><tr><td>SliceGPT (50%)</td><td>11.39</td><td>2.28</td><td>2.34</td><td>0.15</td><td>7.56 (1.85×)</td></tr></table>
2401.15xxx/2401.15024/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b757fef1b25e0f6248f82406877321acffc110b845aad21ff4e7b84a0da1c9e
3
+ size 1417027
2401.15xxx/2401.15024/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_content_list.json ADDED
@@ -0,0 +1,2035 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Yuhui Li\\* Fangyun Wei Chao Zhang\\* Hongyang Zhang\\*",
5
+ "bbox": [
6
+ 248,
7
+ 203,
8
+ 720,
9
+ 219
10
+ ],
11
+ "page_idx": 0
12
+ },
13
+ {
14
+ "type": "text",
15
+ "text": "$\\spadesuit$ Peking University",
16
+ "bbox": [
17
+ 196,
18
+ 219,
19
+ 326,
20
+ 234
21
+ ],
22
+ "page_idx": 0
23
+ },
24
+ {
25
+ "type": "text",
26
+ "text": "Microsoft Research",
27
+ "bbox": [
28
+ 341,
29
+ 219,
30
+ 477,
31
+ 234
32
+ ],
33
+ "page_idx": 0
34
+ },
35
+ {
36
+ "type": "text",
37
+ "text": "\\*University of Waterloo",
38
+ "bbox": [
39
+ 490,
40
+ 219,
41
+ 653,
42
+ 234
43
+ ],
44
+ "page_idx": 0
45
+ },
46
+ {
47
+ "type": "text",
48
+ "text": "†Vector Institute",
49
+ "bbox": [
50
+ 666,
51
+ 219,
52
+ 776,
53
+ 234
54
+ ],
55
+ "page_idx": 0
56
+ },
57
+ {
58
+ "type": "text",
59
+ "text": "hongyang.zhang@uwaterloo.ca",
60
+ "bbox": [
61
+ 352,
62
+ 234,
63
+ 617,
64
+ 250
65
+ ],
66
+ "page_idx": 0
67
+ },
68
+ {
69
+ "type": "text",
70
+ "text": "https://github.com/SafeAILab/EAGLE",
71
+ "bbox": [
72
+ 318,
73
+ 250,
74
+ 653,
75
+ 263
76
+ ],
77
+ "page_idx": 0
78
+ },
79
+ {
80
+ "type": "image",
81
+ "img_path": "images/2285cf218f82178f4e7f0f1a35951cf30a2fc9c26d7c57714479f745bfcd273f.jpg",
82
+ "image_caption": [
83
+ "Figure 1: Speedup ratio of Vicuna and LLaMA2-Chat inference latency on the MT-bench for greedy (temperature=0) settings. Speedup ratio of Medusa and Lookahead are copied from their original technical reports. With speculative sampling, there is a lack of suitable draft models to accelerate the 7B model. Employing a 7B model as the draft model for a 13B model results in slow speeds due to the high overhead of the 7B model, rendering it less efficient than vanilla autoregressive decoding. These scenarios are marked as N/A. In this paper, we only compare with speculative sampling based methods that do not need to finetune the backbone models, ensuring the output text distribution remains constant."
84
+ ],
85
+ "image_footnote": [],
86
+ "bbox": [
87
+ 94,
88
+ 287,
89
+ 875,
90
+ 488
91
+ ],
92
+ "page_idx": 0
93
+ },
94
+ {
95
+ "type": "text",
96
+ "text": "Abstract",
97
+ "text_level": 1,
98
+ "bbox": [
99
+ 241,
100
+ 623,
101
+ 318,
102
+ 638
103
+ ],
104
+ "page_idx": 0
105
+ },
106
+ {
107
+ "type": "text",
108
+ "text": "Autoregressive decoding makes the inference of Large Language Models (LLMs) time-consuming. In this paper, we reconsider speculative sampling and derive two key observations. Firstly, autoregression at the feature (second-to-top-layer) level is more straightforward than at the token level. Secondly, the inherent uncertainty in feature (second-to-top-layer) level autoregression constrains its performance. Based on these insights, we introduce EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency), a simple yet highly efficient speculative sampling framework. By incorporating a token sequence advanced by one time step, EAGLE effectively resolves the uncertainty, enabling precise second-to-top-layer feature prediction with minimal overhead. We conducted comprehensive evaluations",
109
+ "bbox": [
110
+ 116,
111
+ 648,
112
+ 444,
113
+ 905
114
+ ],
115
+ "page_idx": 0
116
+ },
117
+ {
118
+ "type": "text",
119
+ "text": "of EAGLE, including all models from the Vicuna and LLaMA2-Chat series, the MoE model Mixtral 8x7B Instruct, and tasks in dialogue, code generation, mathematical reasoning, and instruction following. For LLaMA2-Chat 70B, EAGLE achieved a latency speedup ratio of $2.7\\mathrm{x} - 3.5\\mathrm{x}$ , doubled throughput, while maintaining the distribution of the generated text.",
120
+ "bbox": [
121
+ 527,
122
+ 625,
123
+ 856,
124
+ 746
125
+ ],
126
+ "page_idx": 0
127
+ },
128
+ {
129
+ "type": "text",
130
+ "text": "1. Introduction",
131
+ "text_level": 1,
132
+ "bbox": [
133
+ 498,
134
+ 773,
135
+ 629,
136
+ 789
137
+ ],
138
+ "page_idx": 0
139
+ },
140
+ {
141
+ "type": "text",
142
+ "text": "Autoregressive decoding, the de facto standard for large language models (LLMs), generates tokens sequentially, leading to slow and costly generation. Speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) based methods address this by dividing the process into a low-cost draft stage and a parallelized verification stage over the drafted tokens, allowing for multiple tokens to be validated in a",
143
+ "bbox": [
144
+ 495,
145
+ 799,
146
+ 885,
147
+ 905
148
+ ],
149
+ "page_idx": 0
150
+ },
151
+ {
152
+ "type": "aside_text",
153
+ "text": "arXiv:2401.15077v3 [cs.LG] 4 Mar 2025",
154
+ "bbox": [
155
+ 22,
156
+ 267,
157
+ 57,
158
+ 700
159
+ ],
160
+ "page_idx": 0
161
+ },
162
+ {
163
+ "type": "header",
164
+ "text": "#",
165
+ "bbox": [
166
+ 91,
167
+ 111,
168
+ 143,
169
+ 154
170
+ ],
171
+ "page_idx": 0
172
+ },
173
+ {
174
+ "type": "header",
175
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
176
+ "bbox": [
177
+ 145,
178
+ 136,
179
+ 880,
180
+ 159
181
+ ],
182
+ "page_idx": 0
183
+ },
184
+ {
185
+ "type": "page_number",
186
+ "text": "1",
187
+ "bbox": [
188
+ 480,
189
+ 922,
190
+ 491,
191
+ 934
192
+ ],
193
+ "page_idx": 0
194
+ },
195
+ {
196
+ "type": "text",
197
+ "text": "single LLM pass. These approaches accelerate generation by producing multiple tokens per pass. More importantly, the verification stage ensures that the text distribution aligns precisely with the decoding results of the original LLM, maintaining the integrity of the generated content.",
198
+ "bbox": [
199
+ 84,
200
+ 85,
201
+ 478,
202
+ 161
203
+ ],
204
+ "page_idx": 1
205
+ },
206
+ {
207
+ "type": "text",
208
+ "text": "Applying speculative sampling hinges on finding a draft model that mirrors the original LLM's functionality but with reduced latency, often involving a lower-parameter version from the same LLM series. For instance, in the LLaMA2 (Touvron et al., 2023) series which includes models with 7B, 13B, and 70B parameters, using the 7B model as a draft model of the 70B model is valid, while finding a suitable draft model for the smallest 7B variant is tricky. An alternative could be to use TinyLLaMA (Zhang et al., 2024), but it is not feasible for instruct-tuned models due to the inconsistency in instruction templates between LLaMA2-Chat and TinyLLaMA-Chat. Despite the 7B model's potential as a draft model, its high overhead diminishes acceleration gains. Training a new, appropriately sized draft model specifically for speculative sampling is not an ideal solution either due to the high cost: TinyLLaMA is trained on 3,000B tokens, whereas EAGLE is trained on 2-4B tokens.",
209
+ "bbox": [
210
+ 86,
211
+ 167,
212
+ 478,
213
+ 425
214
+ ],
215
+ "page_idx": 1
216
+ },
217
+ {
218
+ "type": "text",
219
+ "text": "The key to enhancing acceleration in speculative sampling lies in reducing the time overhead and improving the acceptance rate of the draft by the original LLM (Chen et al., 2023b; Xia et al., 2023; Santilli et al., 2023). Numerous approaches focus on reducing the overhead of the drafting phase. Lookahead (Fu et al., 2023) employs n-gram and Jacobi iteration, while Medusa (Cai et al., 2023) utilizes a set of MLPs that predict tokens based on the second-to-top-layer feature of the original LLM. These strategies significantly decrease the latency in generating drafts, leading to improved acceleration. However, their effectiveness is limited by the lower accuracy of the resulting drafts, with Medusa achieving an accuracy of about 0.6, and Lookahead even lower. In contrast, our method attains an accuracy of approximately 0.8.",
220
+ "bbox": [
221
+ 84,
222
+ 431,
223
+ 477,
224
+ 659
225
+ ],
226
+ "page_idx": 1
227
+ },
228
+ {
229
+ "type": "text",
230
+ "text": "To overcome these limitations, we introduce EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency), an efficient speculative sampling method, grounded in the following two observations.",
231
+ "bbox": [
232
+ 84,
233
+ 665,
234
+ 478,
235
+ 727
236
+ ],
237
+ "page_idx": 1
238
+ },
239
+ {
240
+ "type": "text",
241
+ "text": "Firstly, autoregression at the feature level is simpler than at the token level. In this paper, \"features\" refer to the second-to-top-layer features of the original LLM, located before the LM head. Compared to token sequences, which are simple transformations of natural language, feature sequences exhibit more regularity. Autoregressively processing at the feature level and then deriving tokens using the LM head of the original LLM yields better results than directly autoregressively predicting tokens. As illustrated in Figure 4, autoregressively predicting features yields better performance, demonstrated by a higher speedup ratio of",
242
+ "bbox": [
243
+ 84,
244
+ 734,
245
+ 478,
246
+ 902
247
+ ],
248
+ "page_idx": 1
249
+ },
250
+ {
251
+ "type": "image",
252
+ "img_path": "images/9df3a3611bf82d020d22b95debfda596513ae7ea2ad6a69df96aa90f7d7a10cb.jpg",
253
+ "image_caption": [
254
+ "Figure 2: Speedup ratio on the MT-bench for non-greedy (temperature=1) settings. Lookahead is confined to greedy decoding, and the non-greedy generation of Medusa does not guarantee lossless performance. Therefore, EAGLE is not compared with these methods."
255
+ ],
256
+ "image_footnote": [],
257
+ "bbox": [
258
+ 501,
259
+ 84,
260
+ 880,
261
+ 202
262
+ ],
263
+ "page_idx": 1
264
+ },
265
+ {
266
+ "type": "image",
267
+ "img_path": "images/b719a6df771353b4c66c7b5a952adbf6f958c04e830bedf1bd90c8286d97d2df.jpg",
268
+ "image_caption": [
269
+ "Figure 3: Uncertainty in feature sequences. The next feature following $f_{I}$ is contingent on the sampling outcome and cannot be determined solely based on $f_{I}$ , where both \"always\" and \"am\" are possible to follow the token \"I\" and lead to two branches."
270
+ ],
271
+ "image_footnote": [],
272
+ "bbox": [
273
+ 513,
274
+ 318,
275
+ 875,
276
+ 401
277
+ ],
278
+ "page_idx": 1
279
+ },
280
+ {
281
+ "type": "text",
282
+ "text": "1.9x compared to 1.5x.",
283
+ "text_level": 1,
284
+ "bbox": [
285
+ 496,
286
+ 513,
287
+ 653,
288
+ 527
289
+ ],
290
+ "page_idx": 1
291
+ },
292
+ {
293
+ "type": "text",
294
+ "text": "Secondly, the uncertainty inherent in the sampling process significantly constrains the performance of predicting the next feature. In text generation, the target LLM predicts the distribution of tokens and samples accordingly, introducing randomness. Features, being high-dimensional and continuous, cannot be treated similarly. As depicted in Figure 3, sampling different tokens like \"am\" or \"always\" leads to distinct feature sequences, introducing ambiguity into the feature-level autoregression. Medusa faces a similar issue in predicting spaced tokens, where it is uncertain whether the true target for the input $f_{I}$ should be $p_{am}$ or $p_{always}$ . To address this issue, EAGLE inputs the token sequence from one time step ahead, which includes the sampling outcomes, into the draft model. In the example illustrated in Figure 3, this involves predicting $f_{always}$ based on $f_{I}$ and $t_{always}$ , and predicting $f_{am}$ based on $f_{I}$ and $t_{am}$ . As illustrated in Figure 4, by addressing the uncertainty, the speedup ratio further increases from $1.9\\mathrm{x}$ to $2.8\\mathrm{x}$ .",
295
+ "bbox": [
296
+ 495,
297
+ 536,
298
+ 888,
299
+ 808
300
+ ],
301
+ "page_idx": 1
302
+ },
303
+ {
304
+ "type": "text",
305
+ "text": "We conducted experiments across dialogue, code generation, mathematical reasoning, and instruction following tasks using the MT-bench, HumanEval, GSM8K, and Alpaca datasets, respectively. Tested LLMs included all models from the Vicuna and LLaMA2-Chat series, along with Mixtral 8x7B Instruct. For LLaMA2-Chat 70B, EAGLE",
306
+ "bbox": [
307
+ 495,
308
+ 814,
309
+ 888,
310
+ 905
311
+ ],
312
+ "page_idx": 1
313
+ },
314
+ {
315
+ "type": "header",
316
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
317
+ "bbox": [
318
+ 254,
319
+ 56,
320
+ 718,
321
+ 71
322
+ ],
323
+ "page_idx": 1
324
+ },
325
+ {
326
+ "type": "page_number",
327
+ "text": "2",
328
+ "bbox": [
329
+ 480,
330
+ 922,
331
+ 491,
332
+ 934
333
+ ],
334
+ "page_idx": 1
335
+ },
336
+ {
337
+ "type": "image",
338
+ "img_path": "images/0e53d0242434585b4aaf01d556bab456d69ea97a2096cbc9a1ebc45a4e44dc3e.jpg",
339
+ "image_caption": [
340
+ "Figure 4: Accuracy and speedup ratio of draft models based on tokens, features and feature&shifted-token at temperature=0, tested on MT-bench with Vicuna 7B as the original LLM. Feature&shifted-token refers to using a feature sequence and a token sequence advanced by one time step as inputs."
341
+ ],
342
+ "image_footnote": [],
343
+ "bbox": [
344
+ 96,
345
+ 80,
346
+ 472,
347
+ 176
348
+ ],
349
+ "page_idx": 2
350
+ },
351
+ {
352
+ "type": "text",
353
+ "text": "achieved a speedup ratio of $2.7\\mathrm{x} - 3.5\\mathrm{x}$ , doubled throughput, and theoretically guaranteed the preservation of the generated text's distribution. Figure 1 and 2 illustrates the performance of EAGLE on the MT-bench (Zheng et al., 2023), a highly realistic benchmark simulating actual applications and real-world scenarios, including multi-turn instructions akin to dialogues with ChatGPT. We have chosen to utilize this benchmark as it has been employed by the current state-of-the-art, including Lookahead and Medusa, to demonstrate their speedup ratios. This choice facilitates a fair and direct comparison between our approach and these methods. Compared to the recently proposed speculative sampling-based frameworks, Lookahead and Medusa, EAGLE achieves $1.7\\mathrm{x} - 2.1\\mathrm{x}$ and $1.5\\mathrm{x} - 1.6\\mathrm{x}$ speedups, respectively. EAGLE operates in parallel with other acceleration or throughput-improving methods, such as quantization, compilation, etc. Combining EAGLE with these techniques could further reduce the operational costs of LLM systems. For example, with gpt-fast (PyTorch Labs, 2023), EAGLE accelerates LLaMA2-Chat 7B decoding to 160.4 tokens/s on a single RTX 3090 GPU.",
354
+ "bbox": [
355
+ 86,
356
+ 311,
357
+ 475,
358
+ 630
359
+ ],
360
+ "page_idx": 2
361
+ },
362
+ {
363
+ "type": "text",
364
+ "text": "EAGLE boasts low training costs. For the LLaMA2-Chat 70B model, EAGLE trains a decoder layer with fewer than 1B parameters using no more than 70k dialogues from the ShareGPT dataset. The training is completed in 1-2 days on 4x A100 (40G) GPUs. The training of EAGLE on 7B, 13B and 33B models can even be conducted on a RTX 3090 node in 1-2 days. In practical applications, EAGLE requires only a single training session to provide acceleration for each query. As the number of queries increases, the amortized training cost of EAGLE becomes negligible.",
365
+ "bbox": [
366
+ 84,
367
+ 637,
368
+ 473,
369
+ 789
370
+ ],
371
+ "page_idx": 2
372
+ },
373
+ {
374
+ "type": "text",
375
+ "text": "Beyond performance, EAGLE offers additional advantages:",
376
+ "bbox": [
377
+ 86,
378
+ 796,
379
+ 475,
380
+ 811
381
+ ],
382
+ "page_idx": 2
383
+ },
384
+ {
385
+ "type": "text",
386
+ "text": "- Generality: EAGLE is applicable to any autoregressive LLMs (at least in principle). We have applied EAGLE to LLaMA2-Chat (7B, 13B, 70B), Vicuna (7B, 13B, 33B) and Mixtral 8x7B Instruct in a zero-shot way on the MT-bench, GSM8K, HumanEval and",
387
+ "bbox": [
388
+ 102,
389
+ 829,
390
+ 475,
391
+ 905
392
+ ],
393
+ "page_idx": 2
394
+ },
395
+ {
396
+ "type": "text",
397
+ "text": "alpaca datasets. EAGLE adheres to the commonly used zero-shot/few-shot settings within the LLM community. All experiments employ the same weights, trained exclusively on the ShareGPT dataset, without any additional training on the evaluation datasets. The method adds only a lightweight plug-in (a single transformer decoder layer) to the LLM, which can be easily deployed in a production environment.",
398
+ "bbox": [
399
+ 527,
400
+ 84,
401
+ 887,
402
+ 205
403
+ ],
404
+ "page_idx": 2
405
+ },
406
+ {
407
+ "type": "text",
408
+ "text": "- Reliability: EAGLE does not involve any fine-tuning of the original LLM, and the preservation of the output distribution by EAGLE is theoretically guaranteed for both the greedy and non-greedy settings. This is in sharp contrast to Lookahead and Medusa which either focus solely on greedy settings or do not guarantee the preservation of distribution in these settings.",
409
+ "bbox": [
410
+ 516,
411
+ 215,
412
+ 885,
413
+ 323
414
+ ],
415
+ "page_idx": 2
416
+ },
417
+ {
418
+ "type": "text",
419
+ "text": "2. Preliminaries",
420
+ "text_level": 1,
421
+ "bbox": [
422
+ 496,
423
+ 342,
424
+ 635,
425
+ 357
426
+ ],
427
+ "page_idx": 2
428
+ },
429
+ {
430
+ "type": "text",
431
+ "text": "Notations. In this paper, \"target LLM\" denotes the LLM intended for acceleration, while \"draft model\" refers to the model used for draft generation. \"Feature\" generally signifies the second-to-top-layer feature of a LLM, the hidden state before the LM head. Tokens are denoted by lowercase $t$ , their embeddings by $e$ , features by $f$ , and distributions by $p$ . Sequences are represented in uppercase, for example, $T_{i:j}$ for $(t_i, t_{i+1}, \\dots, t_j)$ . In a LLM, input $T_{1:j}$ is transformed into embeddings $E_{1:j}$ through the embedding layer, then to features $F_{1:j}$ , and the LM Head maps $f_j$ to a distribution $p_{j+1} = \\text{LMHead}(f_j)$ , sampling the next token $t_{j+1}$ . Vanilla autoregression at the token level is described by $T_{1:j} \\rightarrow E_{1:j} \\rightarrow f_j \\rightarrow p_{j+1} \\rightarrow t_{j+1}$ for any integer $j \\geq 1$ .",
432
+ "bbox": [
433
+ 495,
434
+ 367,
435
+ 887,
436
+ 566
437
+ ],
438
+ "page_idx": 2
439
+ },
440
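To make the notation in the preceding paragraph concrete, the following is a minimal, self-contained sketch of the token-level chain $T_{1:j} \rightarrow E_{1:j} \rightarrow f_j \rightarrow p_{j+1} \rightarrow t_{j+1}$. All components (`embed_table`, `mix`, `lm_head`, `step`) are randomly initialized toys chosen for illustration; they are not part of EAGLE or of any real LLM API.

```python
# Toy sketch of vanilla token-level autoregression: T_{1:j} -> E_{1:j} -> f_j -> p_{j+1} -> t_{j+1}.
# Every component here is a randomly initialized stand-in; names are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
vocab, hidden = 100, 16
embed_table = rng.normal(size=(vocab, hidden))        # embedding layer
mix = rng.normal(size=(hidden, hidden)) / hidden      # stand-in for the transformer layers
lm_head = rng.normal(size=(hidden, vocab))            # LM head

def step(tokens):
    E = embed_table[tokens]                 # E_{1:j}: token embeddings
    f_j = np.tanh(E.mean(axis=0) @ mix)     # f_j: second-to-top-layer feature (toy)
    logits = f_j @ lm_head
    p = np.exp(logits - logits.max()); p /= p.sum()   # p_{j+1} = LMHead(f_j)
    return rng.choice(vocab, p=p)           # sample t_{j+1}

tokens = [1, 5, 7]
for _ in range(4):                          # vanilla autoregression: one token per step
    tokens.append(step(np.array(tokens)))
print(tokens)
```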
+ {
441
+ "type": "text",
442
+ "text": "Speculative sampling. Speculative sampling operates through draft and verification phases, with the drafting phase using a smaller model to generate $\\gamma$ tokens $\\hat{T}_{j+1:j+\\gamma}$ and their distributions $\\hat{P}_{j+1:j+\\gamma}$ . In the verification phase, a single forward pass of the target LLM yields the probabilities $P_{j+1:j+\\gamma}$ . Tokens are then sequentially evaluated, with a token $\\hat{t}_{j+i}$ having an acceptance probability $\\min(1, p_{j+i}(\\hat{t}_{j+i}) / \\hat{p}_{j+i}(\\hat{t}_{j+i}))$ . Upon the rejection of a token $\\hat{t}_{j+i}$ , all subsequent tokens are discarded, and this token is resampled based on a distribution norm $(\\max(0, p_{j+i} - \\hat{p}_{j+i}))$ . As proven in Appendix A.1 of speculative sampling (Leviathan et al., 2023), this method equates to sampling directly from the target LLM. EAGLE adopts this method, ensuring that the distribution of the generated text remains unchanged for both the greedy and non-greedy settings.",
443
+ "bbox": [
444
+ 495,
445
+ 571,
446
+ 888,
447
+ 816
448
+ ],
449
+ "page_idx": 2
450
+ },
451
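The accept/resample rule summarized in the preceding paragraph can be sketched as below. The function and variable names (`verify_chain`, `draft_probs`, `target_probs`) and the data layout are illustrative, and the snippet covers only the per-token accept/reject/resample step for a chain draft, not the full drafting loop.

```python
# Sketch of the accept/resample rule of speculative sampling (Leviathan et al., 2023)
# as summarized above. Names and data layout are illustrative, not EAGLE's API.
import numpy as np

def verify_chain(draft_tokens, draft_probs, target_probs, rng):
    """draft_probs[i], target_probs[i]: vocab-sized distributions for drafted position i."""
    accepted = []
    for i, tok in enumerate(draft_tokens):
        p, q = target_probs[i], draft_probs[i]
        if rng.random() < min(1.0, p[tok] / q[tok]):       # accept with prob min(1, p/q)
            accepted.append(tok)
        else:                                               # reject: resample from norm(max(0, p - q))
            residual = np.maximum(p - q, 0.0)
            accepted.append(rng.choice(len(p), p=residual / residual.sum()))
            break                                           # discard all later draft tokens
    return accepted

# Toy usage with a 5-token vocabulary and two drafted tokens.
rng = np.random.default_rng(0)
q = np.full((2, 5), 0.2)                                    # draft distributions
p = np.tile(np.array([0.1, 0.4, 0.2, 0.2, 0.1]), (2, 1))    # target distributions
print(verify_chain([1, 3], q, p, rng))
```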
+ {
452
+ "type": "text",
453
+ "text": "3. EAGLE",
454
+ "text_level": 1,
455
+ "bbox": [
456
+ 496,
457
+ 834,
458
+ 591,
459
+ 849
460
+ ],
461
+ "page_idx": 2
462
+ },
463
+ {
464
+ "type": "text",
465
+ "text": "EAGLE, aligning with other speculative sampling-based methods, incorporates both a drafting phase and a verification phase.",
466
+ "bbox": [
467
+ 495,
468
+ 859,
469
+ 887,
470
+ 906
471
+ ],
472
+ "page_idx": 2
473
+ },
474
+ {
475
+ "type": "header",
476
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
477
+ "bbox": [
478
+ 254,
479
+ 56,
480
+ 715,
481
+ 71
482
+ ],
483
+ "page_idx": 2
484
+ },
485
+ {
486
+ "type": "page_number",
487
+ "text": "3",
488
+ "bbox": [
489
+ 480,
490
+ 922,
491
+ 491,
492
+ 934
493
+ ],
494
+ "page_idx": 2
495
+ },
496
+ {
497
+ "type": "image",
498
+ "img_path": "images/0b34fb43aa41af6a614695df0acfb46820aa3e064729be7274ad5d71277a4a9f.jpg",
499
+ "image_caption": [
500
+ "Figure 5: A comparison of the methods for drafting the fourth and fifth tokens, $t_4$ and $t_5$ . $t$ (represented by blue blocks) denotes tokens, and $f$ (orange blocks) signifies the features, with subscripts indicating their positions in the sequence. The red border indicates the predictions of the draft model. For simplicity, the $n$ in the $n$ -gram for Lookahead, as shown in the figure, has been set to 2."
501
+ ],
502
+ "image_footnote": [],
503
+ "bbox": [
504
+ 93,
505
+ 80,
506
+ 467,
507
+ 180
508
+ ],
509
+ "page_idx": 3
510
+ },
511
+ {
512
+ "type": "text",
513
+ "text": "3.1. Drafting phase",
514
+ "text_level": 1,
515
+ "bbox": [
516
+ 84,
517
+ 345,
518
+ 225,
519
+ 363
520
+ ],
521
+ "page_idx": 3
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "The primary distinction between EAGLE and other methods lies predominantly in the drafting phase. Figure 5 illustrates a schematic of the drafting phase for different methods. Speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) and Lookahead (Fu et al., 2023) predict tokens based on tokens. Medusa (Cai et al., 2023) independently predicts $t_4$ and $t_5$ using the feature $f_2$ from the target LLM. EAGLE predicts $f_3$ using the feature sequence $(f_1, f_2)$ and the token sequence $(t_2, t_3)$ , advanced by one time step. From $p_4 = \\mathrm{LMHead}(f_3)$ , $t_4$ is sampled. Subsequently, $f_3$ and $t_4$ are concatenated into the input sequence to predict the next feature $f_4$ and sample the subsequent token $t_5$ .",
526
+ "bbox": [
527
+ 84,
528
+ 369,
529
+ 475,
530
+ 551
531
+ ],
532
+ "page_idx": 3
533
+ },
534
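The drafting step described in the preceding paragraph (predict $f_3$ from $(f_1, f_2)$ and the shifted tokens $(t_2, t_3)$, sample $t_4$ from $p_4 = \mathrm{LMHead}(f_3)$, then append both and repeat) can be written as a short loop. `draft_model`, `lm_head`, and `sample` below are placeholders for the components named in the text, not actual EAGLE code.

```python
# Sketch of EAGLE's feature-level drafting loop as described above. `draft_model`,
# `lm_head`, and `sample` are placeholders for the Autoregression Head, the target
# LLM's LM head, and the sampling step.
def draft(features, tokens, draft_model, lm_head, sample, num_steps):
    """features = [f_1, ..., f_j]; tokens = [t_2, ..., t_{j+1}] (advanced by one step)."""
    drafted = []
    for _ in range(num_steps):
        f_next = draft_model(features, tokens)   # e.g. predict f_3 from (f_1, f_2), (t_2, t_3)
        t_next = sample(lm_head(f_next))         # e.g. sample t_4 from p_4 = LMHead(f_3)
        features = features + [f_next]           # feed the predicted feature back in
        tokens = tokens + [t_next]               # ...together with the sampled token
        drafted.append(t_next)
    return drafted

# Toy usage: features are floats, the "draft model" just averages, sampling is deterministic.
toy = draft(features=[0.1, 0.2], tokens=[7, 9],
            draft_model=lambda fs, ts: sum(fs) / len(fs) + 0.01 * ts[-1],
            lm_head=lambda f: f, sample=lambda p: int(p * 10), num_steps=2)
print(toy)
```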
+ {
535
+ "type": "text",
536
+ "text": "As illustrated in Figure 6, EAGLE's draft model comprises three modules: the Embedding layer, LM Head, and Autoregression Head. The Embedding layer and LM Head employ the parameters of the target LLM and do not necessitate additional training. The draft model takes as input a feature sequence of shape (bs, seq_len, hidden_dim) and an advanced token sequence of shape (bs, seq_len). It then converts the token sequence into a token embedding sequence of shape (bs, seq_len, hidden_dim), and concatenates it to form a fused sequence of shape (bs, seq_len, $2 \\times$ hidden_dim). The Autoregression Head consisting of an FC layer and a decoder layer. The FC layer reduces the dimensionality of the fused sequence to (bs, seq_len, hidden_dim) and then we utilize the decoder layer to predict the next feature. The LM Head calculates the distribution based on the feature, from which the next token is sampled. Finally, the predicted feature and the sampled token are concatenated into the input, facilitating the continuation of the autoregressive process. EAGLE creates a tree-structured draft using tree attention, generating a draft tree with depth $m$ and more than $m$ tokens through $m$ forward passes. For instance, as shown in Figure 6, EAGLE drafts a 10-token tree with just 3 forward passes. The actual tree structure employed by EAGLE is",
537
+ "bbox": [
538
+ 84,
539
+ 558,
540
+ 478,
541
+ 906
542
+ ],
543
+ "page_idx": 3
544
+ },
545
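The module layout described in the preceding paragraph (reuse the target LLM's Embedding layer and LM Head, concatenate the feature and token-embedding sequences, project back to hidden_dim with an FC layer, and predict the next feature with a decoder layer) could look roughly like the PyTorch sketch below. `nn.TransformerEncoderLayer` is used only as a self-attention stand-in for the target model's decoder layer, and the class and argument names are illustrative, not EAGLE's actual implementation.

```python
# Rough PyTorch sketch of the draft-model layout described above. In EAGLE the embedding
# layer and LM head reuse the target LLM's (frozen) weights and the decoder layer is a
# LLaMA-style layer; here everything is randomly initialized and names are illustrative.
import torch
import torch.nn as nn

class AutoregressionHeadSketch(nn.Module):
    def __init__(self, vocab_size=32000, hidden_dim=4096):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_dim)              # from target LLM, not trained
        self.lm_head = nn.Linear(hidden_dim, vocab_size, bias=False)   # from target LLM, not trained
        self.fc = nn.Linear(2 * hidden_dim, hidden_dim)                # fuse feature + token embedding
        self.decoder = nn.TransformerEncoderLayer(hidden_dim, nhead=8, batch_first=True)

    def forward(self, features, shifted_tokens):
        # features: (bs, seq_len, hidden_dim); shifted_tokens: (bs, seq_len)
        tok_emb = self.embed(shifted_tokens)                    # (bs, seq_len, hidden_dim)
        fused = torch.cat([features, tok_emb], dim=-1)          # (bs, seq_len, 2*hidden_dim)
        next_feature = self.decoder(self.fc(fused))             # predict the next feature
        next_logits = self.lm_head(next_feature)                # distribution for the next token
        return next_feature, next_logits

# Toy shapes only; real hidden_dim/vocab_size come from the target LLM.
m = AutoregressionHeadSketch(vocab_size=100, hidden_dim=64)
f, logits = m(torch.randn(1, 3, 64), torch.randint(0, 100, (1, 3)))
print(f.shape, logits.shape)   # torch.Size([1, 3, 64]) torch.Size([1, 3, 100])
```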
+ {
546
+ "type": "image",
547
+ "img_path": "images/63c6033693d1a78865c7c2c61213735c1220f550097691395c93f7c629f01b54.jpg",
548
+ "image_caption": [],
549
+ "image_footnote": [],
550
+ "bbox": [
551
+ 504,
552
+ 82,
553
+ 877,
554
+ 304
555
+ ],
556
+ "page_idx": 3
557
+ },
558
+ {
559
+ "type": "image",
560
+ "img_path": "images/9cec80c8a0c3ffe53184a527cdd87bd77433c206fa261d9bfc6226fc9e781707.jpg",
561
+ "image_caption": [
562
+ "Figure 6: Pipeline of EAGLE. The upper section illustrates the computational process, while the lower section displays the corresponding generation results for each step. In the upper section, green blocks represent token embeddings, orange blocks represent features, red boxes indicate the predictions of the draft model, and blue modules with snowflake icons represent the use of target LLM parameters, which are not subject to training."
563
+ ],
564
+ "image_footnote": [],
565
+ "bbox": [
566
+ 550,
567
+ 309,
568
+ 838,
569
+ 406
570
+ ],
571
+ "page_idx": 3
572
+ },
573
+ {
574
+ "type": "text",
575
+ "text": "detailed in Appendix A.1.",
576
+ "bbox": [
577
+ 496,
578
+ 580,
579
+ 671,
580
+ 598
581
+ ],
582
+ "page_idx": 3
583
+ },
584
+ {
585
+ "type": "text",
586
+ "text": "3.2. Training of the draft models",
587
+ "text_level": 1,
588
+ "bbox": [
589
+ 496,
590
+ 613,
591
+ 728,
592
+ 628
593
+ ],
594
+ "page_idx": 3
595
+ },
596
+ {
597
+ "type": "text",
598
+ "text": "Predicting the next feature constitutes a regression task, for which we employ Smooth L1 loss (see Figure 5 EAGLE):",
599
+ "bbox": [
600
+ 496,
601
+ 637,
602
+ 887,
603
+ 669
604
+ ],
605
+ "page_idx": 3
606
+ },
607
+ {
608
+ "type": "equation",
609
+ "text": "\n$$\nL _ {r e g} = \\text {S m o o t h L 1} \\left(f _ {i + 1}, \\text {D r a f t} \\text {M o d e l} \\left(T _ {2: i + 1}, F _ {1: i}\\right)\\right).\n$$\n",
610
+ "text_format": "latex",
611
+ "bbox": [
612
+ 513,
613
+ 678,
614
+ 870,
615
+ 696
616
+ ],
617
+ "page_idx": 3
618
+ },
619
+ {
620
+ "type": "text",
621
+ "text": "Predicting features is an intermediary objective of the draft model, with the ultimate goal being the prediction of tokens to generate a sequence of tokens. Consequently, we also employ classification loss to directly optimize towards this final objective:",
622
+ "bbox": [
623
+ 496,
624
+ 705,
625
+ 885,
626
+ 782
627
+ ],
628
+ "page_idx": 3
629
+ },
630
+ {
631
+ "type": "equation",
632
+ "text": "\n$$\np _ {i + 2} = \\operatorname {S o f t m a x} (\\operatorname {L M} _ {-} \\operatorname {H e a d} (f _ {i + 1})),\n$$\n",
633
+ "text_format": "latex",
634
+ "bbox": [
635
+ 570,
636
+ 792,
637
+ 808,
638
+ 809
639
+ ],
640
+ "page_idx": 3
641
+ },
642
+ {
643
+ "type": "equation",
644
+ "text": "\n$$\n\\hat {p} _ {i + 2} = \\operatorname {S o f t m a x} (\\operatorname {L M} _ {-} \\operatorname {H e a d} (\\hat {f} _ {i + 1})),\n$$\n",
645
+ "text_format": "latex",
646
+ "bbox": [
647
+ 571,
648
+ 811,
649
+ 808,
650
+ 830
651
+ ],
652
+ "page_idx": 3
653
+ },
654
+ {
655
+ "type": "equation",
656
+ "text": "\n$$\nL _ {c l s} = \\operatorname {C r o s s} \\text {E n t r o p y} \\left(p _ {i + 2}, \\hat {p} _ {i + 2}\\right).\n$$\n",
657
+ "text_format": "latex",
658
+ "bbox": [
659
+ 575,
660
+ 833,
661
+ 807,
662
+ 849
663
+ ],
664
+ "page_idx": 3
665
+ },
666
+ {
667
+ "type": "text",
668
+ "text": "By integrating regression loss and classification loss, we train the Autoregression Head using the combined loss function $L = L_{reg} + w_{cls}L_{cls}$ . Typically, the classification loss",
669
+ "bbox": [
670
+ 496,
671
+ 859,
672
+ 888,
673
+ 906
674
+ ],
675
+ "page_idx": 3
676
+ },
677
+ {
678
+ "type": "header",
679
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
680
+ "bbox": [
681
+ 254,
682
+ 56,
683
+ 718,
684
+ 71
685
+ ],
686
+ "page_idx": 3
687
+ },
688
+ {
689
+ "type": "page_number",
690
+ "text": "4",
691
+ "bbox": [
692
+ 480,
693
+ 922,
694
+ 491,
695
+ 934
696
+ ],
697
+ "page_idx": 3
698
+ },
699
+ {
700
+ "type": "text",
701
+ "text": "is an order of magnitude larger than the regression loss in numerical terms. Consequently, we set $w_{cls}$ to 0.1.",
702
+ "bbox": [
703
+ 84,
704
+ 85,
705
+ 473,
706
+ 116
707
+ ],
708
+ "page_idx": 4
709
+ },
710
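A minimal sketch of the combined objective $L = L_{reg} + w_{cls} L_{cls}$ described above, with $w_{cls} = 0.1$: Smooth L1 on the predicted feature, plus a cross-entropy between the target-feature and predicted-feature distributions written out explicitly with soft targets. The function and tensor names are illustrative.

```python
# Sketch of the combined training objective L = L_reg + w_cls * L_cls described above.
# f_target / f_pred stand in for the target LLM's feature and the draft model's prediction.
import torch
import torch.nn.functional as F

def eagle_style_loss(f_target, f_pred, lm_head, w_cls=0.1):
    # Regression loss on the predicted feature (Smooth L1).
    l_reg = F.smooth_l1_loss(f_pred, f_target)
    # Classification loss between p = Softmax(LM_Head(f)) and p_hat = Softmax(LM_Head(f_hat)).
    p = F.softmax(lm_head(f_target), dim=-1)
    log_p_hat = F.log_softmax(lm_head(f_pred), dim=-1)
    l_cls = -(p * log_p_hat).sum(dim=-1).mean()     # cross-entropy with soft targets
    return l_reg + w_cls * l_cls

# Toy usage with random tensors standing in for real features.
lm_head = torch.nn.Linear(64, 100, bias=False)
loss = eagle_style_loss(torch.randn(2, 5, 64), torch.randn(2, 5, 64), lm_head)
loss.backward()
```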
+ {
711
+ "type": "text",
712
+ "text": "EAGLE's Autoregression Head is ideally trained with autoregressively generated text from the target LLM, yet this approach is costly. Fortunately, EAGLE exhibits low sensitivity to training data (ablation study in Section 4.3.3). Instead of employing text generated by the target LLM, we utilize a fixed dataset, substantially reducing the overhead. During the drafting phase, EAGLE autoregressively processes features. Inaccuracies in features can lead to error accumulation. To mitigate this issue, we employ data augmentation by adding random noise sampled from a uniform distribution $\\mathcal{U}(-0.1,0.1)$ to features of the target LLM during training (Jain et al., 2023).",
713
+ "bbox": [
714
+ 84,
715
+ 122,
716
+ 475,
717
+ 305
718
+ ],
719
+ "page_idx": 4
720
+ },
721
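The data augmentation mentioned above is simply uniform noise from $\mathcal{U}(-0.1, 0.1)$ added to the target LLM's features during training; a minimal sketch follows, with illustrative names.

```python
# Sketch of the feature-noise augmentation described above: add uniform noise from
# U(-0.1, 0.1) to the target LLM's features during training only.
import torch

def add_feature_noise(features, scale=0.1, training=True):
    if not training:
        return features
    noise = torch.empty_like(features).uniform_(-scale, scale)
    return features + noise

noisy = add_feature_noise(torch.randn(2, 5, 64))
```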
+ {
722
+ "type": "text",
723
+ "text": "3.3. Verification phase",
724
+ "text_level": 1,
725
+ "bbox": [
726
+ 84,
727
+ 320,
728
+ 243,
729
+ 335
730
+ ],
731
+ "page_idx": 4
732
+ },
733
+ {
734
+ "type": "text",
735
+ "text": "Employing tree attention, the target LLM computes the probability of each token in the tree-structured draft through a single forward pass. At every node of the draft tree, we recursively apply speculative sampling algorithms to sample or adjust the distribution (details in Appendix A.2), consistent with SpecInfer (Miao et al., 2023), ensuring that the distribution of the output text aligns with that of the target LLM. Concurrently, we document accepted tokens and their features for use in the next drafting phase.",
736
+ "bbox": [
737
+ 84,
738
+ 344,
739
+ 475,
740
+ 481
741
+ ],
742
+ "page_idx": 4
743
+ },
744
+ {
745
+ "type": "text",
746
+ "text": "4. Experiments",
747
+ "text_level": 1,
748
+ "bbox": [
749
+ 84,
750
+ 500,
751
+ 217,
752
+ 516
753
+ ],
754
+ "page_idx": 4
755
+ },
756
+ {
757
+ "type": "text",
758
+ "text": "Models and tasks. We conducted experiments on Vicuna models (7B, 13B, 33B), LLaMA2-chat models (7B, 13B, 70B), and Mixtral 8x7B Instruct, encompassing the common sizes of current mainstream LLMs. We evaluated EAGLE across multiple tasks including multi-turn dialogue, code generation, mathematical reasoning, and instruction following, employing the MT-bench (Zheng et al., 2023), HumanEval (Chen et al., 2021), GSM8K (Cobbe et al., 2021), and Alpaca (Taori et al., 2023) datasets, respectively. Speculative sampling (Leviathan et al., 2023) conducted experiments with a batch size of 1, a setting subsequently adopted by other works such as DistillSpec (Zhou et al., 2023) and BiLD (Kim et al., 2023). Similarly, the majority of our experiments also adopted this setting. Experiments with a batch size greater than 1 are presented in Section 4.4.",
759
+ "bbox": [
760
+ 84,
761
+ 525,
762
+ 475,
763
+ 752
764
+ ],
765
+ "page_idx": 4
766
+ },
767
+ {
768
+ "type": "text",
769
+ "text": "Metrics. Like other speculative sampling-based methods, EAGLE primarily focuses on latency rather than throughput. We assess acceleration effects using the following metrics:",
770
+ "bbox": [
771
+ 84,
772
+ 758,
773
+ 475,
774
+ 806
775
+ ],
776
+ "page_idx": 4
777
+ },
778
+ {
779
+ "type": "list",
780
+ "sub_type": "text",
781
+ "list_items": [
782
+ "- Walltime speedup ratio: The actual test speedup ratio relative to vanilla autoregressive decoding.",
783
+ "- Average acceptance length $\\tau$ : The average number of tokens accepted per forward pass of the target LLM."
784
+ ],
785
+ "bbox": [
786
+ 104,
787
+ 825,
788
+ 473,
789
+ 905
790
+ ],
791
+ "page_idx": 4
792
+ },
793
+ {
794
+ "type": "text",
795
+ "text": "- Acceptance rate $\\alpha$ : The ratio of accepted to generated tokens during drafting, gauges draft accuracy. It's less applicable for tree drafts due to multiple tokens sampled per location with only one accepted. Hence, when measuring this metric, we utilize chain drafts without tree attention, aligning with speculative sampling and DistillSpec. EAGLE's draft model inputs feature and token sequences. Autoregressive feature processing can propagate errors, so we measure the acceptance rate as $n - \\alpha$ , considering $n$ features predicted by the draft model, potentially with inaccuracies.",
796
+ "bbox": [
797
+ 516,
798
+ 84,
799
+ 885,
800
+ 251
801
+ ],
802
+ "page_idx": 4
803
+ },
804
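To make the metrics above concrete, here is a small illustrative calculation of the average acceptance length $\tau$ (tokens accepted per target-LLM forward pass) and the acceptance rate $\alpha$ (accepted over drafted tokens in the chain-draft setting). The per-pass counts are made up purely for illustration.

```python
# Illustrative aggregation of the metrics described above; the counts are made up.
accepted_per_pass = [4, 2, 5, 3]     # tokens accepted in each target-LLM forward pass
drafted_per_pass  = [5, 5, 5, 5]     # chain drafts: gamma tokens proposed per pass

tau   = sum(accepted_per_pass) / len(accepted_per_pass)   # average acceptance length
alpha = sum(accepted_per_pass) / sum(drafted_per_pass)    # acceptance rate (chain draft)
print(f"tau = {tau:.2f}, alpha = {alpha:.2f}")            # tau = 3.50, alpha = 0.70
```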
+ {
805
+ "type": "text",
806
+ "text": "Acceleration of EAGLE theoretically guarantees the preservation of the target LLMs' output distribution. Consequently, evaluating the quality of EAGLE's generated results is both unnecessary and meaningless.",
807
+ "bbox": [
808
+ 496,
809
+ 268,
810
+ 887,
811
+ 330
812
+ ],
813
+ "page_idx": 4
814
+ },
815
+ {
816
+ "type": "text",
817
+ "text": "Training. We fixed the target LLMs. EAGLE was trained on the ShareGPT dataset, utilizing 68,000 dialogue iterations with a learning rate set at 3e-5. We employed the AdamW optimizer with beta values $(\\beta_{1},\\beta_{2})$ set to (0.9, 0.95) and implemented gradient clipping of 0.5. The trainable parameters of EAGLE corresponding to the 7B, 13B, 33B, and 70B models are 0.24B, 0.37B, 0.56B, and 0.99B, respectively. The trainable parameters of EAGLE for MoE model Mixtral 8x7B is 0.28B. EAGLE is characterized by its low training cost; the Autoregression Head is trainable within 1-2 days on an A100 40G server for the 70B models.",
818
+ "bbox": [
819
+ 496,
820
+ 335,
821
+ 887,
822
+ 503
823
+ ],
824
+ "page_idx": 4
825
+ },
826
+ {
827
+ "type": "text",
828
+ "text": "4.1. Effectiveness",
829
+ "text_level": 1,
830
+ "bbox": [
831
+ 496,
832
+ 520,
833
+ 622,
834
+ 534
835
+ ],
836
+ "page_idx": 4
837
+ },
838
+ {
839
+ "type": "text",
840
+ "text": "Figures 1 and 2, along with Table 1, display the speedup ratios of EAGLE. EAGLE demonstrates better acceleration at temperature $= 0$ compared to temperature $= 1$ . For instance, for LLaMA2-Chat 13B at temperature $= 0$ , the speedup ratios range from $3.01x$ to $3.76x$ , while at temperature $= 1$ , they range from $2.66x$ to $2.89x$ . In code generation tasks (HumanEval), EAGLE achieves its best acceleration performance. This is attributed to the prevalence of fixed templates in code, making it easier to generate drafts for these templates. Compared to recently introduced speculative sampling-based methods, Lookahead and Medusa, EAGLE is faster by $1.70x - 2.08x$ and $1.47x - 1.60x$ , respectively. Employing speculative sampling in the Vicuna and LLaMA2-Chat series is challenging. For the 7B model, there is no suitable draft model. For other sizes, using the 7B model as the draft model, we iterated through draft lengths from 2 to 10 and reported the highest speedup ratio. For the 13B model, we observed no improvement in speed. For the 33B and 70B models, the speedup ratios were $1.12x$ and $1.88x$ , respectively. For DistillSpec, to ensure fairness, we used the same training data as EAGLE. Additionally, the divergence function employed follows the FKL as detailed in Appendix A.1 of the DistillSpec paper. While distillation slightly improved the speedup ratio, the limited enhancement is",
841
+ "bbox": [
842
+ 495,
843
+ 542,
844
+ 887,
845
+ 906
846
+ ],
847
+ "page_idx": 4
848
+ },
849
+ {
850
+ "type": "header",
851
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
852
+ "bbox": [
853
+ 254,
854
+ 56,
855
+ 715,
856
+ 71
857
+ ],
858
+ "page_idx": 4
859
+ },
860
+ {
861
+ "type": "page_number",
862
+ "text": "5",
863
+ "bbox": [
864
+ 480,
865
+ 922,
866
+ 491,
867
+ 934
868
+ ],
869
+ "page_idx": 4
870
+ },
871
+ {
872
+ "type": "table",
873
+ "img_path": "images/42948b6e71f3b7c9f74366a0cb0d6837abf91b1bf5d17415b0638d6e4c687ec7.jpg",
874
+ "table_caption": [
875
+ "Table 1: Speedup ratio and average acceptance length $\\tau$ on HumanEval, GSM8K, and Alpaca. T denotes temperature, V represents Vicuna, and LC stands for LLaMA2-Chat."
876
+ ],
877
+ "table_footnote": [],
878
+ "table_body": "<table><tr><td colspan=\"2\"></td><td colspan=\"2\">HumanEval</td><td colspan=\"2\">GSM8K</td><td colspan=\"2\">Alpaca</td></tr><tr><td></td><td>Model</td><td>Speedup</td><td>τ</td><td>Speedup</td><td>τ</td><td>Speedup</td><td>τ</td></tr><tr><td rowspan=\"6\">T=0</td><td>V 7B</td><td>3.33x</td><td>4.29</td><td>3.01x</td><td>4.00</td><td>2.79x</td><td>3.86</td></tr><tr><td>V13B</td><td>3.58x</td><td>4.39</td><td>3.08x</td><td>3.97</td><td>3.03x</td><td>3.95</td></tr><tr><td>V 33B</td><td>3.67x</td><td>4.28</td><td>3.25x</td><td>3.94</td><td>2.97x</td><td>3.61</td></tr><tr><td>LC 7B</td><td>3.17x</td><td>4.24</td><td>2.91x</td><td>3.82</td><td>2.78x</td><td>3.71</td></tr><tr><td>LC 13B</td><td>3.76x</td><td>4.52</td><td>3.20x</td><td>4.03</td><td>3.01x</td><td>3.83</td></tr><tr><td>LC 70B</td><td>3.52x</td><td>4.42</td><td>3.03x</td><td>3.93</td><td>2.97x</td><td>3.77</td></tr><tr><td rowspan=\"6\">T=1</td><td>V 7B</td><td>2.39x</td><td>3.43</td><td>2.34x</td><td>3.29</td><td>2.21x</td><td>3.30</td></tr><tr><td>V13B</td><td>2.65x</td><td>3.63</td><td>2.57x</td><td>3.60</td><td>2.45x</td><td>3.57</td></tr><tr><td>V 33B</td><td>2.76x</td><td>3.62</td><td>2.77x</td><td>3.60</td><td>2.52x</td><td>3.32</td></tr><tr><td>LC 7B</td><td>2.61x</td><td>3.79</td><td>2.40x</td><td>3.52</td><td>2.29x</td><td>3.33</td></tr><tr><td>LC 13B</td><td>2.89x</td><td>3.78</td><td>2.82x</td><td>3.67</td><td>2.66x</td><td>3.55</td></tr><tr><td>LC 70B</td><td>2.92x</td><td>3.76</td><td>2.74x</td><td>3.58</td><td>2.65x</td><td>3.47</td></tr></table>",
879
+ "bbox": [
880
+ 89,
881
+ 138,
882
+ 475,
883
+ 327
884
+ ],
885
+ "page_idx": 5
886
+ },
887
+ {
888
+ "type": "table",
889
+ "img_path": "images/7446c716b989669b78a3d415327064b88e0d9f3ebc4707e04c99954530a519a2.jpg",
890
+ "table_caption": [
891
+ "Table 2: Average acceptance length $\\tau$ and acceptance rate $\\alpha$ on MT-bench. T denotes temperature."
892
+ ],
893
+ "table_footnote": [],
894
+ "table_body": "<table><tr><td></td><td>Model</td><td>τ</td><td>0-α</td><td>1-α</td><td>2-α</td><td>3-α</td><td>4-α</td></tr><tr><td rowspan=\"6\">T=0</td><td>Vicuna 7B</td><td>3.94</td><td>0.79</td><td>0.74</td><td>0.72</td><td>0.73</td><td>0.67</td></tr><tr><td>Vicuna 13B</td><td>3.98</td><td>0.79</td><td>0.74</td><td>0.72</td><td>0.74</td><td>0.70</td></tr><tr><td>Vicuna 33B</td><td>3.68</td><td>0.74</td><td>0.69</td><td>0.67</td><td>0.67</td><td>0.66</td></tr><tr><td>LLaMA2-Chat 7B</td><td>3.62</td><td>0.76</td><td>0.69</td><td>0.67</td><td>0.68</td><td>0.68</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.90</td><td>0.77</td><td>0.69</td><td>0.69</td><td>0.70</td><td>0.71</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.81</td><td>0.75</td><td>0.69</td><td>0.65</td><td>0.64</td><td>0.64</td></tr><tr><td rowspan=\"6\">T=1</td><td>Vicuna 7B</td><td>3.17</td><td>0.71</td><td>0.68</td><td>0.66</td><td>0.66</td><td>0.65</td></tr><tr><td>Vicuna 13B</td><td>3.20</td><td>0.73</td><td>0.68</td><td>0.68</td><td>0.67</td><td>0.69</td></tr><tr><td>Vicuna 33B</td><td>3.22</td><td>0.71</td><td>0.67</td><td>0.64</td><td>0.64</td><td>0.64</td></tr><tr><td>LLaMA2-Chat 7B</td><td>3.30</td><td>0.71</td><td>0.66</td><td>0.66</td><td>0.66</td><td>0.64</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.45</td><td>0.73</td><td>0.69</td><td>0.66</td><td>0.67</td><td>0.67</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.46</td><td>0.73</td><td>0.67</td><td>0.64</td><td>0.66</td><td>0.65</td></tr></table>",
895
+ "bbox": [
896
+ 89,
897
+ 385,
898
+ 473,
899
+ 559
900
+ ],
901
+ "page_idx": 5
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "because distillation aims to increase the draft model's acceptance rate, while the bottleneck for speculative sampling performance lies in the high overhead of the draft model.",
906
+ "bbox": [
907
+ 84,
908
+ 588,
909
+ 475,
910
+ 635
911
+ ],
912
+ "page_idx": 5
913
+ },
914
+ {
915
+ "type": "text",
916
+ "text": "Tables 1 and 2 indicate that in EAGLE, the target LLM generates 3.2-4.5 tokens per forward pass, surpassing vanilla decoding which produces only one token per forward pass, thereby significantly increasing generation speed. As shown in Figure 2 and Appendix B, the acceptance rate for completely accurate feature sequences, $0 - \\alpha$ , significantly exceeds that for sequences with a single erroneous feature, $1 - \\alpha$ , indicating the impact of feature errors on draft model performance. Yet, the slight variation between $1 - \\alpha$ to $4 - \\alpha$ underscores EAGLE's robustness to feature errors and its adept handling of error accumulation.",
917
+ "bbox": [
918
+ 84,
919
+ 641,
920
+ 475,
921
+ 808
922
+ ],
923
+ "page_idx": 5
924
+ },
925
+ {
926
+ "type": "text",
927
+ "text": "Table 3 reveals that EAGLE achieved a $1.5\\mathrm{x}$ speedup with the Mixtral 8x7B Instruct model. This modest acceleration, compared to models like LLaMA, is due to a shorter average acceptance length and the complexity of accelerating MoE models via speculative sampling. MoE models typically require reading the weights of only two experts per token",
928
+ "bbox": [
929
+ 84,
930
+ 814,
931
+ 475,
932
+ 906
933
+ ],
934
+ "page_idx": 5
935
+ },
936
+ {
937
+ "type": "text",
938
+ "text": "Table 3: Speedup ratio, average acceptance length $\\tau$ , and acceptance rate $\\alpha$ on MT-bench at temperature=0. The target LLM is Mixtral 8x7B Instruct-v0.1.",
939
+ "bbox": [
940
+ 496,
941
+ 83,
942
+ 885,
943
+ 128
944
+ ],
945
+ "page_idx": 5
946
+ },
947
+ {
948
+ "type": "table",
949
+ "img_path": "images/e9780790ce0c83e7527f193088b3b52d5fc977792efba61173174a2b19f58f3c.jpg",
950
+ "table_caption": [],
951
+ "table_footnote": [],
952
+ "table_body": "<table><tr><td>Speedup</td><td>τ</td><td>0-α</td><td>1-α</td><td>2-α</td><td>3-α</td><td>4-α</td></tr><tr><td>1.50x</td><td>3.25</td><td>0.67</td><td>0.62</td><td>0.61</td><td>0.64</td><td>0.63</td></tr></table>",
953
+ "bbox": [
954
+ 506,
955
+ 138,
956
+ 877,
957
+ 186
958
+ ],
959
+ "page_idx": 5
960
+ },
961
+ {
962
+ "type": "table",
963
+ "img_path": "images/98a53652d6580a138ff001e7e09b0da1be7eb12443cf880a53fc7bc6272eec01.jpg",
964
+ "table_caption": [
965
+ "Table 4: Generation speed of EAGLE combined with gpt-fast, evaluated on MT-bench with LLaMA2-Chat 7B at temperature=0."
966
+ ],
967
+ "table_footnote": [],
968
+ "table_body": "<table><tr><td>Precision</td><td>FP16</td><td>int4</td></tr><tr><td>Vanilla (Huggingface)</td><td>24.5 tokens/s</td><td>N/A</td></tr><tr><td>gpt-fast</td><td>55.1 tokens/s</td><td>106.9 tokens/s</td></tr><tr><td>EAGLE + gpt-fast</td><td>100.2 tokens/s</td><td>160.4 tokens/s</td></tr></table>",
969
+ "bbox": [
970
+ 498,
971
+ 258,
972
+ 893,
973
+ 335
974
+ ],
975
+ "page_idx": 5
976
+ },
977
+ {
978
+ "type": "text",
979
+ "text": "during vanilla autoregressive decoding. However, during the verification phase of speculative sampling, processing multiple tokens may necessitate accessing the weights of more than two experts, contrasting with dense decoder-only models where all weights are read regardless of the number of tokens forwarded.",
980
+ "bbox": [
981
+ 495,
982
+ 363,
983
+ 887,
984
+ 455
985
+ ],
986
+ "page_idx": 5
987
+ },
988
+ {
989
+ "type": "text",
990
+ "text": "4.2. Case study: EAGLE + gpt-fast",
991
+ "text_level": 1,
992
+ "bbox": [
993
+ 496,
994
+ 470,
995
+ 746,
996
+ 486
997
+ ],
998
+ "page_idx": 5
999
+ },
1000
+ {
1001
+ "type": "text",
1002
+ "text": "EAGLE is compatible with other acceleration technologies. We conducted experiments combining EAGLE with gpt-fast, which employs quantization and compilation for acceleration. As shown in Figure 4, by integrating EAGLE with gpt-fast, we increased the generation speed of LLaMA2-Chat 7B on a single RTX 3090 to 160.4 tokens/s.",
1003
+ "bbox": [
1004
+ 495,
1005
+ 494,
1006
+ 888,
1007
+ 585
1008
+ ],
1009
+ "page_idx": 5
1010
+ },
1011
+ {
1012
+ "type": "text",
1013
+ "text": "4.3. Ablation study",
1014
+ "text_level": 1,
1015
+ "bbox": [
1016
+ 496,
1017
+ 601,
1018
+ 635,
1019
+ 617
1020
+ ],
1021
+ "page_idx": 5
1022
+ },
1023
+ {
1024
+ "type": "text",
1025
+ "text": "4.3.1. TREE ATTENTION",
1026
+ "text_level": 1,
1027
+ "bbox": [
1028
+ 496,
1029
+ 625,
1030
+ 669,
1031
+ 638
1032
+ ],
1033
+ "page_idx": 5
1034
+ },
1035
+ {
1036
+ "type": "text",
1037
+ "text": "EAGLE, similar to SpecInfer and Medusa, employs tree attention, where both the generation and validation of drafts are tree-structured. In contrast, methods like speculative sampling do not use tree attention, resulting in chain-structured draft generation and validation. Table 5 and Figure 7 present comparative results indicating the impact of using tree attention. The implementation of tree draft and verification in EAGLE results in an approximate increase of 0.6-0.8 in the average acceptance length and about 0.3-0.5 in the speedup ratio. Compared to chain draft and verification, tree draft and verification do not increase the number of forward passes in the model (both the target LLM and the draft model), but they do increase the number of tokens processed per forward pass. Consequently, the improvement in the speedup ratio is less pronounced than the increase in average acceptance length. Notably, even without employing tree draft and verification, EAGLE demonstrates a",
1038
+ "bbox": [
1039
+ 495,
1040
+ 648,
1041
+ 887,
1042
+ 906
1043
+ ],
1044
+ "page_idx": 5
1045
+ },
1046
+ {
1047
+ "type": "header",
1048
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1049
+ "bbox": [
1050
+ 254,
1051
+ 56,
1052
+ 715,
1053
+ 71
1054
+ ],
1055
+ "page_idx": 5
1056
+ },
1057
+ {
1058
+ "type": "page_number",
1059
+ "text": "6",
1060
+ "bbox": [
1061
+ 480,
1062
+ 922,
1063
+ 491,
1064
+ 934
1065
+ ],
1066
+ "page_idx": 5
1067
+ },
1068
+ {
1069
+ "type": "image",
1070
+ "img_path": "images/5d4c63f4be47b83425a087cfda781e257b4ff5b5a465f1b61bec9e3d7e5556fe.jpg",
1071
+ "image_caption": [
1072
+ "Figure 7: Speedup ratios of EAGLE with and without the use of tree attention. The evaluation dataset is MT-bench, with the temperature parameter set to 0."
1073
+ ],
1074
+ "image_footnote": [],
1075
+ "bbox": [
1076
+ 86,
1077
+ 83,
1078
+ 475,
1079
+ 210
1080
+ ],
1081
+ "page_idx": 6
1082
+ },
1083
+ {
1084
+ "type": "table",
1085
+ "img_path": "images/bc20d5f6d680171e43bc325c11cde43063f2b3c83731d5da76631dce169e5376.jpg",
1086
+ "table_caption": [
1087
+ "Table 5: Average acceptance length $\\tau$ of EAGLE with and without the use of tree attention. The evaluation dataset is MT-bench, with the temperature parameter set to 0."
1088
+ ],
1089
+ "table_footnote": [],
1090
+ "table_body": "<table><tr><td colspan=\"3\">Vicuna</td><td colspan=\"3\">LLaMA2-Chat</td></tr><tr><td>Size</td><td>Chain</td><td>Tree</td><td>Size</td><td>Chain</td><td>Tree</td></tr><tr><td>7B</td><td>3.20</td><td>3.94 (+0.74)</td><td>7B</td><td>3.00</td><td>3.62 (+0.62)</td></tr><tr><td>13B</td><td>3.23</td><td>3.98 (+0.75)</td><td>13B</td><td>3.18</td><td>3.90 (+0.68)</td></tr><tr><td>33B</td><td>2.97</td><td>3.68 (+0.71)</td><td>70B</td><td>3.12</td><td>3.81 (+0.69)</td></tr></table>",
1091
+ "bbox": [
1092
+ 89,
1093
+ 354,
1094
+ 473,
1095
+ 446
1096
+ ],
1097
+ "page_idx": 6
1098
+ },
1099
+ {
1100
+ "type": "text",
1101
+ "text": "significant acceleration effect, approximately in the range of $2.3\\mathrm{x} - 2.7\\mathrm{x}$",
1102
+ "bbox": [
1103
+ 84,
1104
+ 473,
1105
+ 473,
1106
+ 502
1107
+ ],
1108
+ "page_idx": 6
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "4.3.2. INPUTS OF DRAFT MODELS",
1113
+ "text_level": 1,
1114
+ "bbox": [
1115
+ 84,
1116
+ 518,
1117
+ 323,
1118
+ 532
1119
+ ],
1120
+ "page_idx": 6
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "text": "Compared to other speculative sampling-based methods, the key innovation of EAGLE lies in its utilization of features computed by the target LLM and the incorporation of sampling outcomes into the input of the draft model to address randomness. We conducted an ablation study on Vicuna 7B, assessing draft models with varying inputs. We tested four types of inputs: feature&shifted-token (EAGLE), feature&unshifted-token, token, and feature. Both feature&shifted-token (EAGLE) and feature&unshifted-token integrate semantic information at different levels. The distinction lies in the fact that feature&shifted-token (EAGLE) inputs tokens advanced by one time step, equipping it to address randomness effectively. Apart from the use of a FC layer to reduce dimensionality for the feature&token input, the structure of the draft model remains entirely consistent. Figure 8 presents the experimental outcomes on the MT-bench with Vicuna 7B as the target LLM. Three observations can be drawn.",
1125
+ "bbox": [
1126
+ 84,
1127
+ 541,
1128
+ 475,
1129
+ 815
1130
+ ],
1131
+ "page_idx": 6
1132
+ },
1133
+ {
1134
+ "type": "list",
1135
+ "sub_type": "text",
1136
+ "list_items": [
1137
+ "- First, when the number of parameters of the draft model is limited, utilizing features yields slightly better results than tokens.",
1138
+ "Second, merging features and tokens modestly boosts"
1139
+ ],
1140
+ "bbox": [
1141
+ 102,
1142
+ 832,
1143
+ 473,
1144
+ 905
1145
+ ],
1146
+ "page_idx": 6
1147
+ },
1148
+ {
1149
+ "type": "text",
1150
+ "text": "Table 6: The speedup ratios and average acceptance length $\\tau$ using different training datasets evaluated on the MT-bench, with the target LLM being LLaMA2-Chat 7B and the temperature set to 0. \"Fixed dataset\" refers to both questions and answers originating from the ShareGPT dataset. \"Data generated by target LLM\" denotes that while questions are sourced from the ShareGPT dataset, the answers are generated by the target LLM.",
1151
+ "bbox": [
1152
+ 496,
1153
+ 82,
1154
+ 887,
1155
+ 203
1156
+ ],
1157
+ "page_idx": 6
1158
+ },
1159
+ {
1160
+ "type": "table",
1161
+ "img_path": "images/7c2ce9b8a10424374ae278e46e47e687749e3475e81ff8fb1462369e0fe4802a.jpg",
1162
+ "table_caption": [],
1163
+ "table_footnote": [],
1164
+ "table_body": "<table><tr><td>Training data</td><td>Speedup</td><td>τ</td></tr><tr><td>Fixed dataset</td><td>2.78x</td><td>3.62</td></tr><tr><td>Data generated by target LLM</td><td>2.88x</td><td>3.75</td></tr></table>",
1165
+ "bbox": [
1166
+ 519,
1167
+ 214,
1168
+ 864,
1169
+ 280
1170
+ ],
1171
+ "page_idx": 6
1172
+ },
1173
+ {
1174
+ "type": "text",
1175
+ "text": "performance, mainly as discrete, error-free tokens mitigate feature error accumulation, evident from the similar $0 - \\alpha$ of feature&unshifted-token and feature-only draft models, with a significantly improved $1 - \\alpha$ .",
1176
+ "bbox": [
1177
+ 527,
1178
+ 304,
1179
+ 887,
1180
+ 364
1181
+ ],
1182
+ "page_idx": 6
1183
+ },
1184
+ {
1185
+ "type": "text",
1186
+ "text": "- Third, addressing the randomness inherent in the sampling process results in the most significant improvement. The feature&shifted-token scheme, compared to feature&unshifted-token, adds no complexity yet markedly enhances the draft model's capability by simply advancing the token by one time step, allowing the draft model to account for the randomness in sampling.",
1187
+ "bbox": [
1188
+ 516,
1189
+ 377,
1190
+ 888,
1191
+ 484
1192
+ ],
1193
+ "page_idx": 6
1194
+ },
1195
+ {
1196
+ "type": "text",
1197
+ "text": "4.3.3. TRAINING DATA",
1198
+ "text_level": 1,
1199
+ "bbox": [
1200
+ 496,
1201
+ 508,
1202
+ 660,
1203
+ 523
1204
+ ],
1205
+ "page_idx": 6
1206
+ },
1207
+ {
1208
+ "type": "text",
1209
+ "text": "EAGLE uses a fixed dataset for training, avoiding increased overhead from using the target LLM for generating training data. Ablation study (see Table 6) shows that data from the target LLM marginally improves performance, indicating EAGLE's low sensitivity to training data and justifying the fixed dataset approach for cost reduction.",
1210
+ "bbox": [
1211
+ 495,
1212
+ 532,
1213
+ 885,
1214
+ 625
1215
+ ],
1216
+ "page_idx": 6
1217
+ },
1218
+ {
1219
+ "type": "text",
1220
+ "text": "4.4. Batch size and throughput",
1221
+ "text_level": 1,
1222
+ "bbox": [
1223
+ 496,
1224
+ 640,
1225
+ 718,
1226
+ 656
1227
+ ],
1228
+ "page_idx": 6
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "Inference in LLMs is memory-bound (Patterson, 2004; Shazeer, 2019), leaving GPU computational resources underutilized. The principle behind the speculative sampling-based approach in enhancing generation speed lies in more effectively utilizing GPU computational resources. As the batch size increases, the available computational capacity of the GPU decreases, leading to a reduction in the acceleration effect. In this section, we present experimental results for scenarios where the batch size exceeds 1. As demonstrated in Table 7, the speedup ratio diminishes with increasing batch size. When using Vicuna 7B as the target LLM, the speedup ratio at $\\text{bs} = 4$ is higher than at $\\text{bs} = 3$ . This is attributed to the fact that, during the verification phase of EAGLE, the target LLM processes multiple tokens in a single forward pass, and the processing at $\\text{bs} = 4$ is faster than at $\\text{bs} = 3$ . In contrast, with vanilla autoregressive decoding",
1233
+ "bbox": [
1234
+ 495,
1235
+ 662,
1236
+ 887,
1237
+ 906
1238
+ ],
1239
+ "page_idx": 6
1240
+ },
1241
+ {
1242
+ "type": "header",
1243
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1244
+ "bbox": [
1245
+ 254,
1246
+ 56,
1247
+ 715,
1248
+ 71
1249
+ ],
1250
+ "page_idx": 6
1251
+ },
1252
+ {
1253
+ "type": "page_number",
1254
+ "text": "7",
1255
+ "bbox": [
1256
+ 480,
1257
+ 922,
1258
+ 491,
1259
+ 934
1260
+ ],
1261
+ "page_idx": 6
1262
+ },
1263
+ {
1264
+ "type": "image",
1265
+ "img_path": "images/55a41ed71359199c53e0d468c8aed79f664ec69f8689dd20198a4adfc91521f7.jpg",
1266
+ "image_caption": [],
1267
+ "image_footnote": [],
1268
+ "bbox": [
1269
+ 133,
1270
+ 83,
1271
+ 838,
1272
+ 106
1273
+ ],
1274
+ "page_idx": 7
1275
+ },
1276
+ {
1277
+ "type": "image",
1278
+ "img_path": "images/69757b7b2973bcb2d762f9aa9a8b842ca1f8a470831d1b6adbd973ec79c334b3.jpg",
1279
+ "image_caption": [],
1280
+ "image_footnote": [],
1281
+ "bbox": [
1282
+ 107,
1283
+ 114,
1284
+ 310,
1285
+ 250
1286
+ ],
1287
+ "page_idx": 7
1288
+ },
1289
+ {
1290
+ "type": "image",
1291
+ "img_path": "images/13fd560b911fd633a67c61c9f19e7a0528f3fc5ae5407dad4269155d8c20d8c2.jpg",
1292
+ "image_caption": [],
1293
+ "image_footnote": [],
1294
+ "bbox": [
1295
+ 321,
1296
+ 114,
1297
+ 495,
1298
+ 250
1299
+ ],
1300
+ "page_idx": 7
1301
+ },
1302
+ {
1303
+ "type": "image",
1304
+ "img_path": "images/763085b22612ab09946e7df47519b42bdd46782f3f5bacd37032b6510b88d84e.jpg",
1305
+ "image_caption": [],
1306
+ "image_footnote": [],
1307
+ "bbox": [
1308
+ 503,
1309
+ 114,
1310
+ 684,
1311
+ 250
1312
+ ],
1313
+ "page_idx": 7
1314
+ },
1315
+ {
1316
+ "type": "image",
1317
+ "img_path": "images/291d397fa5142b07b0aeef4117a23b56a8f6f5f3269fcb1bb1e6269c030d2e31.jpg",
1318
+ "image_caption": [],
1319
+ "image_footnote": [],
1320
+ "bbox": [
1321
+ 691,
1322
+ 114,
1323
+ 872,
1324
+ 250
1325
+ ],
1326
+ "page_idx": 7
1327
+ },
1328
+ {
1329
+ "type": "image",
1330
+ "img_path": "images/7fda2c23cf9d61b370d1aa061603853d368c81bb57c866c0c4de36117c2d5994.jpg",
1331
+ "image_caption": [
1332
+ "Figure 8: Performance of draft models with varying inputs. The target LLM is Vicuna 7B, and the test dataset is MT-bench. Speed refers to the walltime speedup ratio, $\\tau$ denotes the average acceptance length, $0 - \\alpha$ represents the acceptance rate with entirely precise inputs, $1 - \\alpha$ indicates the acceptance rate when the input includes one imprecise feature, and $T$ refers to the temperature."
1333
+ ],
1334
+ "image_footnote": [],
1335
+ "bbox": [
1336
+ 107,
1337
+ 255,
1338
+ 308,
1339
+ 378
1340
+ ],
1341
+ "page_idx": 7
1342
+ },
1343
+ {
1344
+ "type": "image",
1345
+ "img_path": "images/ae1e1e5dcad1eee3d790c36ec03d9453e9cc9f267131097d1997da21f8577bec.jpg",
1346
+ "image_caption": [],
1347
+ "image_footnote": [],
1348
+ "bbox": [
1349
+ 316,
1350
+ 255,
1351
+ 493,
1352
+ 378
1353
+ ],
1354
+ "page_idx": 7
1355
+ },
1356
+ {
1357
+ "type": "image",
1358
+ "img_path": "images/27f526eafcd945e355661ce2d62bfd6a77652b5a637e57414ba2e0defffe4422.jpg",
1359
+ "image_caption": [],
1360
+ "image_footnote": [],
1361
+ "bbox": [
1362
+ 504,
1363
+ 256,
1364
+ 683,
1365
+ 378
1366
+ ],
1367
+ "page_idx": 7
1368
+ },
1369
+ {
1370
+ "type": "image",
1371
+ "img_path": "images/c04015b102ff3c2e2c1eb0324538f148f41dec99116c0b5c3a922ef3180881f5.jpg",
1372
+ "image_caption": [],
1373
+ "image_footnote": [],
1374
+ "bbox": [
1375
+ 692,
1376
+ 255,
1377
+ 872,
1378
+ 378
1379
+ ],
1380
+ "page_idx": 7
1381
+ },
1382
+ {
1383
+ "type": "text",
1384
+ "text": "where the target LLM processes one token per forward pass, the speeds at $\\mathrm{bs} = 3$ and $\\mathrm{bs} = 4$ are nearly identical.",
1385
+ "bbox": [
1386
+ 84,
1387
+ 487,
1388
+ 475,
1389
+ 517
1390
+ ],
1391
+ "page_idx": 7
1392
+ },
1393
+ {
1394
+ "type": "text",
1395
+ "text": "Although speculative sampling-based methods predominantly focus on latency, we also investigated EAGLE's throughput for batch size $>1$ , another key metric for LLM systems. Compared to vanilla autoregressive decoding, EAGLE requires slightly more CUDA memory. For Vicuna 7B as the target LLM, operating under a memory constraint of a single RTX 3090 with 24G of CUDA memory, the maximum batch size (bs) for vanilla autoregressive decoding and EAGLE are 8 and 7, respectively. In the case of LLaMA2-Chat 70B, constrained by 4 A100 (40G) GPUs totaling 160G of CUDA memory, the maximum bs for vanilla autoregressive decoding and EAGLE are 5 and 4, respectively. All evaluations were conducted at FP16 precision. We calculated the throughput for different bs and selected the maximum value. Both vanilla autoregressive decoding and EAGLE achieve maximum throughput at their respective maximum bs. Tree attention consumes more computational resources. At $\\mathrm{bs} = 7$ , the computational resources are less abundant, making the non-use of tree attention more advantageous. As illustrated in Table 7, EAGLE achieves a $2\\mathrm{x}$ increase in throughput.",
1396
+ "bbox": [
1397
+ 84,
1398
+ 523,
1399
+ 475,
1400
+ 843
1401
+ ],
1402
+ "page_idx": 7
1403
+ },
1404
+ {
1405
+ "type": "table",
1406
+ "img_path": "images/eb20d2adf8df73bc215572ead2dfb30e6b5c297d0f60f67f8778715711105109.jpg",
1407
+ "table_caption": [
1408
+ "Table 7: Speedup ratios at different batch sizes and throughput of EAGLE. The evaluation dataset is MT-bench, with the temperature parameter set to 0."
1409
+ ],
1410
+ "table_footnote": [],
1411
+ "table_body": "<table><tr><td>Batch size</td><td>1</td><td>2</td><td>3</td><td>4</td><td>Throughput</td></tr><tr><td>Vicuna 7B</td><td>2.90x</td><td>2.87x</td><td>2.65x</td><td>2.76x</td><td>1.97x</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.01x</td><td>2.81x</td><td>2.50x</td><td>2.40x</td><td>1.99x</td></tr></table>",
1412
+ "bbox": [
1413
+ 501,
1414
+ 541,
1415
+ 885,
1416
+ 593
1417
+ ],
1418
+ "page_idx": 7
1419
+ },
1420
+ {
1421
+ "type": "text",
1422
+ "text": "5. Related Work",
1423
+ "text_level": 1,
1424
+ "bbox": [
1425
+ 496,
1426
+ 630,
1427
+ 638,
1428
+ 646
1429
+ ],
1430
+ "page_idx": 7
1431
+ },
1432
+ {
1433
+ "type": "text",
1434
+ "text": "There has been considerable research into accelerating language models, involving techniques such as distillation (Hinton et al., 2015), quantization (Hubara et al., 2018; Shen et al., 2020; Kim et al., 2021; Zadeh et al., 2020; Zafrir et al., 2019), pruning (Gale et al., 2019; Sanh et al., 2020; Kurtic et al., 2022; Voita et al., 2019), and innovative network architecture designs (Gu & Dao, 2023; Wu et al., 2020). These methods aim to reduce the latency per forward pass.",
1435
+ "bbox": [
1436
+ 495,
1437
+ 656,
1438
+ 888,
1439
+ 777
1440
+ ],
1441
+ "page_idx": 7
1442
+ },
1443
+ {
1444
+ "type": "text",
1445
+ "text": "Similar to our approach are frameworks based on speculative sampling. Early works (Stern et al., 2018; Sun et al., 2021) accelerated greedy decoding, while speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) extended it to non-greedy sampling, provably maintaining the original output distribution. Ensuring unchanged output distribution makes acceleration more challenging; many studies have explored lossy acceleration as a trade-off. For instance,",
1446
+ "bbox": [
1447
+ 495,
1448
+ 785,
1449
+ 888,
1450
+ 906
1451
+ ],
1452
+ "page_idx": 7
1453
+ },
1454
+ {
1455
+ "type": "header",
1456
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1457
+ "bbox": [
1458
+ 254,
1459
+ 56,
1460
+ 718,
1461
+ 71
1462
+ ],
1463
+ "page_idx": 7
1464
+ },
1465
+ {
1466
+ "type": "page_number",
1467
+ "text": "8",
1468
+ "bbox": [
1469
+ 480,
1470
+ 922,
1471
+ 491,
1472
+ 934
1473
+ ],
1474
+ "page_idx": 7
1475
+ },
1476
+ {
1477
+ "type": "text",
1478
+ "text": "DistillSpec (Zhou et al., 2023) modifies acceptance probabilities using a lenience function, BiLD (Kim et al., 2023) accepts drafts if the distance metric from the target LLM distribution is below a certain threshold, and Medusa (Cai et al., 2023) uses a minimum of a hard threshold and an entropy-dependent threshold for truncation. In contrast, EAGLE does not employ any relaxations and maintains the output distribution of the LLM unchanged.",
1479
+ "bbox": [
1480
+ 84,
1481
+ 84,
1482
+ 475,
1483
+ 205
1484
+ ],
1485
+ "page_idx": 8
1486
+ },
1487
+ {
1488
+ "type": "text",
1489
+ "text": "The primary differences among speculative sampling-based methods manifest predominantly in the drafting phase. Speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) utilizes a lower-parameter version of the target LLM as the draft model. Self-Speculative Decoding (Zhang et al., 2023) skips some layers of the target LLM during draft generation. SpecInfer (Miao et al., 2023) employs a set of small models to generate drafts in parallel. Cascade Speculative Drafting (Chen et al., 2023b) and Staged Speculative Decoding (Spector & Re, 2023) cascade different overhead draft models. Online Speculative Decoding (Liu et al., 2023) trains the draft model on a distribution of queries. Methods (Hooper et al., 2023; Fu et al., 2023; Yang et al., 2023b) such as Medusa (Cai et al., 2023) do not employ a separate target LLM; instead, they generate drafts by utilizing features or weights from the target LLM. REST (He et al., 2023) generates drafts based on retrieval methods. LLMA (Yang et al., 2023a), used for tasks like grammatical correction where input and output overlap, retrieves drafts directly from the input.",
1490
+ "bbox": [
1491
+ 86,
1492
+ 212,
1493
+ 477,
1494
+ 515
1495
+ ],
1496
+ "page_idx": 8
1497
+ },
1498
+ {
1499
+ "type": "text",
1500
+ "text": "6. Conclusion",
1501
+ "text_level": 1,
1502
+ "bbox": [
1503
+ 84,
1504
+ 534,
1505
+ 205,
1506
+ 550
1507
+ ],
1508
+ "page_idx": 8
1509
+ },
1510
+ {
1511
+ "type": "text",
1512
+ "text": "In this paper, we introduce EAGLE, an efficient framework for speculative sampling. EAGLE conducts the drafting process autoregressively at the more structured (second-to-top-layer) feature level and mitigates sampling uncertainty in predicting the next feature by incorporating tokens from one time step ahead. EAGLE is guaranteed to preserve the output distribution of the LLM while significantly enhancing generation speed. On MT-bench, EAGLE is $2.1\\mathrm{x} - 3.8\\mathrm{x}$ faster than vanilla autoregressive decoding, $1.7\\mathrm{x} - 2.1\\mathrm{x}$ faster than Lookahead, and $1.5\\mathrm{x} - 1.6\\mathrm{x}$ faster than Medusa.",
1513
+ "bbox": [
1514
+ 84,
1515
+ 560,
1516
+ 473,
1517
+ 710
1518
+ ],
1519
+ "page_idx": 8
1520
+ },
1521
+ {
1522
+ "type": "text",
1523
+ "text": "Acknowledgements. We acknowledge useful discussions with the Medusa's team leader Tianle Cai, the Lookahead's team leader Hao Zhang, the SpecTr's team leader Ziteng Sun, interactions with the gpt-fast team leaders Horace He and Soumith Chintala on X, and Yihan Wu.",
1524
+ "bbox": [
1525
+ 84,
1526
+ 719,
1527
+ 473,
1528
+ 794
1529
+ ],
1530
+ "page_idx": 8
1531
+ },
1532
+ {
1533
+ "type": "text",
1534
+ "text": "References",
1535
+ "text_level": 1,
1536
+ "bbox": [
1537
+ 86,
1538
+ 814,
1539
+ 184,
1540
+ 830
1541
+ ],
1542
+ "page_idx": 8
1543
+ },
1544
+ {
1545
+ "type": "ref_text",
1546
+ "text": "Cai, T., Li, Y., Geng, Z., Peng, H., and Dao, T. Medusa: Simple framework for accelerating LLM generation with multiple decoding heads. https://github.com/FasterDecoding/Medusa, 2023.",
1547
+ "bbox": [
1548
+ 86,
1549
+ 837,
1550
+ 475,
1551
+ 898
1552
+ ],
1553
+ "page_idx": 8
1554
+ },
1555
+ {
1556
+ "type": "list",
1557
+ "sub_type": "ref_text",
1558
+ "list_items": [
1559
+ "Chen, C., Borgeaud, S., Irving, G., Lespiau, J.-B., Sifre, L., and Jumper, J. Accelerating large language model decoding with speculative sampling. arXiv preprint arXiv:2302.01318, 2023a.",
1560
+ "Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. d. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021.",
1561
+ "Chen, Z., Yang, X., Lin, J., Sun, C., Huang, J., and Chang, K. C.-C. Cascade speculative drafting for even faster LLM inference. arXiv preprint arXiv:2312.11462, 2023b.",
1562
+ "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.",
1563
+ "Fu, Y., Bailis, P., Stoica, I., and Zhang, H. Breaking the sequential dependency of LLM inference using lookahead decoding, November 2023. URL https://lmsys.org/blog/2023-11-21-lookahead-decoding/.",
1564
+ "Gale, T., Olsen, E., and Hooker, S. The state of sparsity in deep neural networks.(2019). arXiv preprint cs.LG/1902.09574, 2019.",
1565
+ "Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023.",
1566
+ "He, Z., Zhong, Z., Cai, T., Lee, J. D., and He, D. Rest: Retrieval-based speculative decoding. arXiv preprint arXiv:2311.08252, 2023.",
1567
+ "Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015.",
1568
+ "Hooper, C., Kim, S., Mohammadzadeh, H., Genc, H., Keutzer, K., Gholami, A., and Shao, S. Speed: Speculative pipelined execution for efficient decoding. arXiv preprint arXiv:2310.12072, 2023.",
1569
+ "Hubara, I., Courbariaux, M., Soudry, D., El-Yaniv, R., and Bengio, Y. Quantized neural networks: Training neural networks with low precision weights and activations. journal of machine learning research, 18(187):1-30, 2018.",
1570
+ "Jain, N., Chiang, P.-y., Wen, Y., Kirchenbauer, J., Chu, H.-M., Somepalli, G., Bartoldson, B. R., Kailkhura, B., Schwarzschild, A., Saha, A., et al. NEFTune: Noisy embeddings improve instruction finetuning. arXiv preprint arXiv:2310.05914, 2023."
1571
+ ],
1572
+ "bbox": [
1573
+ 498,
1574
+ 84,
1575
+ 887,
1576
+ 904
1577
+ ],
1578
+ "page_idx": 8
1579
+ },
1580
+ {
1581
+ "type": "header",
1582
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1583
+ "bbox": [
1584
+ 254,
1585
+ 56,
1586
+ 718,
1587
+ 71
1588
+ ],
1589
+ "page_idx": 8
1590
+ },
1591
+ {
1592
+ "type": "page_number",
1593
+ "text": "9",
1594
+ "bbox": [
1595
+ 480,
1596
+ 922,
1597
+ 491,
1598
+ 934
1599
+ ],
1600
+ "page_idx": 8
1601
+ },
1602
+ {
1603
+ "type": "list",
1604
+ "sub_type": "ref_text",
1605
+ "list_items": [
1606
+ "Kim, S., Gholami, A., Yao, Z., Mahoney, M. W., and Keutzer, K. I-bert: Integer-only bert quantization. In International conference on machine learning, pp. 5506-5518. PMLR, 2021.",
1607
+ "Kim, S., Mangalam, K., Moon, S., Malik, J., Mahoney, M. W., Gholami, A., and Keutzer, K. Speculative decoding with big little decoder. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.",
1608
+ "Kurtic, E., Campos, D., Nguyen, T., Frantar, E., Kurtz, M., Fineran, B., Goin, M., and Alistarh, D. The optimal bert surgeon: Scalable and accurate second-order pruning for large language models. arXiv preprint arXiv:2203.07259, 2022.",
1609
+ "Leviathan, Y., Kalman, M., and Matias, Y. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pp. 19274-19286. PMLR, 2023.",
1610
+ "Liu, X., Hu, L., Bailis, P., Stoica, I., Deng, Z., Cheung, A., and Zhang, H. Online speculative decoding. arXiv preprint arXiv:2310.07177, 2023.",
1611
+ "Miao, X., Oliaro, G., Zhang, Z., Cheng, X., Wang, Z., Wong, R. Y. Y., Chen, Z., Arfeen, D., Abhyankar, R., and Jia, Z. SpecInfer: Accelerating generative LLM serving with speculative inference and token tree verification. arXiv preprint arXiv:2305.09781, 2023.",
1612
+ "Patterson, D. A. Latency lags bandwidth. Communications of the ACM, 47(10):71-75, 2004.",
1613
+ "PyTorch Labs. gpt-fast. https://github.com/pytorch-labs/gpt-fast/, 2023.",
1614
+ "Sanh, V., Wolf, T., and Rush, A. Movement pruning: Adaptive sparsity by fine-tuning. Advances in Neural Information Processing Systems, 33:20378-20389, 2020.",
1615
+ "Santilli, A., Severino, S., Postolache, E., Maiorca, V., Mancusi, M., Marin, R., and Rodola, E. Accelerating transformer inference for translation via parallel decoding. In Rogers, A., Boyd-Graber, J., and Okazaki, N. (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 12336-12355, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.689. URL https://aclanthology.org/2023.acl-long.689.",
1616
+ "Shazeer, N. Fast transformer decoding: One write-head is all you need. arXiv preprint arXiv:1911.02150, 2019.",
1617
+ "Shen, S., Dong, Z., Ye, J., Ma, L., Yao, Z., Gholami, A., Mahoney, M. W., and Keutzer, K. Q-bert: Hessian based ultra low precision quantization of bert. In Proceedings"
1618
+ ],
1619
+ "bbox": [
1620
+ 86,
1621
+ 84,
1622
+ 478,
1623
+ 906
1624
+ ],
1625
+ "page_idx": 9
1626
+ },
1627
+ {
1628
+ "type": "list",
1629
+ "sub_type": "ref_text",
1630
+ "list_items": [
1631
+ "of the AAAI Conference on Artificial Intelligence, volume 34, pp. 8815-8821, 2020.",
1632
+ "Spector, B. and Re, C. Accelerating LLM inference with staged speculative decoding. arXiv preprint arXiv:2308.04623, 2023.",
1633
+ "Stern, M., Shazeer, N., and Uszkoreit, J. Blockwise parallel decoding for deep autoregressive models. Advances in Neural Information Processing Systems, 31, 2018.",
1634
+ "Sun, X., Ge, T., Wei, F., and Wang, H. Instantaneous grammatical error correction with shallow aggressive decoding. arXiv preprint arXiv:2106.04970, 2021.",
1635
+ "Taori, R., Gulrajani, I., Zhang, T., Dubois, Y., Li, X., Guestrin, C., Liang, P., and Hashimoto, T. B. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.",
1636
+ "Touvron, H., Martin, L., Stone, K., Albert, P., Almahairi, A., Babaei, Y., Bashlykov, N., Batra, S., Bhargava, P., Bhosale, S., et al. LIAMA 2: Open foundation and finetuned chat models. arXiv preprint arXiv:2307.09288, 2023.",
1637
+ "Voita, E., Talbot, D., Moiseev, F., Sennrich, R., and Titov, I. Analyzing multi-head self-attention: Specialized heads do the heavy lifting, the rest can be pruned. arXiv preprint arXiv:1905.09418, 2019.",
1638
+ "Wu, Z., Liu, Z., Lin, J., Lin, Y., and Han, S. Lite transformer with long-short range attention. arXiv preprint arXiv:2004.11886, 2020.",
1639
+ "Xia, H., Ge, T., Wang, P., Chen, S.-Q., Wei, F., and Sui, Z. Speculative decoding: Exploiting speculative execution for accelerating seq2seq generation. In Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3909-3925, 2023.",
1640
+ "Yang, N., Ge, T., Wang, L., Jiao, B., Jiang, D., Yang, L., Majumder, R., and Wei, F. Inference with reference: Lossless acceleration of large language models. arXiv preprint arXiv:2304.04487, 2023a.",
1641
+ "Yang, S., Lee, G., Cho, J., Papailiopoulos, D., and Lee, K. Predictive pipelined decoding: A comput-. latency trade-off for exact ltm decoding. arXiv preprint arXiv:2307.05908, 2023b.",
1642
+ "Zadeh, A. H., Edo, I., Awad, O. M., and Moshovos, A. Gobo: Quantizing attention-based nlp models for low latency and energy efficient inference. In 2020 53rd Annual IEEE/ACM International Symposium on Microarchitecture (MICRO), pp. 811-824. IEEE, 2020."
1643
+ ],
1644
+ "bbox": [
1645
+ 500,
1646
+ 84,
1647
+ 887,
1648
+ 906
1649
+ ],
1650
+ "page_idx": 9
1651
+ },
1652
+ {
1653
+ "type": "header",
1654
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1655
+ "bbox": [
1656
+ 254,
1657
+ 56,
1658
+ 718,
1659
+ 71
1660
+ ],
1661
+ "page_idx": 9
1662
+ },
1663
+ {
1664
+ "type": "page_number",
1665
+ "text": "10",
1666
+ "bbox": [
1667
+ 477,
1668
+ 922,
1669
+ 495,
1670
+ 934
1671
+ ],
1672
+ "page_idx": 9
1673
+ },
1674
+ {
1675
+ "type": "list",
1676
+ "sub_type": "ref_text",
1677
+ "list_items": [
1678
+ "Zafrir, O., Boudoukh, G., Izsak, P., and Wasserblat, M. Q8bert: Quantized 8bit bert. In 2019 Fifth Workshop on Energy Efficient Machine Learning and Cognitive Computing-NeurIPS Edition (EMC2-NIPS), pp. 36-39. IEEE, 2019.",
1679
+ "Zhang, J., Wang, J., Li, H., Shou, L., Chen, K., Chen, G., and Mehrotra, S. Draft & verify: Lossless large language model acceleration via self-speculative decoding. arXiv preprint arXiv:2309.08168, 2023.",
1680
+ "Zhang, P., Zeng, G., Wang, T., and Lu, W. TinyLlama: An open-source small language model. arXiv preprint arXiv:2401.02385, 2024.",
1681
+ "Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E., et al. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv:2306.05685, 2023.",
1682
+ "Zhou, Y., Lyu, K., Rawat, A. S., Menon, A. K., Rostamizadeh, A., Kumar, S., Kagy, J.-F., and Agarwal, R. DistillSpec: Improving speculative decoding via knowledge distillation. arXiv preprint arXiv:2310.08461, 2023."
1683
+ ],
1684
+ "bbox": [
1685
+ 86,
1686
+ 84,
1687
+ 475,
1688
+ 426
1689
+ ],
1690
+ "page_idx": 10
1691
+ },
1692
+ {
1693
+ "type": "header",
1694
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1695
+ "bbox": [
1696
+ 254,
1697
+ 56,
1698
+ 715,
1699
+ 71
1700
+ ],
1701
+ "page_idx": 10
1702
+ },
1703
+ {
1704
+ "type": "page_number",
1705
+ "text": "11",
1706
+ "bbox": [
1707
+ 477,
1708
+ 922,
1709
+ 493,
1710
+ 934
1711
+ ],
1712
+ "page_idx": 10
1713
+ },
1714
+ {
1715
+ "type": "text",
1716
+ "text": "A. Implementation Details",
1717
+ "text_level": 1,
1718
+ "bbox": [
1719
+ 84,
1720
+ 83,
1721
+ 312,
1722
+ 99
1723
+ ],
1724
+ "page_idx": 11
1725
+ },
1726
+ {
1727
+ "type": "text",
1728
+ "text": "A.1. Tree Structure",
1729
+ "text_level": 1,
1730
+ "bbox": [
1731
+ 84,
1732
+ 109,
1733
+ 227,
1734
+ 125
1735
+ ],
1736
+ "page_idx": 11
1737
+ },
1738
+ {
1739
+ "type": "text",
1740
+ "text": "Utilizing tree attention, EAGLE generates a tree-structured draft. The left side of Figure 9 illustrates the tree structure of the draft, while the right side depicts the corresponding chain-structured draft when tree attention is not used (as utilized in the ablation study detailed in Section 4.3.1). In a greedy setting, we select the top $k$ tokens with the highest probabilities as child nodes. In a non-greedy setting, we sample $k$ tokens. The number of child nodes, $k$ , can be inferred from Figure 9; for instance, $k = 4$ at the root node. Regardless of employing a tree-structured or chain-structured draft, the draft model undergoes 5 forward passes during the draft phase. During the verification phase, each token's probability is obtained through a single forward pass by the target LLM.",
1741
+ "bbox": [
1742
+ 84,
1743
+ 132,
1744
+ 887,
1745
+ 239
1746
+ ],
1747
+ "page_idx": 11
1748
+ },
1749
+ {
1750
+ "type": "text",
1751
+ "text": "Why do we use such a tree structure? The choice of the tree structure, as depicted in Figure 9, was not rigorously optimized but rather based on intuition: branches of higher-probability tokens should be deeper and wider. For this paper, all models across all experiments utilized the draft structure shown in Figure 9. However, the optimal tree structure is likely context-dependent. For instance, as batch size increases and redundant computational resources decrease, a smaller tree might be preferable. Tuning the draft structure could potentially lead to improved performance.",
1752
+ "bbox": [
1753
+ 84,
1754
+ 246,
1755
+ 888,
1756
+ 323
1757
+ ],
1758
+ "page_idx": 11
1759
+ },
1760
+ {
1761
+ "type": "image",
1762
+ "img_path": "images/db562c2514d7e9c23c2097acad387afda67e36d5c4f4a8a35271bdf6236659a6.jpg",
1763
+ "image_caption": [
1764
+ "Figure 9: Structure of EAGLE's draft. The left side shows the draft structure when tree attention is employed, while the right side depicts the draft structure without the use of tree attention."
1765
+ ],
1766
+ "image_footnote": [],
1767
+ "bbox": [
1768
+ 109,
1769
+ 349,
1770
+ 864,
1771
+ 566
1772
+ ],
1773
+ "page_idx": 11
1774
+ },
1775
+ {
1776
+ "type": "text",
1777
+ "text": "A.2. Multi-Round Speculative Sampling",
1778
+ "text_level": 1,
1779
+ "bbox": [
1780
+ 84,
1781
+ 650,
1782
+ 372,
1783
+ 666
1784
+ ],
1785
+ "page_idx": 11
1786
+ },
1787
+ {
1788
+ "type": "text",
1789
+ "text": "Unlike the chain-structured draft of speculative sampling, EAGLE employs a tree-structured draft, necessitating modifications to the sampling algorithm. The sampling algorithm A of speculative sampling can be briefly described as: if a token is accepted, it returns that token; otherwise, it samples a token from the adjusted distribution. For a tree-structured draft with $k$ candidate tokens, Multi-round speculative sampling recursively invokes algorithm A. Instead of directly sampling from the adjusted distribution after rejecting a token, Multi-round speculative sampling calls A again. If all tokens are rejected, it then directly samples from the adjusted distribution. The pseudocode for Multi-round speculative sampling is provided in Algorithm 1.",
1790
+ "bbox": [
1791
+ 84,
1792
+ 672,
1793
+ 887,
1794
+ 779
1795
+ ],
1796
+ "page_idx": 11
1797
+ },
1798
+ {
1799
+ "type": "text",
1800
+ "text": "B. Detailed experimental results",
1801
+ "text_level": 1,
1802
+ "bbox": [
1803
+ 84,
1804
+ 797,
1805
+ 359,
1806
+ 815
1807
+ ],
1808
+ "page_idx": 11
1809
+ },
1810
+ {
1811
+ "type": "text",
1812
+ "text": "Table 8 displays the speedup ratio, average acceptance length $\\tau$ and acceptance rate $\\alpha$ of EAGLE on HumanEval, GSM8K, and Alpaca datasets.",
1813
+ "bbox": [
1814
+ 84,
1815
+ 824,
1816
+ 888,
1817
+ 854
1818
+ ],
1819
+ "page_idx": 11
1820
+ },
1821
+ {
1822
+ "type": "header",
1823
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
1824
+ "bbox": [
1825
+ 254,
1826
+ 56,
1827
+ 715,
1828
+ 71
1829
+ ],
1830
+ "page_idx": 11
1831
+ },
1832
+ {
1833
+ "type": "page_number",
1834
+ "text": "12",
1835
+ "bbox": [
1836
+ 477,
1837
+ 922,
1838
+ 495,
1839
+ 934
1840
+ ],
1841
+ "page_idx": 11
1842
+ },
1843
+ {
1844
+ "type": "text",
1845
+ "text": "Algorithm 1 Multi-round speculative sampling",
1846
+ "bbox": [
1847
+ 86,
1848
+ 142,
1849
+ 398,
1850
+ 157
1851
+ ],
1852
+ "page_idx": 12
1853
+ },
1854
+ {
1855
+ "type": "text",
1856
+ "text": "Input: Target distribution $p$ , samples $t_i$ and distributions $\\hat{p}_i$ for each $i$ from 1 to $k$ , where $t_i$ is sampled from $\\hat{p}_i$ ,",
1857
+ "bbox": [
1858
+ 101,
1859
+ 161,
1860
+ 833,
1861
+ 178
1862
+ ],
1863
+ "page_idx": 12
1864
+ },
1865
+ {
1866
+ "type": "text",
1867
+ "text": "Output: a sample $x \\sim p$ ;",
1868
+ "bbox": [
1869
+ 102,
1870
+ 179,
1871
+ 277,
1872
+ 193
1873
+ ],
1874
+ "page_idx": 12
1875
+ },
1876
+ {
1877
+ "type": "text",
1878
+ "text": "$i\\gets 1$",
1879
+ "bbox": [
1880
+ 104,
1881
+ 194,
1882
+ 145,
1883
+ 205
1884
+ ],
1885
+ "page_idx": 12
1886
+ },
1887
+ {
1888
+ "type": "text",
1889
+ "text": "for $i\\leq k$ do",
1890
+ "bbox": [
1891
+ 104,
1892
+ 208,
1893
+ 189,
1894
+ 220
1895
+ ],
1896
+ "page_idx": 12
1897
+ },
1898
+ {
1899
+ "type": "text",
1900
+ "text": "$r\\gets U(0,1)$",
1901
+ "bbox": [
1902
+ 120,
1903
+ 223,
1904
+ 204,
1905
+ 237
1906
+ ],
1907
+ "page_idx": 12
1908
+ },
1909
+ {
1910
+ "type": "text",
1911
+ "text": "if $r < p(t_i) / \\hat{p}_i(t_i)$ then",
1912
+ "bbox": [
1913
+ 120,
1914
+ 238,
1915
+ 279,
1916
+ 253
1917
+ ],
1918
+ "page_idx": 12
1919
+ },
1920
+ {
1921
+ "type": "text",
1922
+ "text": "Return $t_i$",
1923
+ "bbox": [
1924
+ 137,
1925
+ 253,
1926
+ 202,
1927
+ 267
1928
+ ],
1929
+ "page_idx": 12
1930
+ },
1931
+ {
1932
+ "type": "text",
1933
+ "text": "end if",
1934
+ "bbox": [
1935
+ 120,
1936
+ 268,
1937
+ 163,
1938
+ 281
1939
+ ],
1940
+ "page_idx": 12
1941
+ },
1942
+ {
1943
+ "type": "text",
1944
+ "text": "$p\\gets \\text{norm}(\\max(0,p(t_i) - \\hat{p}_i(t_i)))$",
1945
+ "bbox": [
1946
+ 120,
1947
+ 284,
1948
+ 357,
1949
+ 299
1950
+ ],
1951
+ "page_idx": 12
1952
+ },
1953
+ {
1954
+ "type": "text",
1955
+ "text": "$i\\gets i + 1$",
1956
+ "bbox": [
1957
+ 120,
1958
+ 300,
1959
+ 186,
1960
+ 311
1961
+ ],
1962
+ "page_idx": 12
1963
+ },
1964
+ {
1965
+ "type": "text",
1966
+ "text": "end for",
1967
+ "bbox": [
1968
+ 104,
1969
+ 314,
1970
+ 156,
1971
+ 325
1972
+ ],
1973
+ "page_idx": 12
1974
+ },
1975
+ {
1976
+ "type": "text",
1977
+ "text": "Sample $t\\sim p$",
1978
+ "bbox": [
1979
+ 104,
1980
+ 329,
1981
+ 196,
1982
+ 343
1983
+ ],
1984
+ "page_idx": 12
1985
+ },
1986
+ {
1987
+ "type": "text",
1988
+ "text": "Return $t$",
1989
+ "bbox": [
1990
+ 104,
1991
+ 344,
1992
+ 166,
1993
+ 356
1994
+ ],
1995
+ "page_idx": 12
1996
+ },
1997
+ {
1998
+ "type": "table",
1999
+ "img_path": "images/d38388876d4fdc0abb6e38170af230b09210762007a3f8d1fc693e347468e5b3.jpg",
2000
+ "table_caption": [
2001
+ "Table 8: Speedup ratio, average acceptance length $\\tau$ and acceptance rate $\\alpha$ on HumanEval, GSM8K, and Alpaca at temperature = 0."
2002
+ ],
2003
+ "table_footnote": [],
2004
+ "table_body": "<table><tr><td>Dataset</td><td>Model</td><td>Speedup</td><td>τ</td><td>0-α</td><td>1-α</td><td>2-α</td><td>3-α</td><td>4-α</td></tr><tr><td rowspan=\"6\">HumanEval</td><td>Vicuna 7B</td><td>3.33x</td><td>4.29</td><td>0.82</td><td>0.77</td><td>0.72</td><td>0.69</td><td>0.71</td></tr><tr><td>Vicuna13B</td><td>3.58x</td><td>4.39</td><td>0.85</td><td>0.78</td><td>0.74</td><td>0.72</td><td>0.73</td></tr><tr><td>Vicuna 33B</td><td>3.67x</td><td>4.28</td><td>0.83</td><td>0.77</td><td>0.74</td><td>0.70</td><td>0.70</td></tr><tr><td>LLaMA2-Chat 7B</td><td>3.17x</td><td>4.24</td><td>0.81</td><td>0.76</td><td>0.73</td><td>0.74</td><td>0.72</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.76x</td><td>4.52</td><td>0.85</td><td>0.80</td><td>0.78</td><td>0.76</td><td>0.75</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.52x</td><td>4.42</td><td>0.84</td><td>0.79</td><td>0.75</td><td>0.73</td><td>0.74</td></tr><tr><td rowspan=\"6\">GSM8K</td><td>Vicuna 7B</td><td>3.01x</td><td>4.00</td><td>0.79</td><td>0.71</td><td>0.70</td><td>0.71</td><td>0.70</td></tr><tr><td>Vicuna13B</td><td>3.08x</td><td>3.97</td><td>0.79</td><td>0.71</td><td>0.67</td><td>0.68</td><td>0.64</td></tr><tr><td>Vicuna 33B</td><td>3.25x</td><td>3.94</td><td>0.79</td><td>0.71</td><td>0.67</td><td>0.67</td><td>0.67</td></tr><tr><td>LLaMA2-Chat 7B</td><td>2.91x</td><td>3.82</td><td>0.75</td><td>0.69</td><td>0.64</td><td>0.65</td><td>0.63</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.20x</td><td>4.03</td><td>0.80</td><td>0.70</td><td>0.70</td><td>0.68</td><td>0.66</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.03x</td><td>3.93</td><td>0.77</td><td>0.71</td><td>0.66</td><td>0.64</td><td>0.60</td></tr><tr><td rowspan=\"6\">Alpaca</td><td>Vicuna 7B</td><td>2.79x</td><td>3.86</td><td>0.74</td><td>0.68</td><td>0.66</td><td>0.66</td><td>0.67</td></tr><tr><td>Vicuna13B</td><td>3.03x</td><td>3.95</td><td>0.72</td><td>0.67</td><td>0.64</td><td>0.63</td><td>0.64</td></tr><tr><td>Vicuna 33B</td><td>2.97x</td><td>3.61</td><td>0.70</td><td>0.64</td><td>0.64</td><td>0.63</td><td>0.64</td></tr><tr><td>LLaMA2-Chat 7B</td><td>2.78x</td><td>3.71</td><td>0.73</td><td>0.66</td><td>0.62</td><td>0.64</td><td>0.62</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.01x</td><td>3.83</td><td>0.75</td><td>0.67</td><td>0.64</td><td>0.63</td><td>0.63</td></tr><tr><td>LLaMA2-Chat 70B</td><td>2.97x</td><td>3.77</td><td>0.76</td><td>0.68</td><td>0.65</td><td>0.61</td><td>0.62</td></tr></table>",
2005
+ "bbox": [
2006
+ 179,
2007
+ 527,
2008
+ 792,
2009
+ 844
2010
+ ],
2011
+ "page_idx": 12
2012
+ },
2013
+ {
2014
+ "type": "header",
2015
+ "text": "EAGLE: Speculative Sampling Requires Rethinking Feature Uncertainty",
2016
+ "bbox": [
2017
+ 254,
2018
+ 56,
2019
+ 715,
2020
+ 70
2021
+ ],
2022
+ "page_idx": 12
2023
+ },
2024
+ {
2025
+ "type": "page_number",
2026
+ "text": "13",
2027
+ "bbox": [
2028
+ 477,
2029
+ 922,
2030
+ 493,
2031
+ 934
2032
+ ],
2033
+ "page_idx": 12
2034
+ }
2035
+ ]
2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15077/062e8bc5-a71d-46c7-8916-7432ffa989f6_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8010f1f677723009ddc73c4458a0c9a77e46a53cfe5bd56560eb48f621db51ae
3
+ size 1827881
2401.15xxx/2401.15077/full.md ADDED
@@ -0,0 +1,349 @@
1
+ Yuhui Li\* Fangyun Wei Chao Zhang\* Hongyang Zhang\*
2
+
3
+ $\spadesuit$ Peking University
4
+
5
+ Microsoft Research
6
+
7
+ \*University of Waterloo
8
+
9
+ †Vector Institute
10
+
11
+ hongyang.zhang@uwaterloo.ca
12
+
13
+ https://github.com/SafeAILab/EAGLE
14
+
15
+ ![](images/2285cf218f82178f4e7f0f1a35951cf30a2fc9c26d7c57714479f745bfcd273f.jpg)
16
+ Figure 1: Speedup ratio of Vicuna and LLaMA2-Chat inference latency on the MT-bench for greedy (temperature=0) settings. Speedup ratio of Medusa and Lookahead are copied from their original technical reports. With speculative sampling, there is a lack of suitable draft models to accelerate the 7B model. Employing a 7B model as the draft model for a 13B model results in slow speeds due to the high overhead of the 7B model, rendering it less efficient than vanilla autoregressive decoding. These scenarios are marked as N/A. In this paper, we only compare with speculative sampling based methods that do not need to finetune the backbone models, ensuring the output text distribution remains constant.
17
+
18
+ # Abstract
19
+
20
+ Autoregressive decoding makes the inference of Large Language Models (LLMs) time-consuming. In this paper, we reconsider speculative sampling and derive two key observations. Firstly, autoregression at the feature (second-to-top-layer) level is more straightforward than at the token level. Secondly, the inherent uncertainty in feature (second-to-top-layer) level autoregression constrains its performance. Based on these insights, we introduce EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency), a simple yet highly efficient speculative sampling framework. By incorporating a token sequence advanced by one time step, EAGLE effectively resolves the uncertainty, enabling precise second-to-top-layer feature prediction with minimal overhead. We conducted comprehensive evaluations
21
+
22
+ of EAGLE, including all models from the Vicuna and LLaMA2-Chat series, the MoE model Mixtral 8x7B Instruct, and tasks in dialogue, code generation, mathematical reasoning, and instruction following. For LLaMA2-Chat 70B, EAGLE achieved a latency speedup ratio of $2.7\mathrm{x} - 3.5\mathrm{x}$ , doubled throughput, while maintaining the distribution of the generated text.
23
+
24
+ # 1. Introduction
25
+
26
+ Autoregressive decoding, the de facto standard for large language models (LLMs), generates tokens sequentially, leading to slow and costly generation. Speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) based methods address this by dividing the process into a low-cost draft stage and a parallelized verification stage over the drafted tokens, allowing for multiple tokens to be validated in a
27
+
28
+ single LLM pass. These approaches accelerate generation by producing multiple tokens per pass. More importantly, the verification stage ensures that the text distribution aligns precisely with the decoding results of the original LLM, maintaining the integrity of the generated content.
29
+
30
+ Applying speculative sampling hinges on finding a draft model that mirrors the original LLM's functionality but with reduced latency, often involving a lower-parameter version from the same LLM series. For instance, in the LLaMA2 (Touvron et al., 2023) series which includes models with 7B, 13B, and 70B parameters, using the 7B model as a draft model of the 70B model is valid, while finding a suitable draft model for the smallest 7B variant is tricky. An alternative could be to use TinyLLaMA (Zhang et al., 2024), but it is not feasible for instruct-tuned models due to the inconsistency in instruction templates between LLaMA2-Chat and TinyLLaMA-Chat. Despite the 7B model's potential as a draft model, its high overhead diminishes acceleration gains. Training a new, appropriately sized draft model specifically for speculative sampling is not an ideal solution either due to the high cost: TinyLLaMA is trained on 3,000B tokens, whereas EAGLE is trained on 2-4B tokens.
31
+
32
+ The key to enhancing acceleration in speculative sampling lies in reducing the time overhead and improving the acceptance rate of the draft by the original LLM (Chen et al., 2023b; Xia et al., 2023; Santilli et al., 2023). Numerous approaches focus on reducing the overhead of the drafting phase. Lookahead (Fu et al., 2023) employs n-gram and Jacobi iteration, while Medusa (Cai et al., 2023) utilizes a set of MLPs that predict tokens based on the second-to-top-layer feature of the original LLM. These strategies significantly decrease the latency in generating drafts, leading to improved acceleration. However, their effectiveness is limited by the lower accuracy of the resulting drafts, with Medusa achieving an accuracy of about 0.6, and Lookahead even lower. In contrast, our method attains an accuracy of approximately 0.8.
33
+
34
+ To overcome these limitations, we introduce EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency), an efficient speculative sampling method, grounded in the following two observations.
35
+
36
+ Firstly, autoregression at the feature level is simpler than at the token level. In this paper, "features" refer to the second-to-top-layer features of the original LLM, located before the LM head. Compared to token sequences, which are simple transformations of natural language, feature sequences exhibit more regularity. Autoregressively processing at the feature level and then deriving tokens using the LM head of the original LLM yields better results than directly autoregressively predicting tokens. As illustrated in Figure 4, autoregressively predicting features yields better performance, demonstrated by a higher speedup ratio of
37
+
38
+ ![](images/9df3a3611bf82d020d22b95debfda596513ae7ea2ad6a69df96aa90f7d7a10cb.jpg)
39
+ Figure 2: Speedup ratio on the MT-bench for non-greedy (temperature=1) settings. Lookahead is confined to greedy decoding, and the non-greedy generation of Medusa does not guarantee lossless performance. Therefore, EAGLE is not compared with these methods.
40
+
41
+ ![](images/b719a6df771353b4c66c7b5a952adbf6f958c04e830bedf1bd90c8286d97d2df.jpg)
42
+ Figure 3: Uncertainty in feature sequences. The next feature following $f_{I}$ is contingent on the sampling outcome and cannot be determined solely based on $f_{I}$ , where both "always" and "am" are possible to follow the token "I" and lead to two branches.
43
+
44
+ 1.9x compared to 1.5x.
45
+
46
+ Secondly, the uncertainty inherent in the sampling process significantly constrains the performance of predicting the next feature. In text generation, the target LLM predicts the distribution of tokens and samples accordingly, introducing randomness. Features, being high-dimensional and continuous, cannot be treated similarly. As depicted in Figure 3, sampling different tokens like "am" or "always" leads to distinct feature sequences, introducing ambiguity into the feature-level autoregression. Medusa faces a similar issue in predicting spaced tokens, where it is uncertain whether the true target for the input $f_{I}$ should be $p_{am}$ or $p_{always}$ . To address this issue, EAGLE inputs the token sequence from one time step ahead, which includes the sampling outcomes, into the draft model. In the example illustrated in Figure 3, this involves predicting $f_{always}$ based on $f_{I}$ and $t_{always}$ , and predicting $f_{am}$ based on $f_{I}$ and $t_{am}$ . As illustrated in Figure 4, by addressing the uncertainty, the speedup ratio further increases from $1.9\mathrm{x}$ to $2.8\mathrm{x}$ .
47
+
48
+ We conducted experiments across dialogue, code generation, mathematical reasoning, and instruction following tasks using the MT-bench, HumanEval, GSM8K, and Alpaca datasets, respectively. Tested LLMs included all models from the Vicuna and LLaMA2-Chat series, along with Mixtral 8x7B Instruct. For LLaMA2-Chat 70B, EAGLE
49
+
50
+ ![](images/0e53d0242434585b4aaf01d556bab456d69ea97a2096cbc9a1ebc45a4e44dc3e.jpg)
51
+ Figure 4: Accuracy and speedup ratio of draft models based on tokens, features and feature&shifted-token at temperature=0, tested on MT-bench with Vicuna 7B as the original LLM. Feature&shifted-token refers to using a feature sequence and a token sequence advanced by one time step as inputs.
52
+
53
+ achieved a speedup ratio of $2.7\mathrm{x} - 3.5\mathrm{x}$, doubled throughput, and theoretically guaranteed the preservation of the generated text's distribution. Figures 1 and 2 illustrate the performance of EAGLE on the MT-bench (Zheng et al., 2023), a highly realistic benchmark simulating actual applications and real-world scenarios, including multi-turn instructions akin to dialogues with ChatGPT. We have chosen to utilize this benchmark as it has been employed by the current state-of-the-art, including Lookahead and Medusa, to demonstrate their speedup ratios. This choice facilitates a fair and direct comparison between our approach and these methods. Compared to the recently proposed speculative sampling-based frameworks, Lookahead and Medusa, EAGLE achieves $1.7\mathrm{x} - 2.1\mathrm{x}$ and $1.5\mathrm{x} - 1.6\mathrm{x}$ speedups, respectively. EAGLE operates in parallel with other acceleration or throughput-improving methods, such as quantization, compilation, etc. Combining EAGLE with these techniques could further reduce the operational costs of LLM systems. For example, with gpt-fast (PyTorch Labs, 2023), EAGLE accelerates LLaMA2-Chat 7B decoding to 160.4 tokens/s on a single RTX 3090 GPU.
54
+
55
+ EAGLE boasts low training costs. For the LLaMA2-Chat 70B model, EAGLE trains a decoder layer with fewer than 1B parameters using no more than 70k dialogues from the ShareGPT dataset. The training is completed in 1-2 days on 4x A100 (40G) GPUs. The training of EAGLE on 7B, 13B and 33B models can even be conducted on a RTX 3090 node in 1-2 days. In practical applications, EAGLE requires only a single training session to provide acceleration for each query. As the number of queries increases, the amortized training cost of EAGLE becomes negligible.
56
+
57
+ Beyond performance, EAGLE offers additional advantages:
58
+
59
+ - Generality: EAGLE is applicable to any autoregressive LLMs (at least in principle). We have applied EAGLE to LLaMA2-Chat (7B, 13B, 70B), Vicuna (7B, 13B, 33B) and Mixtral 8x7B Instruct in a zero-shot way on the MT-bench, GSM8K, HumanEval and
60
+
61
+ alpaca datasets. EAGLE adheres to the commonly used zero-shot/few-shot settings within the LLM community. All experiments employ the same weights, trained exclusively on the ShareGPT dataset, without any additional training on the evaluation datasets. The method adds only a lightweight plug-in (a single transformer decoder layer) to the LLM, which can be easily deployed in a production environment.
62
+
63
+ - Reliability: EAGLE does not involve any fine-tuning of the original LLM, and the preservation of the output distribution by EAGLE is theoretically guaranteed for both the greedy and non-greedy settings. This is in sharp contrast to Lookahead and Medusa which either focus solely on greedy settings or do not guarantee the preservation of distribution in these settings.
64
+
65
+ # 2. Preliminaries
66
+
67
+ Notations. In this paper, "target LLM" denotes the LLM intended for acceleration, while "draft model" refers to the model used for draft generation. "Feature" generally signifies the second-to-top-layer feature of a LLM, the hidden state before the LM head. Tokens are denoted by lowercase $t$ , their embeddings by $e$ , features by $f$ , and distributions by $p$ . Sequences are represented in uppercase, for example, $T_{i:j}$ for $(t_i, t_{i+1}, \dots, t_j)$ . In a LLM, input $T_{1:j}$ is transformed into embeddings $E_{1:j}$ through the embedding layer, then to features $F_{1:j}$ , and the LM Head maps $f_j$ to a distribution $p_{j+1} = \text{LMHead}(f_j)$ , sampling the next token $t_{j+1}$ . Vanilla autoregression at the token level is described by $T_{1:j} \rightarrow E_{1:j} \rightarrow f_j \rightarrow p_{j+1} \rightarrow t_{j+1}$ for any integer $j \geq 1$ .
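To make the notation concrete, the following minimal PyTorch sketch spells out one vanilla autoregressive step, $T_{1:j} \rightarrow E_{1:j} \rightarrow f_j \rightarrow p_{j+1} \rightarrow t_{j+1}$. The toy dimensions and the `backbone` stand-in for the target LLM's layers are illustrative assumptions, not the paper's actual model, and the causal mask is omitted for brevity.

```python
import torch
import torch.nn as nn

vocab, hidden = 100, 32                        # toy sizes, not the real LLM's
embed = nn.Embedding(vocab, hidden)            # token t -> embedding e
backbone = nn.TransformerEncoder(              # stand-in for the LLM body producing features f
    nn.TransformerEncoderLayer(hidden, nhead=4, batch_first=True), num_layers=2)
lm_head = nn.Linear(hidden, vocab)             # f_j -> distribution p_{j+1}

T = torch.randint(0, vocab, (1, 5))            # T_{1:j} with j = 5
E = embed(T)                                   # E_{1:j}
feats = backbone(E)                            # F_{1:j}; f_j = feats[:, -1]
p_next = torch.softmax(lm_head(feats[:, -1]), -1)  # p_{j+1} = LMHead(f_j)
t_next = torch.multinomial(p_next, 1)          # sample t_{j+1}
```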
68
+
69
+ Speculative sampling. Speculative sampling operates through draft and verification phases, with the drafting phase using a smaller model to generate $\gamma$ tokens $\hat{T}_{j+1:j+\gamma}$ and their distributions $\hat{P}_{j+1:j+\gamma}$ . In the verification phase, a single forward pass of the target LLM yields the probabilities $P_{j+1:j+\gamma}$ . Tokens are then sequentially evaluated, with a token $\hat{t}_{j+i}$ having an acceptance probability $\min(1, p_{j+i}(\hat{t}_{j+i}) / \hat{p}_{j+i}(\hat{t}_{j+i}))$ . Upon the rejection of a token $\hat{t}_{j+i}$ , all subsequent tokens are discarded, and this token is resampled based on a distribution norm $(\max(0, p_{j+i} - \hat{p}_{j+i}))$ . As proven in Appendix A.1 of speculative sampling (Leviathan et al., 2023), this method equates to sampling directly from the target LLM. EAGLE adopts this method, ensuring that the distribution of the generated text remains unchanged for both the greedy and non-greedy settings.
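A minimal sketch of this accept-or-adjust rule for a single drafted token, using NumPy and toy distributions (not the authors' implementation):

```python
import numpy as np

def verify_one(t_hat, p, p_hat, rng=np.random.default_rng()):
    """Accept draft token t_hat with prob min(1, p/p_hat); otherwise resample
    from the adjusted distribution norm(max(0, p - p_hat))."""
    if rng.random() < min(1.0, p[t_hat] / p_hat[t_hat]):
        return t_hat, True
    residual = np.maximum(p - p_hat, 0.0)
    residual /= residual.sum()                 # norm(max(0, p - p_hat))
    return rng.choice(len(p), p=residual), False

p     = np.array([0.5, 0.3, 0.2])              # target LLM distribution (toy)
p_hat = np.array([0.6, 0.2, 0.2])              # draft model distribution (toy)
token, accepted = verify_one(0, p, p_hat)
```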
70
+
71
+ # 3. EAGLE
72
+
73
+ EAGLE, aligning with other speculative sampling-based methods, incorporates both a drafting phase and a verification phase.
74
+
75
+ ![](images/0b34fb43aa41af6a614695df0acfb46820aa3e064729be7274ad5d71277a4a9f.jpg)
76
+ Figure 5: A comparison of the methods for drafting the fourth and fifth tokens, $t_4$ and $t_5$ . $t$ (represented by blue blocks) denotes tokens, and $f$ (orange blocks) signifies the features, with subscripts indicating their positions in the sequence. The red border indicates the predictions of the draft model. For simplicity, the $n$ in the $n$ -gram for Lookahead, as shown in the figure, has been set to 2.
77
+
78
+ # 3.1. Drafting phase
79
+
80
+ The primary distinction between EAGLE and other methods lies predominantly in the drafting phase. Figure 5 illustrates a schematic of the drafting phase for different methods. Speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) and Lookahead (Fu et al., 2023) predict tokens based on tokens. Medusa (Cai et al., 2023) independently predicts $t_4$ and $t_5$ using the feature $f_2$ from the target LLM. EAGLE predicts $f_3$ using the feature sequence $(f_1, f_2)$ and the token sequence $(t_2, t_3)$ , advanced by one time step. From $p_4 = \mathrm{LMHead}(f_3)$ , $t_4$ is sampled. Subsequently, $f_3$ and $t_4$ are concatenated into the input sequence to predict the next feature $f_4$ and sample the subsequent token $t_5$ .
81
+
82
+ As illustrated in Figure 6, EAGLE's draft model comprises three modules: the Embedding layer, LM Head, and Autoregression Head. The Embedding layer and LM Head employ the parameters of the target LLM and do not necessitate additional training. The draft model takes as input a feature sequence of shape (bs, seq_len, hidden_dim) and an advanced token sequence of shape (bs, seq_len). It then converts the token sequence into a token embedding sequence of shape (bs, seq_len, hidden_dim), and concatenates it with the feature sequence to form a fused sequence of shape (bs, seq_len, $2 \times$ hidden_dim). The Autoregression Head consists of an FC layer and a decoder layer. The FC layer reduces the dimensionality of the fused sequence to (bs, seq_len, hidden_dim), after which the decoder layer predicts the next feature. The LM Head calculates the distribution based on the feature, from which the next token is sampled. Finally, the predicted feature and the sampled token are concatenated into the input, facilitating the continuation of the autoregressive process. EAGLE creates a tree-structured draft using tree attention, generating a draft tree with depth $m$ and more than $m$ tokens through $m$ forward passes. For instance, as shown in Figure 6, EAGLE drafts a 10-token tree with just 3 forward passes. The actual tree structure employed by EAGLE is
83
+
84
+ ![](images/63c6033693d1a78865c7c2c61213735c1220f550097691395c93f7c629f01b54.jpg)
85
+
86
+ ![](images/9cec80c8a0c3ffe53184a527cdd87bd77433c206fa261d9bfc6226fc9e781707.jpg)
87
+ Figure 6: Pipeline of EAGLE. The upper section illustrates the computational process, while the lower section displays the corresponding generation results for each step. In the upper section, green blocks represent token embeddings, orange blocks represent features, red boxes indicate the predictions of the draft model, and blue modules with snowflake icons represent the use of target LLM parameters, which are not subject to training.
88
+
89
+ detailed in Appendix A.1.
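The sketch below illustrates, with hypothetical toy sizes, the Autoregression Head described above: the feature sequence and the shifted token embeddings are concatenated, an FC layer maps the fused sequence back to hidden_dim, and a single decoder layer (a generic transformer layer stands in here for the LLaMA-style decoder layer; masking is omitted) predicts the next feature, from which the frozen LM Head samples the next token. This is a sketch of the described pipeline, not the released code.

```python
import torch
import torch.nn as nn

hidden, vocab = 32, 100                          # toy sizes, not the real LLM's

class AutoregressionHead(nn.Module):
    """Sketch of the draft module described above: FC layer + one decoder layer."""
    def __init__(self, hidden):
        super().__init__()
        self.fc = nn.Linear(2 * hidden, hidden)  # fuse feature + token embedding
        self.decoder = nn.TransformerEncoderLayer(hidden, nhead=4, batch_first=True)

    def forward(self, feats, tok_emb):
        fused = torch.cat([feats, tok_emb], dim=-1)   # (bs, seq, 2*hidden)
        return self.decoder(self.fc(fused))           # predicted next features

embed = nn.Embedding(vocab, hidden)              # reused (frozen) from the target LLM
lm_head = nn.Linear(hidden, vocab)               # reused (frozen) from the target LLM
head = AutoregressionHead(hidden)

feats  = torch.randn(1, 4, hidden)               # (f_1, ..., f_4) from the target LLM
tokens = torch.randint(0, vocab, (1, 4))         # (t_2, ..., t_5), advanced one time step
f_pred = head(feats, embed(tokens))[:, -1]       # predicted f_5
t_next = torch.multinomial(torch.softmax(lm_head(f_pred), -1), 1)  # sample t_6
```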
90
+
91
+ # 3.2. Training of the draft models
92
+
93
+ Predicting the next feature constitutes a regression task, for which we employ Smooth L1 loss (see Figure 5 EAGLE):
94
+
95
+ $$
96
+ L_{reg} = \text{SmoothL1}\left(f_{i+1}, \text{DraftModel}\left(T_{2:i+1}, F_{1:i}\right)\right).
97
+ $$
98
+
99
+ Predicting features is an intermediary objective of the draft model, with the ultimate goal being the prediction of tokens to generate a sequence of tokens. Consequently, we also employ classification loss to directly optimize towards this final objective:
100
+
101
+ $$
102
+ p_{i+2} = \text{Softmax}(\text{LM\_Head}(f_{i+1})),
103
+ $$
104
+
105
+ $$
106
+ \hat{p}_{i+2} = \text{Softmax}(\text{LM\_Head}(\hat{f}_{i+1})),
107
+ $$
108
+
109
+ $$
110
+ L_{cls} = \text{CrossEntropy}\left(p_{i+2}, \hat{p}_{i+2}\right).
111
+ $$
112
+
113
+ By integrating regression loss and classification loss, we train the Autoregression Head using the combined loss function $L = L_{reg} + w_{cls}L_{cls}$ . Typically, the classification loss
114
+
115
+ is an order of magnitude larger than the regression loss in numerical terms. Consequently, we set $w_{cls}$ to 0.1.
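A sketch of how this combined objective could be computed, assuming `f_true` holds features from the target LLM, `f_pred` the drafted features, and a frozen toy LM head; $w_{cls} = 0.1$ as above. Names and sizes are illustrative, not taken from the released code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden, vocab = 32, 100
lm_head = nn.Linear(hidden, vocab)               # frozen target-LLM head (toy stand-in)
for p in lm_head.parameters():
    p.requires_grad_(False)

f_true = torch.randn(1, 4, hidden)                           # features from the target LLM
f_pred = torch.randn(1, 4, hidden, requires_grad=True)       # features from the draft model

l_reg = F.smooth_l1_loss(f_pred, f_true)                     # regression loss on features
p_true = F.softmax(lm_head(f_true), dim=-1)                  # distribution from true features
logp_pred = F.log_softmax(lm_head(f_pred), dim=-1)           # distribution from drafted features
l_cls = -(p_true * logp_pred).sum(-1).mean()                 # cross-entropy with soft targets
loss = l_reg + 0.1 * l_cls                                   # w_cls = 0.1
loss.backward()
```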
116
+
117
+ EAGLE's Autoregression Head is ideally trained with autoregressively generated text from the target LLM, yet this approach is costly. Fortunately, EAGLE exhibits low sensitivity to training data (ablation study in Section 4.3.3). Instead of employing text generated by the target LLM, we utilize a fixed dataset, substantially reducing the overhead. During the drafting phase, EAGLE autoregressively processes features. Inaccuracies in features can lead to error accumulation. To mitigate this issue, we employ data augmentation by adding random noise sampled from a uniform distribution $\mathcal{U}(-0.1,0.1)$ to features of the target LLM during training (Jain et al., 2023).
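The noise augmentation amounts to a one-line perturbation of the target-LLM features during training, sketched here with a toy tensor:

```python
import torch

def add_feature_noise(feats, scale=0.1):
    """Add uniform noise U(-scale, scale) to target-LLM features during training."""
    return feats + (torch.rand_like(feats) * 2 - 1) * scale

noisy = add_feature_noise(torch.randn(1, 4, 32))   # toy feature tensor
```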
118
+
119
+ # 3.3. Verification phase
120
+
121
+ Employing tree attention, the target LLM computes the probability of each token in the tree-structured draft through a single forward pass. At every node of the draft tree, we recursively apply speculative sampling algorithms to sample or adjust the distribution (details in Appendix A.2), consistent with SpecInfer (Miao et al., 2023), ensuring that the distribution of the output text aligns with that of the target LLM. Concurrently, we document accepted tokens and their features for use in the next drafting phase.
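Below is a sketch of the recursive accept-or-adjust rule applied at a single node of the draft tree (the multi-round procedure referenced above and detailed in Appendix A.2), using toy NumPy distributions; the candidate tokens and per-round draft distributions are hypothetical.

```python
import numpy as np

def multi_round_sample(cands, p, p_hats, rng=np.random.default_rng()):
    """Try each candidate child in turn; on rejection, remove the mass the draft
    already covered (p <- norm(max(0, p - p_hat))) and continue with the next one.
    If every candidate is rejected, sample from the final adjusted distribution."""
    p = p.copy()
    for t, p_hat in zip(cands, p_hats):
        if rng.random() < min(1.0, p[t] / p_hat[t]):
            return t
        p = np.maximum(p - p_hat, 0.0)
        p /= p.sum()
    return rng.choice(len(p), p=p)

p = np.array([0.5, 0.3, 0.2])                                       # target distribution (toy)
p_hats = [np.array([0.7, 0.2, 0.1]), np.array([0.1, 0.6, 0.3])]     # draft distributions per round (toy)
tok = multi_round_sample([0, 1], p, p_hats)
```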
122
+
123
+ # 4. Experiments
124
+
125
+ Models and tasks. We conducted experiments on Vicuna models (7B, 13B, 33B), LLaMA2-chat models (7B, 13B, 70B), and Mixtral 8x7B Instruct, encompassing the common sizes of current mainstream LLMs. We evaluated EAGLE across multiple tasks including multi-turn dialogue, code generation, mathematical reasoning, and instruction following, employing the MT-bench (Zheng et al., 2023), HumanEval (Chen et al., 2021), GSM8K (Cobbe et al., 2021), and Alpaca (Taori et al., 2023) datasets, respectively. Speculative sampling (Leviathan et al., 2023) conducted experiments with a batch size of 1, a setting subsequently adopted by other works such as DistillSpec (Zhou et al., 2023) and BiLD (Kim et al., 2023). Similarly, the majority of our experiments also adopted this setting. Experiments with a batch size greater than 1 are presented in Section 4.4.
126
+
127
+ Metrics. Like other speculative sampling-based methods, EAGLE primarily focuses on latency rather than throughput. We assess acceleration effects using the following metrics:
128
+
129
+ - Walltime speedup ratio: The actual test speedup ratio relative to vanilla autoregressive decoding.
130
+ - Average acceptance length $\tau$ : The average number of tokens accepted per forward pass of the target LLM.
131
+
132
+ - Acceptance rate $\alpha$ : The ratio of accepted to generated tokens during drafting, gauges draft accuracy. It's less applicable for tree drafts due to multiple tokens sampled per location with only one accepted. Hence, when measuring this metric, we utilize chain drafts without tree attention, aligning with speculative sampling and DistillSpec. EAGLE's draft model inputs feature and token sequences. Autoregressive feature processing can propagate errors, so we measure the acceptance rate as $n - \alpha$ , considering $n$ features predicted by the draft model, potentially with inaccuracies.
133
+
134
+ Acceleration of EAGLE theoretically guarantees the preservation of the target LLMs' output distribution. Consequently, evaluating the quality of EAGLE's generated results is both unnecessary and meaningless.
135
+
136
+ Training. We fixed the target LLMs. EAGLE was trained on the ShareGPT dataset, utilizing 68,000 dialogue iterations with a learning rate set at 3e-5. We employed the AdamW optimizer with beta values $(\beta_{1},\beta_{2})$ set to (0.9, 0.95) and implemented gradient clipping of 0.5. The trainable parameters of EAGLE corresponding to the 7B, 13B, 33B, and 70B models are 0.24B, 0.37B, 0.56B, and 0.99B, respectively. The trainable parameters of EAGLE for MoE model Mixtral 8x7B is 0.28B. EAGLE is characterized by its low training cost; the Autoregression Head is trainable within 1-2 days on an A100 40G server for the 70B models.
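A sketch of the optimizer setup implied by these hyperparameters (AdamW with lr 3e-5, betas (0.9, 0.95), and gradient clipping at 0.5); `draft_model` and the dummy loss are placeholders, not the actual training loop.

```python
import torch
import torch.nn as nn

draft_model = nn.Linear(32, 32)    # placeholder for the trainable Autoregression Head
opt = torch.optim.AdamW(draft_model.parameters(), lr=3e-5, betas=(0.9, 0.95))

loss = draft_model(torch.randn(4, 32)).pow(2).mean()            # dummy loss for illustration
loss.backward()
torch.nn.utils.clip_grad_norm_(draft_model.parameters(), 0.5)   # gradient clipping at 0.5
opt.step()
opt.zero_grad()
```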
137
+
138
+ # 4.1. Effectiveness
139
+
140
+ Figures 1 and 2, along with Table 1, display the speedup ratios of EAGLE. EAGLE demonstrates better acceleration at temperature $= 0$ compared to temperature $= 1$ . For instance, for LLaMA2-Chat 13B at temperature $= 0$ , the speedup ratios range from $3.01x$ to $3.76x$ , while at temperature $= 1$ , they range from $2.66x$ to $2.89x$ . In code generation tasks (HumanEval), EAGLE achieves its best acceleration performance. This is attributed to the prevalence of fixed templates in code, making it easier to generate drafts for these templates. Compared to recently introduced speculative sampling-based methods, Lookahead and Medusa, EAGLE is faster by $1.70x - 2.08x$ and $1.47x - 1.60x$ , respectively. Employing speculative sampling in the Vicuna and LLaMA2-Chat series is challenging. For the 7B model, there is no suitable draft model. For other sizes, using the 7B model as the draft model, we iterated through draft lengths from 2 to 10 and reported the highest speedup ratio. For the 13B model, we observed no improvement in speed. For the 33B and 70B models, the speedup ratios were $1.12x$ and $1.88x$ , respectively. For DistillSpec, to ensure fairness, we used the same training data as EAGLE. Additionally, the divergence function employed follows the FKL as detailed in Appendix A.1 of the DistillSpec paper. While distillation slightly improved the speedup ratio, the limited enhancement is
141
+
142
+ Table 1: Speedup ratio and average acceptance length $\tau$ on HumanEval, GSM8K, and Alpaca. T denotes temperature, V represents Vicuna, and LC stands for LLaMA2-Chat.
143
+
144
+ <table><tr><td colspan="2"></td><td colspan="2">HumanEval</td><td colspan="2">GSM8K</td><td colspan="2">Alpaca</td></tr><tr><td></td><td>Model</td><td>Speedup</td><td>τ</td><td>Speedup</td><td>τ</td><td>Speedup</td><td>τ</td></tr><tr><td rowspan="6">T=0</td><td>V 7B</td><td>3.33x</td><td>4.29</td><td>3.01x</td><td>4.00</td><td>2.79x</td><td>3.86</td></tr><tr><td>V13B</td><td>3.58x</td><td>4.39</td><td>3.08x</td><td>3.97</td><td>3.03x</td><td>3.95</td></tr><tr><td>V 33B</td><td>3.67x</td><td>4.28</td><td>3.25x</td><td>3.94</td><td>2.97x</td><td>3.61</td></tr><tr><td>LC 7B</td><td>3.17x</td><td>4.24</td><td>2.91x</td><td>3.82</td><td>2.78x</td><td>3.71</td></tr><tr><td>LC 13B</td><td>3.76x</td><td>4.52</td><td>3.20x</td><td>4.03</td><td>3.01x</td><td>3.83</td></tr><tr><td>LC 70B</td><td>3.52x</td><td>4.42</td><td>3.03x</td><td>3.93</td><td>2.97x</td><td>3.77</td></tr><tr><td rowspan="6">T=1</td><td>V 7B</td><td>2.39x</td><td>3.43</td><td>2.34x</td><td>3.29</td><td>2.21x</td><td>3.30</td></tr><tr><td>V13B</td><td>2.65x</td><td>3.63</td><td>2.57x</td><td>3.60</td><td>2.45x</td><td>3.57</td></tr><tr><td>V 33B</td><td>2.76x</td><td>3.62</td><td>2.77x</td><td>3.60</td><td>2.52x</td><td>3.32</td></tr><tr><td>LC 7B</td><td>2.61x</td><td>3.79</td><td>2.40x</td><td>3.52</td><td>2.29x</td><td>3.33</td></tr><tr><td>LC 13B</td><td>2.89x</td><td>3.78</td><td>2.82x</td><td>3.67</td><td>2.66x</td><td>3.55</td></tr><tr><td>LC 70B</td><td>2.92x</td><td>3.76</td><td>2.74x</td><td>3.58</td><td>2.65x</td><td>3.47</td></tr></table>
145
+
146
+ Table 2: Average acceptance length $\tau$ and acceptance rate $\alpha$ on MT-bench. T denotes temperature.
147
+
148
+ <table><tr><td></td><td>Model</td><td>τ</td><td>0-α</td><td>1-α</td><td>2-α</td><td>3-α</td><td>4-α</td></tr><tr><td rowspan="6">T=0</td><td>Vicuna 7B</td><td>3.94</td><td>0.79</td><td>0.74</td><td>0.72</td><td>0.73</td><td>0.67</td></tr><tr><td>Vicuna 13B</td><td>3.98</td><td>0.79</td><td>0.74</td><td>0.72</td><td>0.74</td><td>0.70</td></tr><tr><td>Vicuna 33B</td><td>3.68</td><td>0.74</td><td>0.69</td><td>0.67</td><td>0.67</td><td>0.66</td></tr><tr><td>LLaMA2-Chat 7B</td><td>3.62</td><td>0.76</td><td>0.69</td><td>0.67</td><td>0.68</td><td>0.68</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.90</td><td>0.77</td><td>0.69</td><td>0.69</td><td>0.70</td><td>0.71</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.81</td><td>0.75</td><td>0.69</td><td>0.65</td><td>0.64</td><td>0.64</td></tr><tr><td rowspan="6">T=1</td><td>Vicuna 7B</td><td>3.17</td><td>0.71</td><td>0.68</td><td>0.66</td><td>0.66</td><td>0.65</td></tr><tr><td>Vicuna 13B</td><td>3.20</td><td>0.73</td><td>0.68</td><td>0.68</td><td>0.67</td><td>0.69</td></tr><tr><td>Vicuna 33B</td><td>3.22</td><td>0.71</td><td>0.67</td><td>0.64</td><td>0.64</td><td>0.64</td></tr><tr><td>LLaMA2-Chat 7B</td><td>3.30</td><td>0.71</td><td>0.66</td><td>0.66</td><td>0.66</td><td>0.64</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.45</td><td>0.73</td><td>0.69</td><td>0.66</td><td>0.67</td><td>0.67</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.46</td><td>0.73</td><td>0.67</td><td>0.64</td><td>0.66</td><td>0.65</td></tr></table>
149
+
150
+ because distillation aims to increase the draft model's acceptance rate, while the bottleneck for speculative sampling performance lies in the high overhead of the draft model.
151
+
152
+ Tables 1 and 2 indicate that in EAGLE, the target LLM generates 3.2-4.5 tokens per forward pass, surpassing vanilla decoding which produces only one token per forward pass, thereby significantly increasing generation speed. As shown in Table 2 and Appendix B, the acceptance rate for completely accurate feature sequences, $0 - \alpha$, significantly exceeds that for sequences with a single erroneous feature, $1 - \alpha$, indicating the impact of feature errors on draft model performance. Yet, the slight variation from $1 - \alpha$ to $4 - \alpha$ underscores EAGLE's robustness to feature errors and its adept handling of error accumulation.
153
+
154
+ Table 3 reveals that EAGLE achieved a $1.5\mathrm{x}$ speedup with the Mixtral 8x7B Instruct model. This modest acceleration, compared to models like LLaMA, is due to a shorter average acceptance length and the complexity of accelerating MoE models via speculative sampling. MoE models typically require reading the weights of only two experts per token
155
+
156
+ Table 3: Speedup ratio, average acceptance length $\tau$ , and acceptance rate $\alpha$ on MT-bench at temperature=0. The target LLM is Mixtral 8x7B Instruct-v0.1.
157
+
158
+ <table><tr><td>Speedup</td><td>τ</td><td>0-α</td><td>1-α</td><td>2-α</td><td>3-α</td><td>4-α</td></tr><tr><td>1.50x</td><td>3.25</td><td>0.67</td><td>0.62</td><td>0.61</td><td>0.64</td><td>0.63</td></tr></table>
159
+
160
+ Table 4: Generation speed of EAGLE combined with gpt-fast, evaluated on MT-bench with LLaMA2-Chat 7B at temperature=0.
161
+
162
+ <table><tr><td>Precision</td><td>FP16</td><td>int4</td></tr><tr><td>Vanilla (Huggingface)</td><td>24.5 tokens/s</td><td>N/A</td></tr><tr><td>gpt-fast</td><td>55.1 tokens/s</td><td>106.9 tokens/s</td></tr><tr><td>EAGLE + gpt-fast</td><td>100.2 tokens/s</td><td>160.4 tokens/s</td></tr></table>
163
+
164
+ during vanilla autoregressive decoding. However, during the verification phase of speculative sampling, processing multiple tokens may necessitate accessing the weights of more than two experts, contrasting with dense decoder-only models where all weights are read regardless of the number of tokens forwarded.
165
+
166
+ # 4.2. Case study: EAGLE + gpt-fast
167
+
168
+ EAGLE is compatible with other acceleration technologies. We conducted experiments combining EAGLE with gpt-fast, which employs quantization and compilation for acceleration. As shown in Table 4, by integrating EAGLE with gpt-fast, we increased the generation speed of LLaMA2-Chat 7B on a single RTX 3090 to 160.4 tokens/s.
169
+
170
+ # 4.3. Ablation study
171
+
172
+ # 4.3.1. TREE ATTENTION
173
+
174
+ EAGLE, similar to SpecInfer and Medusa, employs tree attention, where both the generation and validation of drafts are tree-structured. In contrast, methods like speculative sampling do not use tree attention, resulting in chain-structured draft generation and validation. Table 5 and Figure 7 present comparative results indicating the impact of using tree attention. The implementation of tree draft and verification in EAGLE results in an approximate increase of 0.6-0.8 in the average acceptance length and about 0.3-0.5 in the speedup ratio. Compared to chain draft and verification, tree draft and verification do not increase the number of forward passes in the model (both the target LLM and the draft model), but they do increase the number of tokens processed per forward pass. Consequently, the improvement in the speedup ratio is less pronounced than the increase in average acceptance length. Notably, even without employing tree draft and verification, EAGLE demonstrates a
175
+
176
+ ![](images/5d4c63f4be47b83425a087cfda781e257b4ff5b5a465f1b61bec9e3d7e5556fe.jpg)
177
+ Figure 7: Speedup ratios of EAGLE with and without the use of tree attention. The evaluation dataset is MT-bench, with the temperature parameter set to 0.
178
+
179
+ Table 5: Average acceptance length $\tau$ of EAGLE with and without the use of tree attention. The evaluation dataset is MT-bench, with the temperature parameter set to 0.
180
+
181
+ <table><tr><td colspan="3">Vicuna</td><td colspan="3">LLaMA2-Chat</td></tr><tr><td>Size</td><td>Chain</td><td>Tree</td><td>Size</td><td>Chain</td><td>Tree</td></tr><tr><td>7B</td><td>3.20</td><td>3.94 (+0.74)</td><td>7B</td><td>3.00</td><td>3.62 (+0.62)</td></tr><tr><td>13B</td><td>3.23</td><td>3.98 (+0.75)</td><td>13B</td><td>3.18</td><td>3.90 (+0.68)</td></tr><tr><td>33B</td><td>2.97</td><td>3.68 (+0.71)</td><td>70B</td><td>3.12</td><td>3.81 (+0.69)</td></tr></table>
182
+
183
+ significant acceleration effect, approximately in the range of $2.3\mathrm{x} - 2.7\mathrm{x}$.
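Tree attention is what lets all drafted branches share a single forward pass: each drafted token attends only to itself and to its ancestors in the draft tree (plus the already-accepted context). A minimal sketch of building such a mask from parent indices follows; the tree layout here is a hypothetical example, not EAGLE's actual structure from Appendix A.1.

```python
import torch

def tree_attention_mask(parents):
    """parents[i] is the index of node i's parent in the draft tree, or -1 for the root.
    Node i may attend to itself and to all of its ancestors."""
    n = len(parents)
    mask = torch.zeros(n, n, dtype=torch.bool)
    for i in range(n):
        j = i
        while j != -1:
            mask[i, j] = True
            j = parents[j]
    return mask

# toy draft tree: two children of the root, each with one child of its own
print(tree_attention_mask([-1, 0, 0, 1, 2]).int())
```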
184
+
185
+ # 4.3.2. INPUTS OF DRAFT MODELS
186
+
187
+ Compared to other speculative sampling-based methods, the key innovation of EAGLE lies in its utilization of features computed by the target LLM and the incorporation of sampling outcomes into the input of the draft model to address randomness. We conducted an ablation study on Vicuna 7B, assessing draft models with varying inputs. We tested four types of inputs: feature&shifted-token (EAGLE), feature&unshifted-token, token, and feature. Both feature&shifted-token (EAGLE) and feature&unshifted-token integrate semantic information at different levels. The distinction lies in the fact that feature&shifted-token (EAGLE) inputs tokens advanced by one time step, equipping it to address randomness effectively. Apart from the use of a FC layer to reduce dimensionality for the feature&token input, the structure of the draft model remains entirely consistent. Figure 8 presents the experimental outcomes on the MT-bench with Vicuna 7B as the target LLM. Three observations can be drawn.
188
+
189
+ - First, when the number of parameters of the draft model is limited, utilizing features yields slightly better results than tokens.
190
+ - Second, merging features and tokens modestly boosts
191
+
192
+ Table 6: The speedup ratios and average acceptance length $\tau$ using different training datasets evaluated on the MT-bench, with the target LLM being LLaMA2-Chat 7B and the temperature set to 0. "Fixed dataset" refers to both questions and answers originating from the ShareGPT dataset. "Data generated by target LLM" denotes that while questions are sourced from the ShareGPT dataset, the answers are generated by the target LLM.
193
+
194
+ <table><tr><td>Training data</td><td>Speedup</td><td>τ</td></tr><tr><td>Fixed dataset</td><td>2.78x</td><td>3.62</td></tr><tr><td>Data generated by target LLM</td><td>2.88x</td><td>3.75</td></tr></table>
195
+
196
+ performance, mainly as discrete, error-free tokens mitigate feature error accumulation, evident from the similar $0 - \alpha$ of feature&unshifted-token and feature-only draft models, with a significantly improved $1 - \alpha$ .
197
+
198
+ - Third, addressing the randomness inherent in the sampling process results in the most significant improvement. The feature&shifted-token scheme, compared to feature&unshifted-token, adds no complexity yet markedly enhances the draft model's capability by simply advancing the token by one time step, allowing the draft model to account for the randomness in sampling.
199
+
200
+ # 4.3.3. TRAINING DATA
201
+
202
+ EAGLE uses a fixed dataset for training, avoiding increased overhead from using the target LLM for generating training data. Ablation study (see Table 6) shows that data from the target LLM marginally improves performance, indicating EAGLE's low sensitivity to training data and justifying the fixed dataset approach for cost reduction.
203
+
204
+ # 4.4. Batch size and throughput
205
+
206
+ Inference in LLMs is memory-bound (Patterson, 2004; Shazeer, 2019), leaving GPU computational resources underutilized. The principle behind the speculative sampling-based approach in enhancing generation speed lies in more effectively utilizing GPU computational resources. As the batch size increases, the available computational capacity of the GPU decreases, leading to a reduction in the acceleration effect. In this section, we present experimental results for scenarios where the batch size exceeds 1. As demonstrated in Table 7, the speedup ratio diminishes with increasing batch size. When using Vicuna 7B as the target LLM, the speedup ratio at $\text{bs} = 4$ is higher than at $\text{bs} = 3$ . This is attributed to the fact that, during the verification phase of EAGLE, the target LLM processes multiple tokens in a single forward pass, and the processing at $\text{bs} = 4$ is faster than at $\text{bs} = 3$ . In contrast, with vanilla autoregressive decoding
207
+
208
+ ![](images/55a41ed71359199c53e0d468c8aed79f664ec69f8689dd20198a4adfc91521f7.jpg)
209
+
210
+ ![](images/69757b7b2973bcb2d762f9aa9a8b842ca1f8a470831d1b6adbd973ec79c334b3.jpg)
211
+
212
+ ![](images/13fd560b911fd633a67c61c9f19e7a0528f3fc5ae5407dad4269155d8c20d8c2.jpg)
213
+
214
+ ![](images/763085b22612ab09946e7df47519b42bdd46782f3f5bacd37032b6510b88d84e.jpg)
215
+
216
+ ![](images/291d397fa5142b07b0aeef4117a23b56a8f6f5f3269fcb1bb1e6269c030d2e31.jpg)
217
+
218
+ ![](images/7fda2c23cf9d61b370d1aa061603853d368c81bb57c866c0c4de36117c2d5994.jpg)
219
+ Figure 8: Performance of draft models with varying inputs. The target LLM is Vicuna 7B, and the test dataset is MT-bench. Speed refers to the walltime speedup ratio, $\tau$ denotes the average acceptance length, $0 - \alpha$ represents the acceptance rate with entirely precise inputs, $1 - \alpha$ indicates the acceptance rate when the input includes one imprecise feature, and $T$ refers to the temperature.
220
+
221
+ ![](images/ae1e1e5dcad1eee3d790c36ec03d9453e9cc9f267131097d1997da21f8577bec.jpg)
222
+
223
+ ![](images/27f526eafcd945e355661ce2d62bfd6a77652b5a637e57414ba2e0defffe4422.jpg)
224
+
225
+ ![](images/c04015b102ff3c2e2c1eb0324538f148f41dec99116c0b5c3a922ef3180881f5.jpg)
226
+
227
+ where the target LLM processes one token per forward pass, the speeds at $\mathrm{bs} = 3$ and $\mathrm{bs} = 4$ are nearly identical.
228
+
229
+ Although speculative sampling-based methods predominantly focus on latency, we also investigated EAGLE's throughput for batch size $>1$ , another key metric for LLM systems. Compared to vanilla autoregressive decoding, EAGLE requires slightly more CUDA memory. For Vicuna 7B as the target LLM, operating under a memory constraint of a single RTX 3090 with 24G of CUDA memory, the maximum batch size (bs) for vanilla autoregressive decoding and EAGLE are 8 and 7, respectively. In the case of LLaMA2-Chat 70B, constrained by 4 A100 (40G) GPUs totaling 160G of CUDA memory, the maximum bs for vanilla autoregressive decoding and EAGLE are 5 and 4, respectively. All evaluations were conducted at FP16 precision. We calculated the throughput for different bs and selected the maximum value. Both vanilla autoregressive decoding and EAGLE achieve maximum throughput at their respective maximum bs. Tree attention consumes more computational resources. At $\mathrm{bs} = 7$ , the computational resources are less abundant, making the non-use of tree attention more advantageous. As illustrated in Table 7, EAGLE achieves a $2\mathrm{x}$ increase in throughput.
230
+
231
+ Table 7: Speedup ratios at different batch sizes and throughput of EAGLE. The evaluation dataset is MT-bench, with the temperature parameter set to 0.
232
+
233
+ <table><tr><td>Batch size</td><td>1</td><td>2</td><td>3</td><td>4</td><td>Throughput</td></tr><tr><td>Vicuna 7B</td><td>2.90x</td><td>2.87x</td><td>2.65x</td><td>2.76x</td><td>1.97x</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.01x</td><td>2.81x</td><td>2.50x</td><td>2.40x</td><td>1.99x</td></tr></table>
234
+
235
+ # 5. Related Work
236
+
237
+ There has been considerable research into accelerating language models, involving techniques such as distillation (Hinton et al., 2015), quantization (Hubara et al., 2018; Shen et al., 2020; Kim et al., 2021; Zadeh et al., 2020; Zafrir et al., 2019), pruning (Gale et al., 2019; Sanh et al., 2020; Kurtic et al., 2022; Voita et al., 2019), and innovative network architecture designs (Gu & Dao, 2023; Wu et al., 2020). These methods aim to reduce the latency per forward pass.
238
+
239
+ Similar to our approach are frameworks based on speculative sampling. Early works (Stern et al., 2018; Sun et al., 2021) accelerated greedy decoding, while speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) extended it to non-greedy sampling, provably maintaining the original output distribution. Ensuring unchanged output distribution makes acceleration more challenging; many studies have explored lossy acceleration as a trade-off. For instance,
240
+
241
+ DistillSpec (Zhou et al., 2023) modifies acceptance probabilities using a lenience function, BiLD (Kim et al., 2023) accepts drafts if the distance metric from the target LLM distribution is below a certain threshold, and Medusa (Cai et al., 2023) uses a minimum of a hard threshold and an entropy-dependent threshold for truncation. In contrast, EAGLE does not employ any relaxations and maintains the output distribution of the LLM unchanged.
242
+
243
+ The primary differences among speculative sampling-based methods manifest predominantly in the drafting phase. Speculative sampling (Leviathan et al., 2023; Chen et al., 2023a) utilizes a lower-parameter version of the target LLM as the draft model. Self-Speculative Decoding (Zhang et al., 2023) skips some layers of the target LLM during draft generation. SpecInfer (Miao et al., 2023) employs a set of small models to generate drafts in parallel. Cascade Speculative Drafting (Chen et al., 2023b) and Staged Speculative Decoding (Spector & Re, 2023) cascade different overhead draft models. Online Speculative Decoding (Liu et al., 2023) trains the draft model on a distribution of queries. Methods (Hooper et al., 2023; Fu et al., 2023; Yang et al., 2023b) such as Medusa (Cai et al., 2023) do not employ a separate target LLM; instead, they generate drafts by utilizing features or weights from the target LLM. REST (He et al., 2023) generates drafts based on retrieval methods. LLMA (Yang et al., 2023a), used for tasks like grammatical correction where input and output overlap, retrieves drafts directly from the input.
244
+
245
+ # 6. Conclusion
246
+
247
+ In this paper, we introduce EAGLE, an efficient framework for speculative sampling. EAGLE conducts the drafting process autoregressively at the more structured (second-to-top-layer) feature level and mitigates sampling uncertainty in predicting the next feature by incorporating tokens from one time step ahead. EAGLE is guaranteed to preserve the output distribution of the LLM while significantly enhancing generation speed. On MT-bench, EAGLE is $2.1\mathrm{x} - 3.8\mathrm{x}$ faster than vanilla autoregressive decoding, $1.7\mathrm{x} - 2.1\mathrm{x}$ faster than Lookahead, and $1.5\mathrm{x} - 1.6\mathrm{x}$ faster than Medusa.
248
+
249
+ Acknowledgements. We acknowledge useful discussions with the Medusa team leader Tianle Cai, the Lookahead team leader Hao Zhang, the SpecTr team leader Ziteng Sun, interactions with the gpt-fast team leaders Horace He and Soumith Chintala on X, and Yihan Wu.
250
+
251
+ # References
252
+
253
+ Cai, T., Li, Y., Geng, Z., Peng, H., and Dao, T. Medusa: Simple framework for accelerating LLM generation with multiple decoding heads. https://github.com/FasterDecoding/Medusa, 2023.
254
+
255
+ Chen, C., Borgeaud, S., Irving, G., Lespiau, J.-B., Sifre, L., and Jumper, J. Accelerating large language model decoding with speculative sampling. arXiv preprint arXiv:2302.01318, 2023a.
256
+ Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. d. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021.
257
+ Chen, Z., Yang, X., Lin, J., Sun, C., Huang, J., and Chang, K. C.-C. Cascade speculative drafting for even faster LLM inference. arXiv preprint arXiv:2312.11462, 2023b.
258
+ Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
259
+ Fu, Y., Bailis, P., Stoica, I., and Zhang, H. Breaking the sequential dependency of LLM inference using lookahead decoding, November 2023. URL https://lmsys.org/blog/2023-11-21-lookahead-decoding/.
260
+ Gale, T., Elsen, E., and Hooker, S. The state of sparsity in deep neural networks. arXiv preprint arXiv:1902.09574, 2019.
261
+ Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023.
262
+ He, Z., Zhong, Z., Cai, T., Lee, J. D., and He, D. Rest: Retrieval-based speculative decoding. arXiv preprint arXiv:2311.08252, 2023.
263
+ Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015.
264
+ Hooper, C., Kim, S., Mohammadzadeh, H., Genc, H., Keutzer, K., Gholami, A., and Shao, S. Speed: Speculative pipelined execution for efficient decoding. arXiv preprint arXiv:2310.12072, 2023.
265
+ Hubara, I., Courbariaux, M., Soudry, D., El-Yaniv, R., and Bengio, Y. Quantized neural networks: Training neural networks with low precision weights and activations. journal of machine learning research, 18(187):1-30, 2018.
266
+ Jain, N., Chiang, P.-y., Wen, Y., Kirchenbauer, J., Chu, H.-M., Somepalli, G., Bartoldson, B. R., Kailkhura, B., Schwarzschild, A., Saha, A., et al. NEFTune: Noisy embeddings improve instruction finetuning. arXiv preprint arXiv:2310.05914, 2023.
267
+
268
+ Kim, S., Gholami, A., Yao, Z., Mahoney, M. W., and Keutzer, K. I-bert: Integer-only bert quantization. In International conference on machine learning, pp. 5506-5518. PMLR, 2021.
269
+ Kim, S., Mangalam, K., Moon, S., Malik, J., Mahoney, M. W., Gholami, A., and Keutzer, K. Speculative decoding with big little decoder. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.
270
+ Kurtic, E., Campos, D., Nguyen, T., Frantar, E., Kurtz, M., Fineran, B., Goin, M., and Alistarh, D. The optimal bert surgeon: Scalable and accurate second-order pruning for large language models. arXiv preprint arXiv:2203.07259, 2022.
271
+ Leviathan, Y., Kalman, M., and Matias, Y. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pp. 19274-19286. PMLR, 2023.
272
+ Liu, X., Hu, L., Bailis, P., Stoica, I., Deng, Z., Cheung, A., and Zhang, H. Online speculative decoding. arXiv preprint arXiv:2310.07177, 2023.
273
+ Miao, X., Oliaro, G., Zhang, Z., Cheng, X., Wang, Z., Wong, R. Y. Y., Chen, Z., Arfeen, D., Abhyankar, R., and Jia, Z. SpecInfer: Accelerating generative LLM serving with speculative inference and token tree verification. arXiv preprint arXiv:2305.09781, 2023.
274
+ Patterson, D. A. Latency lags bandwidth. Communications of the ACM, 47(10):71-75, 2004.
275
+ PyTorch Labs. gpt-fast. https://github.com/pytorch-labs/gpt-fast/, 2023.
276
+ Sanh, V., Wolf, T., and Rush, A. Movement pruning: Adaptive sparsity by fine-tuning. Advances in Neural Information Processing Systems, 33:20378-20389, 2020.
277
+ Santilli, A., Severino, S., Postolache, E., Maiorca, V., Mancusi, M., Marin, R., and Rodola, E. Accelerating transformer inference for translation via parallel decoding. In Rogers, A., Boyd-Graber, J., and Okazaki, N. (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 12336-12355, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.689. URL https://aclanthology.org/2023.acl-long.689.
278
+ Shazeer, N. Fast transformer decoding: One write-head is all you need. arXiv preprint arXiv:1911.02150, 2019.
279
+ Shen, S., Dong, Z., Ye, J., Ma, L., Yao, Z., Gholami, A., Mahoney, M. W., and Keutzer, K. Q-BERT: Hessian based ultra low precision quantization of BERT. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pp. 8815-8821, 2020.
282
+ Spector, B. and Re, C. Accelerating LLM inference with staged speculative decoding. arXiv preprint arXiv:2308.04623, 2023.
283
+ Stern, M., Shazeer, N., and Uszkoreit, J. Blockwise parallel decoding for deep autoregressive models. Advances in Neural Information Processing Systems, 31, 2018.
284
+ Sun, X., Ge, T., Wei, F., and Wang, H. Instantaneous grammatical error correction with shallow aggressive decoding. arXiv preprint arXiv:2106.04970, 2021.
285
+ Taori, R., Gulrajani, I., Zhang, T., Dubois, Y., Li, X., Guestrin, C., Liang, P., and Hashimoto, T. B. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.
286
+ Touvron, H., Martin, L., Stone, K., Albert, P., Almahairi, A., Babaei, Y., Bashlykov, N., Batra, S., Bhargava, P., Bhosale, S., et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.
287
+ Voita, E., Talbot, D., Moiseev, F., Sennrich, R., and Titov, I. Analyzing multi-head self-attention: Specialized heads do the heavy lifting, the rest can be pruned. arXiv preprint arXiv:1905.09418, 2019.
288
+ Wu, Z., Liu, Z., Lin, J., Lin, Y., and Han, S. Lite transformer with long-short range attention. arXiv preprint arXiv:2004.11886, 2020.
289
+ Xia, H., Ge, T., Wang, P., Chen, S.-Q., Wei, F., and Sui, Z. Speculative decoding: Exploiting speculative execution for accelerating seq2seq generation. In Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3909-3925, 2023.
290
+ Yang, N., Ge, T., Wang, L., Jiao, B., Jiang, D., Yang, L., Majumder, R., and Wei, F. Inference with reference: Lossless acceleration of large language models. arXiv preprint arXiv:2304.04487, 2023a.
291
+ Yang, S., Lee, G., Cho, J., Papailiopoulos, D., and Lee, K. Predictive pipelined decoding: A compute-latency trade-off for exact LLM decoding. arXiv preprint arXiv:2307.05908, 2023b.
292
+ Zadeh, A. H., Edo, I., Awad, O. M., and Moshovos, A. GOBO: Quantizing attention-based NLP models for low latency and energy efficient inference. In 2020 53rd Annual IEEE/ACM International Symposium on Microarchitecture (MICRO), pp. 811-824. IEEE, 2020.
293
+
294
+ Zafrir, O., Boudoukh, G., Izsak, P., and Wasserblat, M. Q8BERT: Quantized 8bit BERT. In 2019 Fifth Workshop on Energy Efficient Machine Learning and Cognitive Computing-NeurIPS Edition (EMC2-NIPS), pp. 36-39. IEEE, 2019.
295
+ Zhang, J., Wang, J., Li, H., Shou, L., Chen, K., Chen, G., and Mehrotra, S. Draft & verify: Lossless large language model acceleration via self-speculative decoding. arXiv preprint arXiv:2309.08168, 2023.
296
+ Zhang, P., Zeng, G., Wang, T., and Lu, W. TinyLlama: An open-source small language model. arXiv preprint arXiv:2401.02385, 2024.
297
+ Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E., et al. Judging LLM-as-a-judge with MT-Bench and Chatbot Arena. arXiv preprint arXiv:2306.05685, 2023.
298
+ Zhou, Y., Lyu, K., Rawat, A. S., Menon, A. K., Rostamizadeh, A., Kumar, S., Kagy, J.-F., and Agarwal, R. DistillSpec: Improving speculative decoding via knowledge distillation. arXiv preprint arXiv:2310.08461, 2023.
299
+
300
+ # A. Implementation Details
301
+
302
+ # A.1. Tree Structure
303
+
304
+ Utilizing tree attention, EAGLE generates a tree-structured draft. The left side of Figure 9 illustrates the tree structure of the draft, while the right side depicts the corresponding chain-structured draft when tree attention is not used (as utilized in the ablation study detailed in Section 4.3.1). In a greedy setting, we select the top $k$ tokens with the highest probabilities as child nodes. In a non-greedy setting, we sample $k$ tokens. The number of child nodes, $k$ , can be inferred from Figure 9; for instance, $k = 4$ at the root node. Regardless of employing a tree-structured or chain-structured draft, the draft model undergoes 5 forward passes during the draft phase. During the verification phase, each token's probability is obtained through a single forward pass by the target LLM.
305
+
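For concreteness, the per-node expansion described above can be sketched as follows. This is a minimal NumPy illustration of choosing $k$ child tokens at one node of the draft tree; the function name, its signature, and the without-replacement sampling in the non-greedy branch are our own assumptions, not EAGLE's implementation.

```python
import numpy as np

def expand_node(probs, k, greedy=True, rng=None):
    """Pick k child tokens for one node of the tree-structured draft.

    probs : the draft model's next-token distribution at this node
            (1-D array over the vocabulary, summing to 1).
    In the greedy setting the k highest-probability tokens become children;
    otherwise k tokens are sampled from the distribution.
    """
    if rng is None:
        rng = np.random.default_rng()
    if greedy:
        # Top-k tokens by probability, in descending order.
        children = np.argsort(probs)[::-1][:k]
    else:
        # One plausible reading of "sample k tokens": k distinct draws.
        children = rng.choice(len(probs), size=k, replace=False, p=probs)
    return children.tolist()
```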
306
+ Why do we use such a tree structure? The choice of the tree structure, as depicted in Figure 9, was not rigorously optimized but rather based on intuition: branches of higher-probability tokens should be deeper and wider. For this paper, all models across all experiments utilized the draft structure shown in Figure 9. However, the optimal tree structure is likely context-dependent. For instance, as batch size increases and redundant computational resources decrease, a smaller tree might be preferable. Tuning the draft structure could potentially lead to improved performance.
307
+
308
+ ![](images/db562c2514d7e9c23c2097acad387afda67e36d5c4f4a8a35271bdf6236659a6.jpg)
309
+ Figure 9: Structure of EAGLE's draft. The left side shows the draft structure when tree attention is employed, while the right side depicts the draft structure without the use of tree attention.
310
+
311
+ # A.2. Multi-Round Speculative Sampling
312
+
313
+ Unlike the chain-structured draft of speculative sampling, EAGLE employs a tree-structured draft, necessitating modifications to the sampling algorithm. The sampling algorithm A of speculative sampling can be briefly described as: if a token is accepted, it returns that token; otherwise, it samples a token from the adjusted distribution. For a tree-structured draft with $k$ candidate tokens, Multi-round speculative sampling recursively invokes algorithm A. Instead of directly sampling from the adjusted distribution after rejecting a token, Multi-round speculative sampling calls A again. If all tokens are rejected, it then directly samples from the adjusted distribution. The pseudocode for Multi-round speculative sampling is provided in Algorithm 1.
314
+
315
+ # B. Detailed experimental results
316
+
317
+ Table 8 displays the speedup ratio, average acceptance length $\tau$, and acceptance rate $\alpha$ of EAGLE on the HumanEval, GSM8K, and Alpaca datasets.
318
+
319
+ Algorithm 1 Multi-round speculative sampling
320
+
321
+ Input: Target distribution $p$ , samples $t_i$ and distributions $\hat{p}_i$ for each $i$ from 1 to $k$ , where $t_i$ is sampled from $\hat{p}_i$ ,
322
+
323
+ Output: a sample $x \sim p$ ;
324
+
325
+ $i\gets 1$
326
+
327
+ for $i\leq k$ do
328
+
329
+ $r\gets U(0,1)$
330
+
331
+ if $r < p(t_i) / \hat{p}_i(t_i)$ then
332
+
333
+ Return $t_i$
334
+
335
+ end if
336
+
337
+ $p\gets \text{norm}(\max(0,p - \hat{p}_i))$
338
+
339
+ $i\gets i + 1$
340
+
341
+ end for
342
+
343
+ Sample $t\sim p$
344
+
345
+ Return $t$
346
+
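Below is a minimal NumPy sketch of Algorithm 1 for a single group of $k$ candidate tokens. It is illustrative only; the function and variable names are ours, not the EAGLE codebase.

```python
import numpy as np

def multi_round_speculative_sampling(p, candidates, draft_dists, rng=None):
    """Return one token distributed according to the target distribution p.

    p           : target distribution over the vocabulary (1-D array, sums to 1).
    candidates  : k candidate token ids, candidates[i] sampled from draft_dists[i].
    draft_dists : the k draft distributions, each a 1-D array over the vocabulary.
    """
    if rng is None:
        rng = np.random.default_rng()
    p = np.array(p, dtype=float)  # work on a copy of the target distribution
    for t_i, p_hat_i in zip(candidates, draft_dists):
        # Accept candidate t_i with probability min(1, p(t_i) / p_hat_i(t_i)).
        if rng.random() < min(1.0, p[t_i] / p_hat_i[t_i]):
            return int(t_i)
        # Reject: adjust the residual target distribution and try the next candidate.
        p = np.maximum(p - p_hat_i, 0.0)
        p = p / p.sum()
    # All k candidates rejected: sample directly from the adjusted distribution.
    return int(rng.choice(len(p), p=p))
```

This mirrors Algorithm 1: an accepted candidate is returned immediately, and after each rejection the target distribution is adjusted and re-normalized before the next candidate is tested.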
347
+ Table 8: Speedup ratio, average acceptance length $\tau$ and acceptance rate $\alpha$ on HumanEval, GSM8K, and Alpaca at temperature = 0.
348
+
349
+ <table><tr><td>Dataset</td><td>Model</td><td>Speedup</td><td>τ</td><td>0-α</td><td>1-α</td><td>2-α</td><td>3-α</td><td>4-α</td></tr><tr><td rowspan="6">HumanEval</td><td>Vicuna 7B</td><td>3.33x</td><td>4.29</td><td>0.82</td><td>0.77</td><td>0.72</td><td>0.69</td><td>0.71</td></tr><tr><td>Vicuna13B</td><td>3.58x</td><td>4.39</td><td>0.85</td><td>0.78</td><td>0.74</td><td>0.72</td><td>0.73</td></tr><tr><td>Vicuna 33B</td><td>3.67x</td><td>4.28</td><td>0.83</td><td>0.77</td><td>0.74</td><td>0.70</td><td>0.70</td></tr><tr><td>LLaMA2-Chat 7B</td><td>3.17x</td><td>4.24</td><td>0.81</td><td>0.76</td><td>0.73</td><td>0.74</td><td>0.72</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.76x</td><td>4.52</td><td>0.85</td><td>0.80</td><td>0.78</td><td>0.76</td><td>0.75</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.52x</td><td>4.42</td><td>0.84</td><td>0.79</td><td>0.75</td><td>0.73</td><td>0.74</td></tr><tr><td rowspan="6">GSM8K</td><td>Vicuna 7B</td><td>3.01x</td><td>4.00</td><td>0.79</td><td>0.71</td><td>0.70</td><td>0.71</td><td>0.70</td></tr><tr><td>Vicuna13B</td><td>3.08x</td><td>3.97</td><td>0.79</td><td>0.71</td><td>0.67</td><td>0.68</td><td>0.64</td></tr><tr><td>Vicuna 33B</td><td>3.25x</td><td>3.94</td><td>0.79</td><td>0.71</td><td>0.67</td><td>0.67</td><td>0.67</td></tr><tr><td>LLaMA2-Chat 7B</td><td>2.91x</td><td>3.82</td><td>0.75</td><td>0.69</td><td>0.64</td><td>0.65</td><td>0.63</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.20x</td><td>4.03</td><td>0.80</td><td>0.70</td><td>0.70</td><td>0.68</td><td>0.66</td></tr><tr><td>LLaMA2-Chat 70B</td><td>3.03x</td><td>3.93</td><td>0.77</td><td>0.71</td><td>0.66</td><td>0.64</td><td>0.60</td></tr><tr><td rowspan="6">Alpaca</td><td>Vicuna 7B</td><td>2.79x</td><td>3.86</td><td>0.74</td><td>0.68</td><td>0.66</td><td>0.66</td><td>0.67</td></tr><tr><td>Vicuna13B</td><td>3.03x</td><td>3.95</td><td>0.72</td><td>0.67</td><td>0.64</td><td>0.63</td><td>0.64</td></tr><tr><td>Vicuna 33B</td><td>2.97x</td><td>3.61</td><td>0.70</td><td>0.64</td><td>0.64</td><td>0.63</td><td>0.64</td></tr><tr><td>LLaMA2-Chat 7B</td><td>2.78x</td><td>3.71</td><td>0.73</td><td>0.66</td><td>0.62</td><td>0.64</td><td>0.62</td></tr><tr><td>LLaMA2-Chat 13B</td><td>3.01x</td><td>3.83</td><td>0.75</td><td>0.67</td><td>0.64</td><td>0.63</td><td>0.63</td></tr><tr><td>LLaMA2-Chat 70B</td><td>2.97x</td><td>3.77</td><td>0.76</td><td>0.68</td><td>0.65</td><td>0.61</td><td>0.62</td></tr></table>
2401.15xxx/2401.15077/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be267adae88a86cbfce09c88edf06684743d3945206943748e3941bbb9cb8710
3
+ size 662492
2401.15xxx/2401.15077/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_content_list.json ADDED
@@ -0,0 +1,1130 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Scalable Qualitative Coding with LLMs: Chain-of-Thought Reasoning Matches Human Performance in Some Hermeneutic Tasks",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 140,
8
+ 101,
9
+ 856,
10
+ 174
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Zackary Okun Dunivin",
17
+ "bbox": [
18
+ 416,
19
+ 207,
20
+ 583,
21
+ 220
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Center for Complex Networks and Systems Research",
28
+ "bbox": [
29
+ 323,
30
+ 222,
31
+ 673,
32
+ 234
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Luddy School of Informatics, Computer Science, and Engineering",
39
+ "bbox": [
40
+ 281,
41
+ 236,
42
+ 714,
43
+ 250
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Department of Sociology",
50
+ "bbox": [
51
+ 416,
52
+ 250,
53
+ 581,
54
+ 263
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Indiana University",
61
+ "bbox": [
62
+ 437,
63
+ 263,
64
+ 558,
65
+ 276
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Bloomington, Indiana, US",
72
+ "bbox": [
73
+ 411,
74
+ 277,
75
+ 584,
76
+ 290
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "zdunivin@iu.edu",
83
+ "bbox": [
84
+ 437,
85
+ 292,
86
+ 560,
87
+ 303
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "ABSTRACT",
94
+ "text_level": 1,
95
+ "bbox": [
96
+ 449,
97
+ 354,
98
+ 547,
99
+ 369
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "Qualitative coding, or content analysis, extracts meaning from text to discern quantitative patterns across a corpus of texts. Recently, advances in the interpretive abilities of large language models (LLMs) offer potential for automating the coding process (applying category labels to texts), thereby enabling human researchers to concentrate on more creative research aspects, while delegating these interpretive tasks to AI. Our case study comprises a set of socio-historical codes on dense, paragraph-long passages representative of a humanistic study. We show that GPT-4 is capable of human-equivalent interpretations, whereas GPT-3.5 is not. Compared to our human-derived gold standard, GPT-4 delivers excellent intercoder reliability (Cohen's $\\kappa \\geq 0.79$ ) for 3 of 9 codes, and substantial reliability ( $\\kappa \\geq 0.6$ ) for 8 of 9 codes. In contrast, GPT-3.5 greatly underperforms for all codes ( $mean(\\kappa) = 0.34$ ; $max(\\kappa) = 0.55$ ). Importantly, we find that coding fidelity improves considerably when the LLM is prompted to give rationale justifying its coding decisions (chain-of-thought reasoning). We present these and other findings along with a set of best practices for adapting traditional codebooks for LLMs. Our results indicate that for certain codebooks, state-of-the-art LLMs are already adept at large-scale content analysis. Furthermore, they suggest the next generation of models will likely render AI coding a viable option for a majority of codebooks.",
106
+ "bbox": [
107
+ 169,
108
+ 376,
109
+ 826,
110
+ 584
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "1 Introduction",
117
+ "text_level": 1,
118
+ "bbox": [
119
+ 112,
120
+ 604,
121
+ 253,
122
+ 619
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "Text categorization, commonly referred to as content analysis and qualitative coding in the social sciences, plays an important role in scholarly research and industrial applications. This process traditionally relies on human expertise to interpret the nuanced and often complex meanings embedded in texts [1, 2]. The difficulty lies in the multifaceted nature of meaning and the challenge of fitting real-world complexity into discrete categories, even for skilled readers. Historically, these challenges have positioned text categorization as a task unsuitable for machine learning approaches [3], despite robust attempts [4, 5].",
129
+ "bbox": [
130
+ 109,
131
+ 636,
132
+ 883,
133
+ 720
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "Recent developments in artificial intelligence, notably the advent of transformers with billions of parameters known as large language models (LLMs), have begun to challenge this notion. These models demonstrate increasing capabilities in knowledge, interpretation, reasoning, and creativity expressed in natural language, approaching or even surpassing human performance [6-8]. The processing speed of artificial intelligence opens up the possibility of categorizing vast quantities of text, far exceeding the limitations of human coding teams restricted to smaller samples. Yet, this opportunity raises a critical question: how can we ensure and maintain the accuracy of machine categorization at a level comparable to human standards?",
140
+ "bbox": [
141
+ 109,
142
+ 726,
143
+ 883,
144
+ 824
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "This study provides the strongest evidence to date that machines are capable of human-quality interpretations of text for the purposes of qualitative coding. Additionally, our report serves as a practical guide to employing LLMs in text categorization and as a reference for those encountering machine-assisted qualitative coding in empirical research. We contribute to the growing body of work that builds confidence in the rigor of LLM-based text categorization [9-12], a",
151
+ "bbox": [
152
+ 109,
153
+ 829,
154
+ 883,
155
+ 887
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "aside_text",
161
+ "text": "arXiv:2401.15170v2 [cs.CL] 12 Feb 2024",
162
+ "bbox": [
163
+ 22,
164
+ 265,
165
+ 57,
166
+ 708
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "footer",
172
+ "text": "Code and data available at https://osf.io/k4fg9",
173
+ "bbox": [
174
+ 112,
175
+ 897,
176
+ 421,
177
+ 911
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "field that will expand as these models continue to evolve. Our report emphasizes the redesign of codebooks—comprising category descriptions and coding instructions—specifically for LLMs. We demonstrate how the structure of prompts, the specific requests made to the generative model for categorizing passages, significantly impacts coding fidelity. Even as these models continue to rapidly improve, we expect most of the principles of prompt design we report will remain useful and informative as methodologists explore new models and empiricists automate their coding workflows. Our results are presented through narratives detailing our approach and highlighting potential challenges and demonstrated by LLM-generated analyses to a human-derived gold standard. A summary of best practices for content analysis with an LLM is also presented in tabular format for quick reference.",
184
+ "bbox": [
185
+ 109,
186
+ 90,
187
+ 883,
188
+ 203
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "Key findings of our study include:",
195
+ "bbox": [
196
+ 112,
197
+ 208,
198
+ 339,
199
+ 223
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "list",
205
+ "sub_type": "text",
206
+ "list_items": [
207
+ "- GPT-4 exhibits human-equivalent performance with zero-shot prompts. 8 of 9 tasks exceed the 0.6 threshold for substantial agreement using Cohen's $\\kappa$ . 3 of 9 tasks exceed the 0.75 threshold for excellent agreement.",
208
+ "- GPT-3.5, when given the same prompts, has an average intercoder reliability of 0.34 across all codes.",
209
+ "Codebooks designed for human coders need reworking for LLM application, requiring iterative manual testing to refine phrasing and improve model comprehension.",
210
+ "- Agreement improves when the LLM provides rationale for code assignments: $\\mu (\\kappa) = 0.68$ vs. $\\mu (\\kappa) = 0.59$ .",
211
+ "- Agreement improves when presenting each code as a separate prompt, rather than the codebook as a whole: $\\mu (\\kappa) = 0.68$ vs. $\\mu (\\kappa) = 0.60$ ."
212
+ ],
213
+ "bbox": [
214
+ 156,
215
+ 241,
216
+ 880,
217
+ 407
218
+ ],
219
+ "page_idx": 1
220
+ },
221
+ {
222
+ "type": "text",
223
+ "text": "1.1 Automating Content Analysis: Past and Present",
224
+ "text_level": 1,
225
+ "bbox": [
226
+ 112,
227
+ 433,
228
+ 486,
229
+ 448
230
+ ],
231
+ "page_idx": 1
232
+ },
233
+ {
234
+ "type": "text",
235
+ "text": "Prior work on automating content analysis entailed training machine learning models on large quantities of text. Supervised models, typically some form of linear regression, learn to associate text features with user-specified categories [13]. This process captures half the traditional human-coded process by using human-derived codes and examples, but fails to leverage abstract code descriptions found in a codebook, as well as requiring large quantities of human-annotated data. Unsupervised models, such as LDA [14, 15] or BERTTopic [16], develop their own categorizations from unlabeled training sets. This process does not require time-intensive labeling, but rarely captures the specific categories that a researcher intends to target.",
236
+ "bbox": [
237
+ 109,
238
+ 462,
239
+ 883,
240
+ 559
241
+ ],
242
+ "page_idx": 1
243
+ },
244
+ {
245
+ "type": "text",
246
+ "text": "The latest generation of LLMs (e.g., GPT [17], LLaMa [18], Mistral [19], Claude [20]) differ notably from previous machine learning models in that they can perform new tasks specified through natural language prompts. A user can specify a task that the model was not trained on, give few (single digit) or no examples, and the model will return output conforming to the specifications. Demonstrated successes include computer code generation [21], creative writing [22], and quantitative reasoning [23]. We are only beginning to understand and expand upon the limitations of these models. By converting natural language requests into highly intelligent output across vast and indeterminate domains, LLMs lower the technical barriers to machine learning by making its application more naturalistic and eliminating the need for large training data. Beyond this, LLM's capacities in many domains far exceed the specialized machine learning models that preceded them, suggesting that for many applications, including scholarly inquiry, artificial intelligence is overwhelmingly more accessible and capable in 2024 than it was just two years prior.",
247
+ "bbox": [
248
+ 109,
249
+ 566,
250
+ 883,
251
+ 705
252
+ ],
253
+ "page_idx": 1
254
+ },
255
+ {
256
+ "type": "text",
257
+ "text": "Early studies of content analysis with LLMs are encouraging. Xiao et al. [9] demonstrate moderate success, Cohen's $\\kappa = 0.61$ and $\\kappa = 0.38$ , in two linguistic tasks using GPT-3. Chew et al. [10] report high success on many of 19 tasks across three datasets, and results that are indistinguishable from random for others. It is difficult to evaluate their results due to the choice of Gwet's AC1, which is biased toward agreement on negative codings rather than positive, whereas most standard measures of intercoder reliability do the opposite [24]. However, we commend Chew et al.'s approach to adapting codebooks for LLMs, which is communicated with great detail and clarity. A survey of 20 empirical pieces reports \"mixed-results\" of using GPT-3 to automate \"text annotation,\" a term that ties their framing to \"data annotation\", labeling data for in machine learning [25], rather than content analysis in the tradition of grounded theory [26].",
258
+ "bbox": [
259
+ 109,
260
+ 710,
261
+ 883,
262
+ 823
263
+ ],
264
+ "page_idx": 1
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "We present here three advances to these studies. 1) We report the first methodological account of automating qualitative coding using GPT-4, which, along with other recent models, greatly improves upon many of GPT-3's capabilities [19, 27, 28]. 2) We provide the first conclusive evidence that LLMs are capable of human-equivalent performance in qualitative coding, and do so on larger passages of text, wherein meaning is often woven through multiple interrelated clauses. 3) We demonstrate that GPT is better at interpreting text when it is tasked with justifying its coding decisions (chain-of-thought prompting) rather than applying codes without an accompanying explanation.",
269
+ "bbox": [
270
+ 109,
271
+ 827,
272
+ 883,
273
+ 912
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "header",
279
+ "text": "Scalable Content Analysis with LLMs",
280
+ "bbox": [
281
+ 112,
282
+ 42,
283
+ 369,
284
+ 56
285
+ ],
286
+ "page_idx": 1
287
+ },
288
+ {
289
+ "type": "page_number",
290
+ "text": "2",
291
+ "bbox": [
292
+ 493,
293
+ 935,
294
+ 504,
295
+ 946
296
+ ],
297
+ "page_idx": 1
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "1.2 Case Study: W.E.B. Du Bois's Characterization in News Media",
302
+ "text_level": 1,
303
+ "bbox": [
304
+ 112,
305
+ 90,
306
+ 591,
307
+ 106
308
+ ],
309
+ "page_idx": 2
310
+ },
311
+ {
312
+ "type": "text",
313
+ "text": "In order to present a realistic challenge of using an LLM to do qualitative coding, we make a case study of our own work. We adapted a codebook written by the authors to understand how the scholar and activist W.E.B. Du Bois has been characterized in news media over time. The codebook is composed of 9 codes in 3 categories. Due to multiple layers of agency (who is doing what) and voice (who is saying what), the tasks are difficult even for human interpreters. Applying the codes is also complicated because it can be difficult to differentiate Du Bois's scholarship from his political activism, as Du Bois's theoretical contributions have profound implications for understanding race and the social-historical position of Black persons in the United States and beyond, making them powerful activist tools. We are particularly interested in understanding how different facets of Du Bois's activities contributed to his canonization in the public imagination as the preeminent figure for understanding Black political struggle. Table 1 gives the codes in brief. Complete examples of the original human and modified-for-GPT codebook are included in the appendix.",
314
+ "bbox": [
315
+ 109,
316
+ 117,
317
+ 883,
318
+ 257
319
+ ],
320
+ "page_idx": 2
321
+ },
322
+ {
323
+ "type": "text",
324
+ "text": "The training and test data for our study were random samples of passages from New York Times articles (1970-2023) that mention W.E.B. Du Bois. 232 passages were automatically extracted as concurrent paragraphs containing \"Du Bois\". The average number of words was 94 ( $\\sigma = 70$ ), and the average number of sentences was 3.75 ( $\\sigma = 2.88$ ). To give a better sense of the size of our passages, this paragraph has 76 words across 4 sentences.",
325
+ "bbox": [
326
+ 109,
327
+ 262,
328
+ 883,
329
+ 319
330
+ ],
331
+ "page_idx": 2
332
+ },
333
+ {
334
+ "type": "table",
335
+ "img_path": "images/975c5e109df9c382e27c74c28e1833601a738a4cb160bf7450664d8e70f87d08.jpg",
336
+ "table_caption": [
337
+ "Table 1: Categories and descriptions for 9 codes."
338
+ ],
339
+ "table_footnote": [],
340
+ "table_body": "<table><tr><td colspan=\"2\">Characterization of Du Bois</td></tr><tr><td>Scholar</td><td>Describes Du Bois as a scholar or intellectual.</td></tr><tr><td>Activist</td><td>Refers to Du Bois&#x27;s political or social activism.</td></tr><tr><td colspan=\"2\">General Themes</td></tr><tr><td>Monumental Memorialization</td><td>Refers to an enduring cultural object named after Du Bois.</td></tr><tr><td>Mention of Scholarly Work</td><td>Mentions or quotes specific academic works by Du Bois.</td></tr><tr><td>Social/Political Advocacy</td><td>Mentions or implies social or political activism, advocacy, or critique.</td></tr><tr><td colspan=\"2\">Canonization Processes</td></tr><tr><td>Coalition Building</td><td>Refers to Du Bois&#x27;s activities with activist or academic organizations.</td></tr><tr><td>Out of the Mouth of Academics</td><td>Describes an academic organization engaging with Du Bois&#x27;s legacy.</td></tr><tr><td>Out of the Mouth of Activists</td><td>Describes an activist organization engaging with Du Bois&#x27;s legacy.</td></tr><tr><td>Collective Synecdoche</td><td>Mentions Du Bois alongside other figures in order to represent some facet of a culture, era, or ideology.</td></tr></table>",
341
+ "bbox": [
342
+ 114,
343
+ 356,
344
+ 883,
345
+ 628
346
+ ],
347
+ "page_idx": 2
348
+ },
349
+ {
350
+ "type": "text",
351
+ "text": "2 Results",
352
+ "text_level": 1,
353
+ "bbox": [
354
+ 112,
355
+ 693,
356
+ 209,
357
+ 709
358
+ ],
359
+ "page_idx": 2
360
+ },
361
+ {
362
+ "type": "text",
363
+ "text": "2.1 Adapting a Codebook for an LLM",
364
+ "text_level": 1,
365
+ "bbox": [
366
+ 112,
367
+ 724,
368
+ 393,
369
+ 739
370
+ ],
371
+ "page_idx": 2
372
+ },
373
+ {
374
+ "type": "text",
375
+ "text": "Initially, we developed a codebook for human coders using standard methods. This process involved exploratory reading to define and refine codes. Codes were derived to probe particular substantive hypotheses, some of which preceded our exploratory reading, and others which resulted from it. We then applied these codes iteratively, adjusting them as needed until we achieved high intercoder reliability with a test set. We adapted these code descriptions for use with a large language model, evaluating the LLM's performance on a training set of text passages. Where we found ambiguities or deficiencies in the model's interpretation, we refined the code descriptions accordingly. This iterative process of definition, evaluation, and refinement follows Nelson's Computational Grounded Theory paradigm [4], the core of which is common to all qualitative code development processes whether or not coding is automated [26].",
376
+ "bbox": [
377
+ 109,
378
+ 752,
379
+ 883,
380
+ 864
381
+ ],
382
+ "page_idx": 2
383
+ },
384
+ {
385
+ "type": "text",
386
+ "text": "Our experience modifying the code descriptions yielded several key insights related in the following paragraphs. We encourage readers interested in a fuller account of this process to read Chew et al.'s study [10] describing their process of LLM-Assisted Content Analysis (LACA), which relates a process similar to our own in greater detail.",
387
+ "bbox": [
388
+ 109,
389
+ 869,
390
+ 883,
391
+ 912
392
+ ],
393
+ "page_idx": 2
394
+ },
395
+ {
396
+ "type": "header",
397
+ "text": "Scalable Content Analysis with LLMs",
398
+ "bbox": [
399
+ 112,
400
+ 42,
401
+ 369,
402
+ 56
403
+ ],
404
+ "page_idx": 2
405
+ },
406
+ {
407
+ "type": "page_number",
408
+ "text": "3",
409
+ "bbox": [
410
+ 493,
411
+ 935,
412
+ 504,
413
+ 946
414
+ ],
415
+ "page_idx": 2
416
+ },
417
+ {
418
+ "type": "image",
419
+ "img_path": "images/19742177267e2154927c4106a2d111bd27e982eef3b3973235c3e8fa5fa4c94f.jpg",
420
+ "image_caption": [
421
+ "Figure 1: The chain-of-thought prompt sequence."
422
+ ],
423
+ "image_footnote": [],
424
+ "bbox": [
425
+ 109,
426
+ 87,
427
+ 883,
428
+ 494
429
+ ],
430
+ "page_idx": 3
431
+ },
432
+ {
433
+ "type": "text",
434
+ "text": "LLM-generated rationale are essential for evaluating performance. In adapting the codebook, we wanted to understand not just which codes the model struggled to interpret correctly, but what aspects of the code the model failed to capture. To achieve this, we structured our prompts to require GPT to justify its decision to apply or not apply each code. These rationale were invaluable. They often highlighted parts of the code description that were ambiguous or imprecisely defined, leading the model to misinterpret them. Whenever a rationale repeatedly pointed to such an issue, we revised the corresponding code. We then retested the passage to check that the code was correctly applied and the rationale aligned with our intended interpretation of the code. Sometimes a revision would not improve the interpretations for the passages in question; other times it would fix the interpretations for those passages, but would introduce new problems in passages which were previously coded correctly.",
435
+ "bbox": [
436
+ 109,
437
+ 544,
438
+ 883,
439
+ 671
440
+ ],
441
+ "page_idx": 3
442
+ },
443
+ {
444
+ "type": "text",
445
+ "text": "Figure 1 demonstrates an effective method of prompting GPT to provide rationale for its code selections. The initial instruction is given by the Justification section of the prompt, and solicited again in the Decision/ Formatting box.",
446
+ "bbox": [
447
+ 109,
448
+ 676,
449
+ 883,
450
+ 705
451
+ ],
452
+ "page_idx": 3
453
+ },
454
+ {
455
+ "type": "text",
456
+ "text": "LLMs require more precise descriptions than do human readers. Human coders do not rely solely on a written codebook. Their interpretation of the codes is enriched through the codebook development process, discussions with fellow coders, and supplementary oral instructions. An LLM lacks this interactive and historical context and must interpret codes entirely from written descriptions. Our work modifying the codebook for GPT revealed information that, while implicitly understood by the code developers, wasn't explicitly stated in the code descriptions. This process not only aided in refining the codebook for automated coding, but also improved our own understandings of the codes. This ultimately led to clearer definition of the codes, thereby enhancing future manual coding processes as well. Figure 2 demonstrates how the Monumental Memorialization and Social/Political Advocacy codes were redefined to improve GPT's comprehension.",
457
+ "bbox": [
458
+ 109,
459
+ 724,
460
+ 883,
461
+ 851
462
+ ],
463
+ "page_idx": 3
464
+ },
465
+ {
466
+ "type": "text",
467
+ "text": "Often, we encountered cases where ambiguous phrasing was obvious to humans, but challenging for the LLM. Our codebook contains two codes that relate to Du Bois's reputation among academics and activists. These codes are meant to evaluate whether Du Bois appears in a news story because either an academic or activist mentioned him. Initially, we titled this code \"Academic Repute,\" which worked well for human coders. GPT, however, consistently misinterpreted",
468
+ "bbox": [
469
+ 109,
470
+ 854,
471
+ 883,
472
+ 912
473
+ ],
474
+ "page_idx": 3
475
+ },
476
+ {
477
+ "type": "header",
478
+ "text": "Scalable Content Analysis with LLMs",
479
+ "bbox": [
480
+ 114,
481
+ 42,
482
+ 369,
483
+ 56
484
+ ],
485
+ "page_idx": 3
486
+ },
487
+ {
488
+ "type": "page_number",
489
+ "text": "4",
490
+ "bbox": [
491
+ 491,
492
+ 935,
493
+ 504,
494
+ 946
495
+ ],
496
+ "page_idx": 3
497
+ },
498
+ {
499
+ "type": "text",
500
+ "text": "Original: Activist",
501
+ "text_level": 1,
502
+ "bbox": [
503
+ 137,
504
+ 88,
505
+ 267,
506
+ 103
507
+ ],
508
+ "page_idx": 4
509
+ },
510
+ {
511
+ "type": "text",
512
+ "text": "Apply when Du Bois is developing activist organizations, giving public speeches, participating in meetings with politicians and organizers, running for office, promoting a candidate, organization or initiative. Also apply when Du A Bois is explicitly described as an activist or leader.",
513
+ "bbox": [
514
+ 133,
515
+ 104,
516
+ 883,
517
+ 162
518
+ ],
519
+ "page_idx": 4
520
+ },
521
+ {
522
+ "type": "text",
523
+ "text": "Redefined: Activist",
524
+ "text_level": 1,
525
+ "bbox": [
526
+ 137,
527
+ 174,
528
+ 281,
529
+ 186
530
+ ],
531
+ "page_idx": 4
532
+ },
533
+ {
534
+ "type": "text",
535
+ "text": "Apply this code when Du Bois is explicitly called an \"activist\" or \"leader\", or when his political or social activism is either explicitly noted or clearly implied through context. Examples include being mentioned in the context of leadership, activism, developing activist organizations, giving public speeches, participating in meetings with politicians and organizers, running for office, or promoting a candidate, organization, or initiative.",
536
+ "bbox": [
537
+ 112,
538
+ 189,
539
+ 883,
540
+ 276
541
+ ],
542
+ "page_idx": 4
543
+ },
544
+ {
545
+ "type": "text",
546
+ "text": "Original: Social/Political Activism (c)",
547
+ "text_level": 1,
548
+ "bbox": [
549
+ 137,
550
+ 303,
551
+ 413,
552
+ 320
553
+ ],
554
+ "page_idx": 4
555
+ },
556
+ {
557
+ "type": "list",
558
+ "sub_type": "text",
559
+ "list_items": [
560
+ "DRefers to any form of social or political participation in promoting change in society. Can be used in conjunction with Du Bois' scholarly work, specifically E when Du Bois's ideas are used to frame Black political struggle. Can be used",
561
+ "F to describe Du Bois' political work or those who invoke Du Bois in service of G their activism."
562
+ ],
563
+ "bbox": [
564
+ 112,
565
+ 320,
566
+ 883,
567
+ 391
568
+ ],
569
+ "page_idx": 4
570
+ },
571
+ {
572
+ "type": "text",
573
+ "text": "Redefined: Social/Political Advocacy ©",
574
+ "text_level": 1,
575
+ "bbox": [
576
+ 137,
577
+ 402,
578
+ 436,
579
+ 419
580
+ ],
581
+ "page_idx": 4
582
+ },
583
+ {
584
+ "type": "list",
585
+ "sub_type": "text",
586
+ "list_items": [
587
+ "D This code applies when a passage mentions or implies any form of social or political activism, advocacy, critique, or discourse, including discussions about current or historical social problems. This includes not only direct activism of Du Bois and others, but also the framing and challenging of social norms,",
588
+ "E historical narratives, and racial or cultural identities. Apply this code when Du Bois's work, persona, or ideas are invoked in discussions that critically engage with Black identity, positionality, or broader systemic circumstances of Black people. Adjacency to other activists, such as inclusion in a list, is insufficient; advocacy must be explicitly mentioned in the passage."
589
+ ],
590
+ "bbox": [
591
+ 112,
592
+ 419,
593
+ 883,
594
+ 550
595
+ ],
596
+ "page_idx": 4
597
+ },
598
+ {
599
+ "type": "text",
600
+ "text": "Figure 2: Two examples of prompt redefinition. Colored, alphabetically labeled blocks of text show alterations derived through iterative code refinement. Italics draw attention to direction to constrain interpretive scope to implicit or explicit information.",
601
+ "bbox": [
602
+ 112,
603
+ 571,
604
+ 883,
605
+ 599
606
+ ],
607
+ "page_idx": 4
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "this code as pertaining to Du Bois's esteem as an academic, rather than among or by academics (the meaning of \"among\" remains ambiguous even here). We tried numerous iterations this code without success. Nevertheless, altering the title of the code to the far more literal \"Out of the Mouth of Academics\" dramatically improved performance, even when paired with the original code description. In another case, the code titled \"Social/Political Activism\" was revised to \"Social/Political Advocacy\" (Figure 2, C) because GPT did not consider social critique to be a form of activism, even when it was specifically instructed to.",
612
+ "bbox": [
613
+ 109,
614
+ 628,
615
+ 883,
616
+ 714
617
+ ],
618
+ "page_idx": 4
619
+ },
620
+ {
621
+ "type": "text",
622
+ "text": "We found that words indicating how much the model should draw on context or its own outside knowledge had large impacts on the model's outputs, often to the desired effect. In particular, instructing the model to restrict itself to \"explicit\" meanings, or to draw on \"implicit\" meanings, often helped the model with part of a code description it had struggled with. Figure 2 B and D demonstrate the addition of such verbiage to control scope.",
623
+ "bbox": [
624
+ 109,
625
+ 719,
626
+ 883,
627
+ 776
628
+ ],
629
+ "page_idx": 4
630
+ },
631
+ {
632
+ "type": "text",
633
+ "text": "Both mandatory (do) and prohibitory (do not) phrasing were observed by the model, though mandatory phrasing seemed more successful, a finding reported by other researchers [29]. The ordering of directives also impacted how likely the model was to follow them. We found that moving a phrase that was ignored in the coding rationale toward the front of the definition made the model more likely to follow its specifications, as in Figure 2 A. When a very specific problem was observed repeatedly, it was sometimes necessary to add a directive to correct it, as in Figure 2 H.",
634
+ "bbox": [
635
+ 109,
636
+ 781,
637
+ 883,
638
+ 852
639
+ ],
640
+ "page_idx": 4
641
+ },
642
+ {
643
+ "type": "text",
644
+ "text": "Prompting for machine-readable output. To fully automate the coding process, model output must be reliably readable by a computer. The LLM generates text, which must be interpreted by another script into a data structure, such as a table, for further analysis. Instructing exactly how to format the output produced machine-readable results with",
645
+ "bbox": [
646
+ 109,
647
+ 869,
648
+ 883,
649
+ 912
650
+ ],
651
+ "page_idx": 4
652
+ },
653
+ {
654
+ "type": "header",
655
+ "text": "Scalable Content Analysis with LLMs",
656
+ "bbox": [
657
+ 112,
658
+ 42,
659
+ 369,
660
+ 56
661
+ ],
662
+ "page_idx": 4
663
+ },
664
+ {
665
+ "type": "page_number",
666
+ "text": "5",
667
+ "bbox": [
668
+ 493,
669
+ 935,
670
+ 504,
671
+ 946
672
+ ],
673
+ "page_idx": 4
674
+ },
675
+ {
676
+ "type": "text",
677
+ "text": "GPT-4 and GPT-3.5. Critically, this involves specifying a tag that the interpreting script locates, after which follows a reliably formatted list of codes. The Decision/ Formatting component Figure 1 illustrates how to constrain model output and produce consistent results across queries. Additionally, because GPT tends to be excessively verbose and summarize its output, particularly at higher temperatures, we informed the model that we do not want any output to follow the code list.",
678
+ "bbox": [
679
+ 109,
680
+ 90,
681
+ 883,
682
+ 161
683
+ ],
684
+ "page_idx": 5
685
+ },
686
+ {
687
+ "type": "text",
688
+ "text": "2.2 Selecting a Model and Writing Prompts for Optimal Performance",
689
+ "text_level": 1,
690
+ "bbox": [
691
+ 112,
692
+ 176,
693
+ 612,
694
+ 193
695
+ ],
696
+ "page_idx": 5
697
+ },
698
+ {
699
+ "type": "text",
700
+ "text": "Once the code descriptions have been revised for LLM text categorization, numerous other decisions remain about how to prompt a model to execute the content analysis. We present these as a separate step for the sake of clarity, but in reality, we developed our approach in iteratively and tandem with revising the code descriptions. We hope future methodologists and empiricists will benefit from what we learned during this process, and that less exploration of these components will be necessary so practitioners can focus on application or exploration of calibrations not explored here. We summarize all our recommendations for qualitative coding with an LLM in Table 3.",
701
+ "bbox": [
702
+ 109,
703
+ 203,
704
+ 883,
705
+ 287
706
+ ],
707
+ "page_idx": 5
708
+ },
709
+ {
710
+ "type": "text",
711
+ "text": "There is large and growing body of academic and nonacademic literature on prompt engineering: constructing user-defined input to elicit the best model output. In fact, the codebook adaptation in the previous section was in large part an exercise in prompt engineering. However, in this second section, prompt engineering refers more to the broader context of task description than the code definitions. In this section we report how different prompts influence the quality of machine categorization. Additionally we compare performance when the LLM is tasked with assigning each code independently to when the model is given the full codebook and assigned with coding all 9 codes as a single task. We refer to these as the \"Per Code\" and \"Full Codebook\" approach respectively.",
712
+ "bbox": [
713
+ 109,
714
+ 292,
715
+ 883,
716
+ 391
717
+ ],
718
+ "page_idx": 5
719
+ },
720
+ {
721
+ "type": "text",
722
+ "text": "Studies have shown that LLM decision-making improves when the model is prompted to account for its decisions [30, 31]. This is generally known as chain-of-thought (CoT) prompting or reasoning, and refers to breaking down tasks into specific components, one or more of which involve planning for future steps or reflection on previous ones. Our prompts, which can be viewed in full in the appendix, apply chain-of-thought prompting by including 1) a role assignment step, informing the machine of its purpose, 2) a task description step, specifying the code definition, 3) a justification step, instructing the model to provide a rationale for its decision, and 4) a decision step, wherein the model delivers its ultimate analysis in a consistent, machine-readable format. An example of the chain-of-thought prompt sequence is given by Figure 1.",
723
+ "bbox": [
724
+ 109,
725
+ 396,
726
+ 883,
727
+ 508
728
+ ],
729
+ "page_idx": 5
730
+ },
731
+ {
732
+ "type": "text",
733
+ "text": "We use zero-shot prompts throughout this study. Zero-shot refers to providing the model only the task description, without giving examples of correctly executed responses. Xiao et al. found few-shot prompting improves coding and performance on other tasks [9], whereas Chew et al. largely employed zero-shot prompts [10]. Our case study involves evaluating paragraph-long passages rather than single clauses. We found that information in the examples was drawn upon by the LLM and interfered with its coding decisions. We also found that in Full Codebook prompts, giving examples greatly expanded the prompt, negatively impacting results. We suggest that when content is more literary or historical, zero-shot prompts are probably preferred, but that most coding tasks will benefit from few-shot prompting as demonstrated by the results of many other studies across domains.",
734
+ "bbox": [
735
+ 109,
736
+ 513,
737
+ 883,
738
+ 625
739
+ ],
740
+ "page_idx": 5
741
+ },
742
+ {
743
+ "type": "text",
744
+ "text": "Performance comparisons are relative to the human-derived gold standard hidden from the LLM at all stages of development. We used the default settings for the GPT API where temperature is set to 0 and nucleus sampling (top_p) is set to 1. We specified the task description as a \"system prompt\", and provide each passage as a \"user prompt\". A system prompt gives the LLM its purpose, clearly specifying the task it is meant to address, whereas a user prompt provides the input to which the model responds by generating output. We did not investigate whether intercoder agreement suffers with the default system prompt, while combining the task description and passage as a user prompt.",
745
+ "bbox": [
746
+ 109,
747
+ 630,
748
+ 883,
749
+ 715
750
+ ],
751
+ "page_idx": 5
752
+ },
753
+ {
754
+ "type": "text",
755
+ "text": "GPT-4 greatly outperforms GPT-3.5. We found that GPT-4 approaches human performance for 3 codes: Activist: $\\kappa = 0.81$ ; Monumental Memorialization: $\\kappa = 1.00$ ; Collective Synecdoche $\\kappa = 0.79$ . GPT-4 prompted for rationale provides considerably higher quality code assignments than GPT-3.5, except in the case of the Out of the Mouth of Activists code, which no configuration handled well. It is especially notable that GPT-4 and GPT-3.5 differed in their most accurately interpreted codes. In the 3 tasks GPT-4 executed best, GPT-3.5's performance was slightly below its own average, mean $(\\kappa_{all}) = 0.34$ vs. mean $(\\kappa) = 0.32$ .",
756
+ "bbox": [
757
+ 109,
758
+ 729,
759
+ 883,
760
+ 813
761
+ ],
762
+ "page_idx": 5
763
+ },
764
+ {
765
+ "type": "text",
766
+ "text": "Coding fidelity improves when codes are presented as individual tasks. We adapted our codebook by presenting the entire codebook to GPT along with task instructions. However, we found in testing that performance improved when GPT was given each task independently. This \"per code\" approach was taken by one recent study exploring content analysis with non-mutually exclusive codes (permitting multiply coded passages) [10], but not another, which tested only two codes [9]. Table 2 compares the GPT-4 performance when presented individual tasks for each code (\"Per Code\") and when presented all tasks in a single prompt (\"Full Codebook\"). We found that for the 3 human-equivalent",
767
+ "bbox": [
768
+ 109,
769
+ 828,
770
+ 883,
771
+ 912
772
+ ],
773
+ "page_idx": 5
774
+ },
775
+ {
776
+ "type": "header",
777
+ "text": "Scalable Content Analysis with LLMs",
778
+ "bbox": [
779
+ 112,
780
+ 42,
781
+ 370,
782
+ 56
783
+ ],
784
+ "page_idx": 5
785
+ },
786
+ {
787
+ "type": "page_number",
788
+ "text": "6",
789
+ "bbox": [
790
+ 491,
791
+ 936,
792
+ 504,
793
+ 946
794
+ ],
795
+ "page_idx": 5
796
+ },
797
+ {
798
+ "type": "table",
799
+ "img_path": "images/6df25eecf073fd7f0327b24d5b457a4b971a78a1e9544545478b7c64845e61fe.jpg",
800
+ "table_caption": [
801
+ "Table 2: Intercoder reliability (Cohen's $\\kappa$ ) for all codes on 111 gold standard passages. Best overall performance is shown in bold. Italics indicate the highest intercoder reliability between pairs with and without prompting for rationale (CoT vs. No CoT); if the pair are equivalent neither is italicized. Two values are considered equivalent if their difference does not exceed 0.02."
802
+ ],
803
+ "table_footnote": [],
804
+ "table_body": "<table><tr><td rowspan=\"3\">Code</td><td rowspan=\"3\">Count</td><td colspan=\"4\">GPT-4</td><td colspan=\"2\">GPT-3.5</td></tr><tr><td colspan=\"2\">Per Code</td><td colspan=\"2\">Full Codebook</td><td colspan=\"2\">Per Code</td></tr><tr><td>CoT</td><td>No CoT</td><td>CoT</td><td>No CoT</td><td>CoT</td><td>No CoT</td></tr><tr><td>Scholar</td><td>27</td><td>0.61</td><td>0.52</td><td>0.59</td><td>0.42</td><td>0.29</td><td>0.21</td></tr><tr><td>Activist</td><td>23</td><td>0.81</td><td>0.65</td><td>0.67</td><td>0.62</td><td>0.39</td><td>0.32</td></tr><tr><td>Monumental Memorialization</td><td>13</td><td>1.00</td><td>0.91</td><td>0.75</td><td>0.48</td><td>0.29</td><td>0.31</td></tr><tr><td>Mention of Scholarly Work</td><td>24</td><td>0.71</td><td>0.69</td><td>0.52</td><td>0.44</td><td>0.33</td><td>0.39</td></tr><tr><td>Social/Political Advocacy</td><td>51</td><td>0.64</td><td>0.60</td><td>0.60</td><td>0.60</td><td>0.55</td><td>0.51</td></tr><tr><td>Coalition Building</td><td>9</td><td>0.60</td><td>0.44</td><td>0.43</td><td>0.13</td><td>0.33</td><td>0.17</td></tr><tr><td>Out of the Mouth of Academics</td><td>30</td><td>0.63</td><td>0.65</td><td>0.65</td><td>0.62</td><td>0.37</td><td>0.33</td></tr><tr><td>Out of the Mouth of Activists</td><td>11</td><td>0.30</td><td>0.09</td><td>0.34</td><td>0.18</td><td>0.21</td><td>0.09</td></tr><tr><td>Collective Synecdoche</td><td>26</td><td>0.79</td><td>0.78</td><td>0.81</td><td>0.71</td><td>0.27</td><td>0.27</td></tr><tr><td>Mean</td><td>24</td><td>0.68</td><td>0.59</td><td>0.60</td><td>0.46</td><td>0.34</td><td>0.29</td></tr></table>",
805
+ "bbox": [
806
+ 132,
807
+ 136,
808
+ 859,
809
+ 421
810
+ ],
811
+ "page_idx": 6
812
+ },
813
+ {
814
+ "type": "text",
815
+ "text": "tasks (Activist, Monumental Memorialization, and Collective Synecdoche) the Per Code performance far exceeded the Full Codebook for 2 tasks, and was comparable for 1. For 2 other tasks, Mention of Scholarly Work and Coalition Building, we found that the Per Code configuration produced considerably higher agreement, whereas Full Codebook performed comparably to the Per Code in the remaining 4 tasks.",
816
+ "bbox": [
817
+ 109,
818
+ 444,
819
+ 883,
820
+ 503
821
+ ],
822
+ "page_idx": 6
823
+ },
824
+ {
825
+ "type": "text",
826
+ "text": "Coding fidelity improves when the model is prompted to justify its coding decisions. Consistent with other experiments with chain-of-thought (CoT) reasoning in LLMs, we found that coding agreement benefited strongly from prompting the model to explain itself [32]. Table 2 shows the effect of prompting for rationale on three pairs of conditions: Per Code GPT-4, Full Codebook GPT-4, and Per Code GPT-3.5. We found that across all codes and conditions, with one exception, CoT prompting produces higher or equivalent intercoder reliability with the gold standard. Using GPT-4, average Per Code agreement improved from 0.59 to 0.68, and average Full Codebook agreement improved from 0.46 to 0.60. Furthermore, a majority of pairs showed substantial improvement when the codes were assigned after providing reasoning for coding decisions.",
827
+ "bbox": [
828
+ 109,
829
+ 513,
830
+ 883,
831
+ 627
832
+ ],
833
+ "page_idx": 6
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "3 Discussion",
838
+ "text_level": 1,
839
+ "bbox": [
840
+ 112,
841
+ 645,
842
+ 235,
843
+ 660
844
+ ],
845
+ "page_idx": 6
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "Determining appropriate domains for LLM-assisted qualitative coding. Previous methods of automated text categorization, both supervised and unsupervised, rarely met the standards of traditional social scientists and humanists, and were instead generally employed by data scientists. Capturing meaning, particularly complex meaning, through machine learning has largely been an elusive goal [3]. Despite our own former skepticism, we predict that LLMs will be capable of applying most qualitative codebooks within the year. However, our results show that even within the scope of a single codebook, interpretation quality varies. Thus, different disciplines and domains should expect model success and the ease of transitioning a codebook to vary considerably. We suspect that more humanistic and \"softer\" scientific approaches will (continue to) be more resistant to machine interpretation than problems posed by scholars who identify with \"harder\" sciences, to say nothing of their ability to convince their peers of its validity. We do not oppose developing evaluation benchmarks for qualitative coding to assess which models are adept at what variety of task, but neither do we advocate it; meaning is manifold and emergent, and much of its beauty derives from its resistance to reduction and definition. Instead, we suggest those who wish to employ an LLM to perform content analysis survey similar attempts and simply experiment on their own. The process of discovering triumphs, workarounds, and limitations of working with these models was not only fascinating, but tremendously fun.",
850
+ "bbox": [
851
+ 109,
852
+ 675,
853
+ 883,
854
+ 869
855
+ ],
856
+ "page_idx": 6
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "Practical aspects of transitioning to content analysis with LLMs. While artificial intelligence potentially opens up much larger datasets to qualitative scholars, there is still a considerable technical barrier to automating content",
861
+ "bbox": [
862
+ 109,
863
+ 883,
864
+ 883,
865
+ 912
866
+ ],
867
+ "page_idx": 6
868
+ },
869
+ {
870
+ "type": "header",
871
+ "text": "Scalable Content Analysis with LLMs",
872
+ "bbox": [
873
+ 114,
874
+ 42,
875
+ 369,
876
+ 56
877
+ ],
878
+ "page_idx": 6
879
+ },
880
+ {
881
+ "type": "page_number",
882
+ "text": "7",
883
+ "bbox": [
884
+ 491,
885
+ 935,
886
+ 504,
887
+ 946
888
+ ],
889
+ "page_idx": 6
890
+ },
891
+ {
892
+ "type": "table",
893
+ "img_path": "images/b3cf3aed59a5c56a41c10f872cbadf838bd8748c07ed08aa88dfceeb6f8c4340.jpg",
894
+ "table_caption": [
895
+ "Table 3: Principles of prompting an LLM for qualitative coding."
896
+ ],
897
+ "table_footnote": [],
898
+ "table_body": "<table><tr><td colspan=\"2\">Task Instructions</td></tr><tr><td>Prompt for Rationale</td><td>Model fidelity improves when instructed to justify its coding decisions.</td></tr><tr><td>One Task Per Code</td><td>Model fidelity improves when given each code as a separate task.</td></tr><tr><td>Brevity</td><td>Shorter task descriptions are more likely to be faithfully executed by the model.</td></tr><tr><td>Structured Output</td><td>Instruct the model to format its output to ensure uniform responses.</td></tr><tr><td colspan=\"2\">Code Definitions</td></tr><tr><td>Word Choice</td><td>A single high-content word can be changed to align with the LLM&#x27;s built-in ontology.</td></tr><tr><td>Clause Order</td><td>Clauses are more likely to be observed when introduced earlier in the code description.</td></tr><tr><td>Mandates/Prohibitions</td><td>Both can be effective, but it is easier to get the model to “do” than “do not”.</td></tr><tr><td>Code Titles</td><td>Altering the code title can have a large effect even without altering the definition.</td></tr><tr><td>Interpretation Scope</td><td>Use words like “implicit” and “explicit” when interpretation is too limited or expansive.</td></tr><tr><td colspan=\"2\">Chain-of-Thought Prompt Sequence</td></tr><tr><td>1. Role Assignment</td><td>Supply the model its purpose, e.g., &quot;You will be applying category labels to passages.&quot;</td></tr><tr><td>2. Code Definition</td><td>Provide the code title(s) and description(s).</td></tr><tr><td>3. Justification</td><td>Request that the model provide evidence of its reasoning.</td></tr><tr><td>4. Decision</td><td>Instruct the model to list the codes that apply to the passage in a consistent format.</td></tr></table>",
899
+ "bbox": [
900
+ 114,
901
+ 111,
902
+ 883,
903
+ 448
904
+ ],
905
+ "page_idx": 7
906
+ },
907
+ {
908
+ "type": "text",
909
+ "text": "analysis. Development of an LLM adapted codebook is feasible for anyone regardless of technical skill by interacting with an LLM through chat-like Web platforms provided by proprietary model developers. However, systematically testing prompts or applying a completed codebook to the full dataset requires moderate skill in writing scripts in a language such as Python. Rather than suggest that all scholars become programmers, we encourage researchers to develop partnerships with students or community members seeking programming or research experience as a form of project-based education. Conversely, we suggest that data scientists pursue partnerships with traditional social scientists and humanists, who are often better positioned to develop coding schema to flush out complex meanings embedded in text, which are now more tractable to machine learning.",
910
+ "bbox": [
911
+ 109,
912
+ 507,
913
+ 883,
914
+ 619
915
+ ],
916
+ "page_idx": 7
917
+ },
918
+ {
919
+ "type": "text",
920
+ "text": "Handling passages where model interpretation is poor. Overwhelmingly, GPT-4's interpretations were accurate and human-like. However, we found repeatedly that GPT-4, like a human reader, struggled with edge cases, especially where implicit information was required to make a judgment. We are encouraged by this finding, and argue that with automated analysis, fidelity is less important than it is with humans. Because statistical power increases with the number of observations, noise is more tolerable in machine-applied codes, as automated coding potentially increases sample size by orders of magnitude. Notably, this assumes that error is restricted to edge cases and is not otherwise systematically biased. We also advise against automated coding where datasets are small, as in interviews, where it is likely as efficient to code entirely by hand. As models improve and can provide confidence estimates for their statements [33-35], ML content analysis workflows should include manual review of passages with uncertain code assignments. Anecdotally, we found that GPT-4 could intelligently reflect on its responses when prompted to do so. When presented the output of another model instance, GPT-4, acting as an untrained \"critic\" model [36], was often able to identify when it had encountered an edge case without prompting, as well as recognize and revise obvious mistakes. Our experiences suggest that a human-in-the-loop tag-for-manual-review workflow or a two-step automated reflect-and-revise workflow may already be feasible with GPT-4 and similar models.",
921
+ "bbox": [
922
+ 109,
923
+ 635,
924
+ 883,
925
+ 830
926
+ ],
927
+ "page_idx": 7
928
+ },
929
+ {
930
+ "type": "text",
931
+ "text": "4 Conclusion",
932
+ "text_level": 1,
933
+ "bbox": [
934
+ 112,
935
+ 851,
936
+ 240,
937
+ 867
938
+ ],
939
+ "page_idx": 7
940
+ },
941
+ {
942
+ "type": "text",
943
+ "text": "Our results using state-of-the-art models lead us to recommend that scholars who do much qualitative coding consider automated coding with LLMs a potentially viable option today. We especially encourage skeptics to probe these tools'",
944
+ "bbox": [
945
+ 109,
946
+ 883,
947
+ 883,
948
+ 912
949
+ ],
950
+ "page_idx": 7
951
+ },
952
+ {
953
+ "type": "header",
954
+ "text": "Scalable Content Analysis with LLMs",
955
+ "bbox": [
956
+ 114,
957
+ 42,
958
+ 369,
959
+ 56
960
+ ],
961
+ "page_idx": 7
962
+ },
963
+ {
964
+ "type": "page_number",
965
+ "text": "8",
966
+ "bbox": [
967
+ 493,
968
+ 935,
969
+ 504,
970
+ 946
971
+ ],
972
+ "page_idx": 7
973
+ },
974
+ {
975
+ "type": "text",
976
+ "text": "capacities, as it is useful to know their limitations. Over the next year, models such as those incorporating memory [37], (multi-)agential models that dialogue and revise prior to rendering output [38, 39], and architectures that can handle larger inputs [40], will almost certainly greatly improve upon GPT-4's current abilities. When those models are made available, researchers who have already experimented with LLMs will be best positioned to make use of the new tools. The efficiency of automation is compelling, but we are most enthusiastic about the ability to probe much larger datasets than ever before, potentially illuminating patterns too rare or too fuzzy to detect with a sample numbering in the tens or hundreds rather than thousands or beyond.",
977
+ "bbox": [
978
+ 109,
979
+ 90,
980
+ 887,
981
+ 189
982
+ ],
983
+ "page_idx": 8
984
+ },
985
+ {
986
+ "type": "text",
987
+ "text": "Acknowledgments",
988
+ "text_level": 1,
989
+ "bbox": [
990
+ 112,
991
+ 209,
992
+ 272,
993
+ 226
994
+ ],
995
+ "page_idx": 8
996
+ },
997
+ {
998
+ "type": "text",
999
+ "text": "The author thanks Harry Yan, Pat Wall, Patrick Kaminski, Adam Fisch, Alicia Chen, and Francisco Muñoz for their helpful comments toward improving this manuscript. I am especially grateful to Tania Ravaei for collaborating on codebook development.",
1000
+ "bbox": [
1001
+ 109,
1002
+ 241,
1003
+ 883,
1004
+ 285
1005
+ ],
1006
+ "page_idx": 8
1007
+ },
1008
+ {
1009
+ "type": "text",
1010
+ "text": "References",
1011
+ "text_level": 1,
1012
+ "bbox": [
1013
+ 114,
1014
+ 305,
1015
+ 210,
1016
+ 321
1017
+ ],
1018
+ "page_idx": 8
1019
+ },
1020
+ {
1021
+ "type": "list",
1022
+ "sub_type": "ref_text",
1023
+ "list_items": [
1024
+ "[1] Anselm L Strauss. 1967. The discovery of grounded theory: Strategies for qualitative research. Aldine.",
1025
+ "[2] Johnny Saldana. 2009. The coding manual for qualitative researchers. SAGE Publications.",
1026
+ "[3] Momin M Malik. 2020. A hierarchy of limitations in machine learning. arXiv preprint arXiv:2002.05193.",
1027
+ "[4] Laura K Nelson. 2017. Computational Grounded Theory: A methodological framework. Sociological Methods & Research, 49(1):3-42.",
1028
+ "[5] Ankita Dhar, Himadri Mukherjee, Niladri Sekhar Dash, and Kaushik Roy. 2021. Text categorization: past and present. Artificial Intelligence Review, 54:3007-3054.",
1029
+ "[6] Sébastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712.",
1030
+ "[7] Bernardino Romero-Paredes, Mohammadamin Barekatain, Alexander Novikov, Matej Balog, M Pawan Kumar, Emilien Dupont, Francisco JR Ruiz, Jordan S Ellenberg, Pengming Wang, Omar Fawzi, et al. 2023. Mathematical discoveries from program search with large language models. Nature, 625:1-3.",
1031
+ "[8] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. 2023. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805.",
1032
+ "[9] Ziang Xiao, Xingdi Yuan, Q Vera Liao, Rania Abdelghani, and Pierre-Yves Oudeyer. 2023. Supporting qualitative analysis with large language models: Combining codebook with GPT-3 for deductive coding. In Companion Proceedings of the 28th International Conference on Intelligent User Interfaces.",
1033
+ "[10] Robert Chew, John Bollenbacher, Michael Wenger, Jessica Speer, and Annice Kim. 2023. LLM-assisted content analysis: Using large language models to support deductive coding. arXiv preprint arXiv:2306.14924.",
1034
+ "[11] Shih-Chieh Dai, Aiping Xiong, and Lun-Wei Ku. 2023. LLM-in-the-loop: Leveraging large language model for thematic analysis. arXiv preprint arXiv:2310.15100.",
1035
+ "[12] Robert H Tai, Lillian R Bentley, Xin Xia, Jason M Sitt, Sarah C Fankhauser, Ana M Chicas-Mosier, and Barnas G Monteith. 2023. An examination of the use of large language models to aid analysis of textual data. bioRxiv preprint bioRxiv:2023.07.17.549361.",
1036
+ "[13] Ammar Ismael Kadhim. 2019. Survey on supervised machine learning techniques for automatic text classification. Artificial Intelligence Review, 52(1):273-292.",
1037
+ "[14] David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3(1):993-1022.",
1038
+ "[15] Hamed Jelodar, Yongli Wang, Chi Yuan, Xia Feng, Xiahui Jiang, Yanchao Li, and Liang Zhao. 2019. Latent Dirichlet allocation (LDA) and topic modeling: Models, applications, a survey. Multimedia Tools and Applications, 78:15169-15211.",
1039
+ "[16] Maarten Grootendorst. 2022. BERTopic: Neural topic modeling with a class-based TF-IDF procedure. arXiv preprint arXiv:2203.05794."
1040
+ ],
1041
+ "bbox": [
1042
+ 112,
1043
+ 329,
1044
+ 883,
1045
+ 912
1046
+ ],
1047
+ "page_idx": 8
1048
+ },
1049
+ {
1050
+ "type": "header",
1051
+ "text": "Scalable Content Analysis with LLMs",
1052
+ "bbox": [
1053
+ 112,
1054
+ 42,
1055
+ 369,
1056
+ 56
1057
+ ],
1058
+ "page_idx": 8
1059
+ },
1060
+ {
1061
+ "type": "page_number",
1062
+ "text": "9",
1063
+ "bbox": [
1064
+ 491,
1065
+ 935,
1066
+ 504,
1067
+ 946
1068
+ ],
1069
+ "page_idx": 8
1070
+ },
1071
+ {
1072
+ "type": "list",
1073
+ "sub_type": "ref_text",
1074
+ "list_items": [
1075
+ "[17] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901.",
1076
+ "[18] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. LLaMa: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.",
1077
+ "[19] MistralAI. 2023. Mixtral of experts: A high quality sparse mixture-of-experts. https://mistral.ai/news/mixtral-of-experts. Accessed: 2024-01-13.",
1078
+ "[20] Anthropic. 2023. Claude 2. https://www.anthropic.com/index/claude-2. Accessed: 2024-01-18.",
1079
+ "[21] Jan Nowakowski and Jan Keller. 2024. AI-powered patching: The future of automated vulnerability fixes. Google Security Engineering Technical Report.",
1080
+ "[22] Carlos Gómez-Rodríguez and Paul Williams. 2023. A confederacy of models: A comprehensive evaluation of LLMs on creative writing. arXiv preprint arXiv:2310.08433.",
1081
+ "[23] Manon Bischoff. 2024. AI matches the abilities of the best Math Olympians. Scientific American.",
1082
+ "[24] Werner Vach and Oke Gerke. 2023. Gwet's AC1 is not a substitute for Cohen's kappa – A comparison of basic properties. MethodsX, 10:102212.",
1083
+ "[25] Etienne Ollion, Rubing Shen, Ana Macanovic, and Arnault Chatelain. 2023. ChatGPT for text annotation? Mind the hype! SocArXiv preprint doi:10.31235/osf.io/x58kn.",
1084
+ "[26] Anselm Strauss and Juliet M Corbin. 1997. Grounded theory in practice. SAGE Publications.",
1085
+ "[27] OpenAI. 2023. GPT-4. https://openai.com/research/gpt-4. Accessed: 2024-01-18.",
1086
+ "[28] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. 2024. Self-rewarding language models. arXiv preprint arXiv:2401.10020.",
1087
+ "[29] Sondos Mahmoud Bsharat, Aidar Myrzakhan, and Zhiqiang Shen. 2023. Principled instructions are all you need for questioning LLaMA-1/2, GPT-3.5/4. arXiv preprint arXiv:2312.16171.",
1088
+ "[30] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837.",
1089
+ "[31] Aman Madaan and Amir Yazdanbakhsh. 2022. Text and patterns: For effective chain of thought, it takes two to tango. arXiv preprint arXiv:2209.07686.",
1090
+ "[32] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. 2023. A survey of chain of thought reasoning: Advances, frontiers and future. arXiv preprint arXiv:2309.15402.",
1091
+ "[33] Zhen Lin, Shubhendu Trivedi, and Jimeng Sun. 2023. Generating with confidence: Uncertainty quantification for black-box large language models. arXiv preprint arXiv:2305.19187.",
1092
+ "[34] Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. 2023. Navigating the grey area: Expressions of overconfidence and uncertainty in language models. arXiv preprint arXiv:2302.13439.",
1093
+ "[35] Chen, Jiefeng and Yoon, Jinsung. 2024. Introducing ASPIRE for selective prediction in LLMs. https://blog.research.google/2024/01/introducing-aspire-for-selective.html?m=1. Accessed: 2024-01-20.",
1094
+ "[36] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. REFINER: Reasoning feedback on intermediate representations. arXiv preprint arXiv:2304.01904.",
1095
+ "[37] Noah Shinn, Beck Labash, and Ashwin Gopinath. 2023. Reflexion: An autonomous agent with dynamic memory and self-reflection. arXiv preprint arXiv:2303.11366.",
1096
+ "[38] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2022. ReAct: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629.",
1097
+ "[39] Zhenran Xu, Senbao Shi, Baotian Hu, Jindi Yu, Dongfang Li, Min Zhang, and Yuxiang Wu. 2023. Towards reasoning in large language models via multi-agent peer review collaboration. arXiv preprint arXiv:2311.08152.",
1098
+ "[40] Albert Gu and Tri Dao. 2023. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752."
1099
+ ],
1100
+ "bbox": [
1101
+ 114,
1102
+ 90,
1103
+ 883,
1104
+ 883
1105
+ ],
1106
+ "page_idx": 9
1107
+ },
1108
+ {
1109
+ "type": "header",
1110
+ "text": "Scalable Content Analysis with LLMs",
1111
+ "bbox": [
1112
+ 114,
1113
+ 42,
1114
+ 369,
1115
+ 56
1116
+ ],
1117
+ "page_idx": 9
1118
+ },
1119
+ {
1120
+ "type": "page_number",
1121
+ "text": "10",
1122
+ "bbox": [
1123
+ 488,
1124
+ 935,
1125
+ 506,
1126
+ 946
1127
+ ],
1128
+ "page_idx": 9
1129
+ }
1130
+ ]
2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_model.json ADDED
@@ -0,0 +1,1639 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.266,
8
+ 0.058,
9
+ 0.709
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2401.15170v2 [cs.CL] 12 Feb 2024"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.141,
18
+ 0.102,
19
+ 0.857,
20
+ 0.175
21
+ ],
22
+ "angle": 0,
23
+ "content": "Scalable Qualitative Coding with LLMs: Chain-of-Thought Reasoning Matches Human Performance in Some Hermeneutic Tasks"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.418,
29
+ 0.208,
30
+ 0.584,
31
+ 0.222
32
+ ],
33
+ "angle": 0,
34
+ "content": "Zackary Okun Dunivin"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.324,
40
+ 0.223,
41
+ 0.674,
42
+ 0.236
43
+ ],
44
+ "angle": 0,
45
+ "content": "Center for Complex Networks and Systems Research"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.282,
51
+ 0.237,
52
+ 0.715,
53
+ 0.25
54
+ ],
55
+ "angle": 0,
56
+ "content": "Luddy School of Informatics, Computer Science, and Engineering"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.417,
62
+ 0.251,
63
+ 0.582,
64
+ 0.264
65
+ ],
66
+ "angle": 0,
67
+ "content": "Department of Sociology"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.438,
73
+ 0.265,
74
+ 0.56,
75
+ 0.277
76
+ ],
77
+ "angle": 0,
78
+ "content": "Indiana University"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.413,
84
+ 0.278,
85
+ 0.586,
86
+ 0.291
87
+ ],
88
+ "angle": 0,
89
+ "content": "Bloomington, Indiana, US"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.438,
95
+ 0.293,
96
+ 0.561,
97
+ 0.304
98
+ ],
99
+ "angle": 0,
100
+ "content": "zdunivin@iu.edu"
101
+ },
102
+ {
103
+ "type": "title",
104
+ "bbox": [
105
+ 0.45,
106
+ 0.355,
107
+ 0.548,
108
+ 0.37
109
+ ],
110
+ "angle": 0,
111
+ "content": "ABSTRACT"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.17,
117
+ 0.377,
118
+ 0.828,
119
+ 0.585
120
+ ],
121
+ "angle": 0,
122
+ "content": "Qualitative coding, or content analysis, extracts meaning from text to discern quantitative patterns across a corpus of texts. Recently, advances in the interpretive abilities of large language models (LLMs) offer potential for automating the coding process (applying category labels to texts), thereby enabling human researchers to concentrate on more creative research aspects, while delegating these interpretive tasks to AI. Our case study comprises a set of socio-historical codes on dense, paragraph-long passages representative of a humanistic study. We show that GPT-4 is capable of human-equivalent interpretations, whereas GPT-3.5 is not. Compared to our human-derived gold standard, GPT-4 delivers excellent intercoder reliability (Cohen's \\(\\kappa \\geq 0.79\\)) for 3 of 9 codes, and substantial reliability (\\(\\kappa \\geq 0.6\\)) for 8 of 9 codes. In contrast, GPT-3.5 greatly underperforms for all codes (\\(mean(\\kappa) = 0.34\\); \\(max(\\kappa) = 0.55\\)). Importantly, we find that coding fidelity improves considerably when the LLM is prompted to give rationale justifying its coding decisions (chain-of-thought reasoning). We present these and other findings along with a set of best practices for adapting traditional codebooks for LLMs. Our results indicate that for certain codebooks, state-of-the-art LLMs are already adept at large-scale content analysis. Furthermore, they suggest the next generation of models will likely render AI coding a viable option for a majority of codebooks."
123
+ },
124
+ {
125
+ "type": "title",
126
+ "bbox": [
127
+ 0.114,
128
+ 0.606,
129
+ 0.254,
130
+ 0.621
131
+ ],
132
+ "angle": 0,
133
+ "content": "1 Introduction"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.111,
139
+ 0.637,
140
+ 0.885,
141
+ 0.722
142
+ ],
143
+ "angle": 0,
144
+ "content": "Text categorization, commonly referred to as content analysis and qualitative coding in the social sciences, plays an important role in scholarly research and industrial applications. This process traditionally relies on human expertise to interpret the nuanced and often complex meanings embedded in texts [1, 2]. The difficulty lies in the multifaceted nature of meaning and the challenge of fitting real-world complexity into discrete categories, even for skilled readers. Historically, these challenges have positioned text categorization as a task unsuitable for machine learning approaches [3], despite robust attempts [4, 5]."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.111,
150
+ 0.727,
151
+ 0.885,
152
+ 0.825
153
+ ],
154
+ "angle": 0,
155
+ "content": "Recent developments in artificial intelligence, notably the advent of transformers with billions of parameters known as large language models (LLMs), have begun to challenge this notion. These models demonstrate increasing capabilities in knowledge, interpretation, reasoning, and creativity expressed in natural language, approaching or even surpassing human performance [6-8]. The processing speed of artificial intelligence opens up the possibility of categorizing vast quantities of text, far exceeding the limitations of human coding teams restricted to smaller samples. Yet, this opportunity raises a critical question: how can we ensure and maintain the accuracy of machine categorization at a level comparable to human standards?"
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.111,
161
+ 0.83,
162
+ 0.885,
163
+ 0.888
164
+ ],
165
+ "angle": 0,
166
+ "content": "This study provides the strongest evidence to date that machines are capable of human-quality interpretations of text for the purposes of qualitative coding. Additionally, our report serves as a practical guide to employing LLMs in text categorization and as a reference for those encountering machine-assisted qualitative coding in empirical research. We contribute to the growing body of work that builds confidence in the rigor of LLM-based text categorization [9-12], a"
167
+ },
168
+ {
169
+ "type": "footer",
170
+ "bbox": [
171
+ 0.113,
172
+ 0.898,
173
+ 0.422,
174
+ 0.912
175
+ ],
176
+ "angle": 0,
177
+ "content": "Code and data available at https://osf.io/k4fg9"
178
+ }
179
+ ],
180
+ [
181
+ {
182
+ "type": "header",
183
+ "bbox": [
184
+ 0.114,
185
+ 0.043,
186
+ 0.37,
187
+ 0.058
188
+ ],
189
+ "angle": 0,
190
+ "content": "Scalable Content Analysis with LLMs"
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.111,
196
+ 0.092,
197
+ 0.885,
198
+ 0.204
199
+ ],
200
+ "angle": 0,
201
+ "content": "field that will expand as these models continue to evolve. Our report emphasizes the redesign of codebooks—comprising category descriptions and coding instructions—specifically for LLMs. We demonstrate how the structure of prompts, the specific requests made to the generative model for categorizing passages, significantly impacts coding fidelity. Even as these models continue to rapidly improve, we expect most of the principles of prompt design we report will remain useful and informative as methodologists explore new models and empiricists automate their coding workflows. Our results are presented through narratives detailing our approach and highlighting potential challenges, and demonstrated by comparing LLM-generated analyses to a human-derived gold standard. A summary of best practices for content analysis with an LLM is also presented in tabular format for quick reference."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.113,
207
+ 0.209,
208
+ 0.341,
209
+ 0.224
210
+ ],
211
+ "angle": 0,
212
+ "content": "Key findings of our study include:"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.158,
218
+ 0.242,
219
+ 0.88,
220
+ 0.27
221
+ ],
222
+ "angle": 0,
223
+ "content": "- GPT-4 exhibits human-equivalent performance with zero-shot prompts. 8 of 9 tasks exceed the 0.6 threshold for substantial agreement using Cohen's \\(\\kappa\\). 3 of 9 tasks exceed the 0.75 threshold for excellent agreement."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.157,
229
+ 0.284,
230
+ 0.833,
231
+ 0.298
232
+ ],
233
+ "angle": 0,
234
+ "content": "- GPT-3.5, when given the same prompts, has an average intercoder reliability of 0.34 across all codes."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.157,
240
+ 0.311,
241
+ 0.881,
242
+ 0.339
243
+ ],
244
+ "angle": 0,
245
+ "content": "- Codebooks designed for human coders need reworking for LLM application, requiring iterative manual testing to refine phrasing and improve model comprehension."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.157,
251
+ 0.352,
252
+ 0.881,
253
+ 0.366
254
+ ],
255
+ "angle": 0,
256
+ "content": "- Agreement improves when the LLM provides rationale for code assignments: \\(\\mu (\\kappa) = 0.68\\) vs. \\(\\mu (\\kappa) = 0.59\\)."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.157,
262
+ 0.38,
263
+ 0.882,
264
+ 0.408
265
+ ],
266
+ "angle": 0,
267
+ "content": "- Agreement improves when presenting each code as a separate prompt, rather than the codebook as a whole: \\(\\mu (\\kappa) = 0.68\\) vs. \\(\\mu (\\kappa) = 0.60\\)."
268
+ },
269
+ {
270
+ "type": "list",
271
+ "bbox": [
272
+ 0.157,
273
+ 0.242,
274
+ 0.882,
275
+ 0.408
276
+ ],
277
+ "angle": 0,
278
+ "content": null
279
+ },
280
+ {
281
+ "type": "title",
282
+ "bbox": [
283
+ 0.113,
284
+ 0.434,
285
+ 0.488,
286
+ 0.449
287
+ ],
288
+ "angle": 0,
289
+ "content": "1.1 Automating Content Analysis: Past and Present"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.111,
295
+ 0.463,
296
+ 0.885,
297
+ 0.56
298
+ ],
299
+ "angle": 0,
300
+ "content": "Prior work on automating content analysis entailed training machine learning models on large quantities of text. Supervised models, typically some form of linear regression, learn to associate text features with user-specified categories [13]. This process captures half the traditional human-coded process by using human-derived codes and examples, but fails to leverage abstract code descriptions found in a codebook, as well as requiring large quantities of human-annotated data. Unsupervised models, such as LDA [14, 15] or BERTopic [16], develop their own categorizations from unlabeled training sets. This process does not require time-intensive labeling, but rarely captures the specific categories that a researcher intends to target."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.111,
306
+ 0.567,
307
+ 0.884,
308
+ 0.706
309
+ ],
310
+ "angle": 0,
311
+ "content": "The latest generation of LLMs (e.g., GPT [17], LLaMa [18], Mistral [19], Claude [20]) differ notably from previous machine learning models in that they can perform new tasks specified through natural language prompts. A user can specify a task that the model was not trained on, give few (single digit) or no examples, and the model will return output conforming to the specifications. Demonstrated successes include computer code generation [21], creative writing [22], and quantitative reasoning [23]. We are only beginning to understand and expand upon the limitations of these models. By converting natural language requests into highly intelligent output across vast and indeterminate domains, LLMs lower the technical barriers to machine learning by making its application more naturalistic and eliminating the need for large training data. Beyond this, LLMs' capacities in many domains far exceed those of the specialized machine learning models that preceded them, suggesting that for many applications, including scholarly inquiry, artificial intelligence is overwhelmingly more accessible and capable in 2024 than it was just two years prior."
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.111,
317
+ 0.711,
318
+ 0.884,
319
+ 0.824
320
+ ],
321
+ "angle": 0,
322
+ "content": "Early studies of content analysis with LLMs are encouraging. Xiao et al. [9] demonstrate moderate success, Cohen's \\(\\kappa = 0.61\\) and \\(\\kappa = 0.38\\), in two linguistic tasks using GPT-3. Chew et al. [10] report high success on many of 19 tasks across three datasets, and results that are indistinguishable from random for others. It is difficult to evaluate their results due to the choice of Gwet's AC1, which is biased toward agreement on negative codings rather than positive, whereas most standard measures of intercoder reliability do the opposite [24]. However, we commend Chew et al.'s approach to adapting codebooks for LLMs, which is communicated with great detail and clarity. A survey of 20 empirical pieces reports \"mixed-results\" of using GPT-3 to automate \"text annotation,\" a term that ties their framing to \"data annotation\", labeling data for use in machine learning [25], rather than content analysis in the tradition of grounded theory [26]."
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.111,
328
+ 0.828,
329
+ 0.884,
330
+ 0.913
331
+ ],
332
+ "angle": 0,
333
+ "content": "We present here three advances to these studies. 1) We report the first methodological account of automating qualitative coding using GPT-4, which, along with other recent models, greatly improves upon many of GPT-3's capabilities [19, 27, 28]. 2) We provide the first conclusive evidence that LLMs are capable of human-equivalent performance in qualitative coding, and do so on larger passages of text, wherein meaning is often woven through multiple interrelated clauses. 3) We demonstrate that GPT is better at interpreting text when it is tasked with justifying its coding decisions (chain-of-thought prompting) rather than applying codes without an accompanying explanation."
334
+ },
335
+ {
336
+ "type": "page_number",
337
+ "bbox": [
338
+ 0.494,
339
+ 0.936,
340
+ 0.505,
341
+ 0.948
342
+ ],
343
+ "angle": 0,
344
+ "content": "2"
345
+ }
346
+ ],
347
+ [
348
+ {
349
+ "type": "header",
350
+ "bbox": [
351
+ 0.114,
352
+ 0.043,
353
+ 0.37,
354
+ 0.058
355
+ ],
356
+ "angle": 0,
357
+ "content": "Scalable Content Analysis with LLMs"
358
+ },
359
+ {
360
+ "type": "title",
361
+ "bbox": [
362
+ 0.114,
363
+ 0.092,
364
+ 0.593,
365
+ 0.107
366
+ ],
367
+ "angle": 0,
368
+ "content": "1.2 Case Study: W.E.B. Du Bois's Characterization in News Media"
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.111,
374
+ 0.118,
375
+ 0.885,
376
+ 0.258
377
+ ],
378
+ "angle": 0,
379
+ "content": "In order to present a realistic challenge of using an LLM to do qualitative coding, we make a case study of our own work. We adapted a codebook written by the authors to understand how the scholar and activist W.E.B. Du Bois has been characterized in news media over time. The codebook is composed of 9 codes in 3 categories. Due to multiple layers of agency (who is doing what) and voice (who is saying what), the tasks are difficult even for human interpreters. Applying the codes is also complicated because it can be difficult to differentiate Du Bois's scholarship from his political activism, as Du Bois's theoretical contributions have profound implications for understanding race and the social-historical position of Black persons in the United States and beyond, making them powerful activist tools. We are particularly interested in understanding how different facets of Du Bois's activities contributed to his canonization in the public imagination as the preeminent figure for understanding Black political struggle. Table 1 gives the codes in brief. Complete examples of the original human and modified-for-GPT codebook are included in the appendix."
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.111,
385
+ 0.263,
386
+ 0.885,
387
+ 0.32
388
+ ],
389
+ "angle": 0,
390
+ "content": "The training and test data for our study were random samples of passages from New York Times articles (1970-2023) that mention W.E.B. Du Bois. 232 passages were automatically extracted as concurrent paragraphs containing \"Du Bois\". The average number of words was 94 (\\(\\sigma = 70\\)), and the average number of sentences was 3.75 (\\(\\sigma = 2.88\\)). To give a better sense of the size of our passages, this paragraph has 76 words across 4 sentences."
391
+ },
392
+ {
393
+ "type": "table_caption",
394
+ "bbox": [
395
+ 0.352,
396
+ 0.342,
397
+ 0.645,
398
+ 0.356
399
+ ],
400
+ "angle": 0,
401
+ "content": "Table 1: Categories and descriptions for 9 codes."
402
+ },
403
+ {
404
+ "type": "table",
405
+ "bbox": [
406
+ 0.115,
407
+ 0.357,
408
+ 0.884,
409
+ 0.63
410
+ ],
411
+ "angle": 0,
412
+ "content": "<table><tr><td colspan=\"2\">Characterization of Du Bois</td></tr><tr><td>Scholar</td><td>Describes Du Bois as a scholar or intellectual.</td></tr><tr><td>Activist</td><td>Refers to Du Bois&#x27;s political or social activism.</td></tr><tr><td colspan=\"2\">General Themes</td></tr><tr><td>Monumental Memorialization</td><td>Refers to an enduring cultural object named after Du Bois.</td></tr><tr><td>Mention of Scholarly Work</td><td>Mentions or quotes specific academic works by Du Bois.</td></tr><tr><td>Social/Political Advocacy</td><td>Mentions or implies social or political activism, advocacy, or critique.</td></tr><tr><td colspan=\"2\">Canonization Processes</td></tr><tr><td>Coalition Building</td><td>Refers to Du Bois&#x27;s activities with activist or academic organizations.</td></tr><tr><td>Out of the Mouth of Academics</td><td>Describes an academic organization engaging with Du Bois&#x27;s legacy.</td></tr><tr><td>Out of the Mouth of Activists</td><td>Describes an activist organization engaging with Du Bois&#x27;s legacy.</td></tr><tr><td>Collective Synecdoche</td><td>Mentions Du Bois alongside other figures in order to represent some facet of a culture, era, or ideology.</td></tr></table>"
413
+ },
414
+ {
415
+ "type": "title",
416
+ "bbox": [
417
+ 0.113,
418
+ 0.694,
419
+ 0.21,
420
+ 0.71
421
+ ],
422
+ "angle": 0,
423
+ "content": "2 Results"
424
+ },
425
+ {
426
+ "type": "title",
427
+ "bbox": [
428
+ 0.113,
429
+ 0.726,
430
+ 0.395,
431
+ 0.741
432
+ ],
433
+ "angle": 0,
434
+ "content": "2.1 Adapting a Codebook for an LLM"
435
+ },
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.111,
440
+ 0.753,
441
+ 0.885,
442
+ 0.866
443
+ ],
444
+ "angle": 0,
445
+ "content": "Initially, we developed a codebook for human coders using standard methods. This process involved exploratory reading to define and refine codes. Codes were derived to probe particular substantive hypotheses, some of which preceded our exploratory reading, and others which resulted from it. We then applied these codes iteratively, adjusting them as needed until we achieved high intercoder reliability with a test set. We adapted these code descriptions for use with a large language model, evaluating the LLM's performance on a training set of text passages. Where we found ambiguities or deficiencies in the model's interpretation, we refined the code descriptions accordingly. This iterative process of definition, evaluation, and refinement follows Nelson's Computational Grounded Theory paradigm [4], the core of which is common to all qualitative code development processes whether or not coding is automated [26]."
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.111,
451
+ 0.87,
452
+ 0.885,
453
+ 0.913
454
+ ],
455
+ "angle": 0,
456
+ "content": "Our experience modifying the code descriptions yielded several key insights related in the following paragraphs. We encourage readers interested in a fuller account of this process to read Chew et al.'s study [10] describing their process of LLM-Assisted Content Analysis (LACA), which relates a process similar to our own in greater detail."
457
+ },
458
+ {
459
+ "type": "page_number",
460
+ "bbox": [
461
+ 0.494,
462
+ 0.936,
463
+ 0.505,
464
+ 0.948
465
+ ],
466
+ "angle": 0,
467
+ "content": "3"
468
+ }
469
+ ],
470
+ [
471
+ {
472
+ "type": "header",
473
+ "bbox": [
474
+ 0.115,
475
+ 0.043,
476
+ 0.37,
477
+ 0.058
478
+ ],
479
+ "angle": 0,
480
+ "content": "Scalable Content Analysis with LLMs"
481
+ },
482
+ {
483
+ "type": "image",
484
+ "bbox": [
485
+ 0.11,
486
+ 0.088,
487
+ 0.885,
488
+ 0.495
489
+ ],
490
+ "angle": 0,
491
+ "content": null
492
+ },
493
+ {
494
+ "type": "image_caption",
495
+ "bbox": [
496
+ 0.35,
497
+ 0.5,
498
+ 0.645,
499
+ 0.514
500
+ ],
501
+ "angle": 0,
502
+ "content": "Figure 1: The chain-of-thought prompt sequence."
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.111,
508
+ 0.545,
509
+ 0.884,
510
+ 0.672
511
+ ],
512
+ "angle": 0,
513
+ "content": "LLM-generated rationale are essential for evaluating performance. In adapting the codebook, we wanted to understand not just which codes the model struggled to interpret correctly, but what aspects of the code the model failed to capture. To achieve this, we structured our prompts to require GPT to justify its decision to apply or not apply each code. These rationale were invaluable. They often highlighted parts of the code description that were ambiguous or imprecisely defined, leading the model to misinterpret them. Whenever a rationale repeatedly pointed to such an issue, we revised the corresponding code. We then retested the passage to check that the code was correctly applied and the rationale aligned with our intended interpretation of the code. Sometimes a revision would not improve the interpretations for the passages in question; other times it would fix the interpretations for those passages, but would introduce new problems in passages which were previously coded correctly."
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.111,
519
+ 0.677,
520
+ 0.884,
521
+ 0.707
522
+ ],
523
+ "angle": 0,
524
+ "content": "Figure 1 demonstrates an effective method of prompting GPT to provide rationale for its code selections. The initial instruction is given by the Justification section of the prompt, and is solicited again in the Decision/Formatting box."
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.111,
530
+ 0.725,
531
+ 0.885,
532
+ 0.852
533
+ ],
534
+ "angle": 0,
535
+ "content": "LLMs require more precise descriptions than do human readers. Human coders do not rely solely on a written codebook. Their interpretation of the codes is enriched through the codebook development process, discussions with fellow coders, and supplementary oral instructions. An LLM lacks this interactive and historical context and must interpret codes entirely from written descriptions. Our work modifying the codebook for GPT revealed information that, while implicitly understood by the code developers, wasn't explicitly stated in the code descriptions. This process not only aided in refining the codebook for automated coding, but also improved our own understandings of the codes. This ultimately led to clearer definition of the codes, thereby enhancing future manual coding processes as well. Figure 2 demonstrates how the Monumental Memorialization and Social/Political Advocacy codes were redefined to improve GPT's comprehension."
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.111,
541
+ 0.856,
542
+ 0.884,
543
+ 0.913
544
+ ],
545
+ "angle": 0,
546
+ "content": "Often, we encountered cases where ambiguous phrasing was obvious to humans, but challenging for the LLM. Our codebook contains two codes that relate to Du Bois's reputation among academics and activists. These codes are meant to evaluate whether Du Bois appears in a news story because either an academic or activist mentioned him. Initially, we titled this code \"Academic Repute,\" which worked well for human coders. GPT, however, consistently misinterpreted"
547
+ },
548
+ {
549
+ "type": "page_number",
550
+ "bbox": [
551
+ 0.493,
552
+ 0.936,
553
+ 0.506,
554
+ 0.948
555
+ ],
556
+ "angle": 0,
557
+ "content": "4"
558
+ }
559
+ ],
560
+ [
561
+ {
562
+ "type": "header",
563
+ "bbox": [
564
+ 0.114,
565
+ 0.043,
566
+ 0.37,
567
+ 0.058
568
+ ],
569
+ "angle": 0,
570
+ "content": "Scalable Content Analysis with LLMs"
571
+ },
572
+ {
573
+ "type": "title",
574
+ "bbox": [
575
+ 0.138,
576
+ 0.089,
577
+ 0.269,
578
+ 0.104
579
+ ],
580
+ "angle": 0,
581
+ "content": "Original: Activist"
582
+ },
583
+ {
584
+ "type": "text",
585
+ "bbox": [
586
+ 0.135,
587
+ 0.105,
588
+ 0.884,
589
+ 0.163
590
+ ],
591
+ "angle": 0,
592
+ "content": "Apply when Du Bois is developing activist organizations, giving public speeches, participating in meetings with politicians and organizers, running for office, promoting a candidate, organization or initiative. (A) Also apply when Du Bois is explicitly described as an activist or leader."
593
+ },
594
+ {
595
+ "type": "title",
596
+ "bbox": [
597
+ 0.138,
598
+ 0.175,
599
+ 0.282,
600
+ 0.188
601
+ ],
602
+ "angle": 0,
603
+ "content": "Redefined: Activist"
604
+ },
605
+ {
606
+ "type": "text",
607
+ "bbox": [
608
+ 0.114,
609
+ 0.19,
610
+ 0.884,
611
+ 0.277
612
+ ],
613
+ "angle": 0,
614
+ "content": "Apply this code when Du Bois is explicitly called an \"activist\" or \"leader\", or when his political or social activism is either explicitly noted or clearly implied through context. Examples include being mentioned in the context of leadership, activism, developing activist organizations, giving public speeches, participating in meetings with politicians and organizers, running for office, or promoting a candidate, organization, or initiative."
615
+ },
616
+ {
617
+ "type": "title",
618
+ "bbox": [
619
+ 0.138,
620
+ 0.304,
621
+ 0.414,
622
+ 0.321
623
+ ],
624
+ "angle": 0,
625
+ "content": "Original: Social/Political Activism (c)"
626
+ },
627
+ {
628
+ "type": "text",
629
+ "bbox": [
630
+ 0.114,
631
+ 0.321,
632
+ 0.884,
633
+ 0.364
634
+ ],
635
+ "angle": 0,
636
+ "content": "(D) Refers to any form of social or political participation in promoting change in society. Can be used in conjunction with Du Bois' scholarly work, specifically (E) when Du Bois's ideas are used to frame Black political struggle. Can be used"
637
+ },
638
+ {
639
+ "type": "text",
640
+ "bbox": [
641
+ 0.114,
642
+ 0.364,
643
+ 0.884,
644
+ 0.392
645
+ ],
646
+ "angle": 0,
647
+ "content": "(F) to describe Du Bois' political work or those who invoke Du Bois in service of (G) their activism."
648
+ },
649
+ {
650
+ "type": "list",
651
+ "bbox": [
652
+ 0.114,
653
+ 0.321,
654
+ 0.884,
655
+ 0.392
656
+ ],
657
+ "angle": 0,
658
+ "content": null
659
+ },
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.138,
664
+ 0.403,
665
+ 0.437,
666
+ 0.42
667
+ ],
668
+ "angle": 0,
669
+ "content": "Redefined: Social/Political Advocacy (C)"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.114,
675
+ 0.42,
676
+ 0.884,
677
+ 0.478
678
+ ],
679
+ "angle": 0,
680
+ "content": "(D) This code applies when a passage mentions or implies any form of social or political activism, advocacy, critique, or discourse, including discussions about current or historical social problems. This includes not only direct activism of Du Bois and others, but also the framing and challenging of social norms,"
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.114,
686
+ 0.478,
687
+ 0.884,
688
+ 0.551
689
+ ],
690
+ "angle": 0,
691
+ "content": "(E) historical narratives, and racial or cultural identities. Apply this code when Du Bois's work, persona, or ideas are invoked in discussions that critically engage with Black identity, positionality, or broader systemic circumstances of Black people. Adjacency to other activists, such as inclusion in a list, is insufficient; advocacy must be explicitly mentioned in the passage."
692
+ },
693
+ {
694
+ "type": "list",
695
+ "bbox": [
696
+ 0.114,
697
+ 0.42,
698
+ 0.884,
699
+ 0.551
700
+ ],
701
+ "angle": 0,
702
+ "content": null
703
+ },
704
+ {
705
+ "type": "image_caption",
706
+ "bbox": [
707
+ 0.113,
708
+ 0.573,
709
+ 0.884,
710
+ 0.601
711
+ ],
712
+ "angle": 0,
713
+ "content": "Figure 2: Two examples of prompt redefinition. Colored, alphabetically labeled blocks of text show alterations derived through iterative code refinement. Italics draw attention to direction to constrain interpretive scope to implicit or explicit information."
714
+ },
715
+ {
716
+ "type": "text",
717
+ "bbox": [
718
+ 0.111,
719
+ 0.63,
720
+ 0.885,
721
+ 0.715
722
+ ],
723
+ "angle": 0,
724
+ "content": "this code as pertaining to Du Bois's esteem as an academic, rather than among or by academics (the meaning of \"among\" remains ambiguous even here). We tried numerous iterations of this code without success. Nevertheless, altering the title of the code to the far more literal \"Out of the Mouth of Academics\" dramatically improved performance, even when paired with the original code description. In another case, the code titled \"Social/Political Activism\" was revised to \"Social/Political Advocacy\" (Figure 2, C) because GPT did not consider social critique to be a form of activism, even when it was specifically instructed to."
725
+ },
726
+ {
727
+ "type": "text",
728
+ "bbox": [
729
+ 0.111,
730
+ 0.72,
731
+ 0.884,
732
+ 0.777
733
+ ],
734
+ "angle": 0,
735
+ "content": "We found that words indicating how much the model should draw on context or its own outside knowledge had large impacts on the model's outputs, often to the desired effect. In particular, instructing the model to restrict itself to \"explicit\" meanings, or to draw on \"implicit\" meanings, often helped the model with part of a code description it had struggled with. Figure 2 B and D demonstrate the addition of such verbiage to control scope."
736
+ },
737
+ {
738
+ "type": "text",
739
+ "bbox": [
740
+ 0.111,
741
+ 0.782,
742
+ 0.885,
743
+ 0.853
744
+ ],
745
+ "angle": 0,
746
+ "content": "Both mandatory (do) and prohibitory (do not) phrasing were observed by the model, though mandatory phrasing seemed more successful, a finding reported by other researchers [29]. The ordering of directives also impacted how likely the model was to follow them. We found that moving a phrase that was ignored in the coding rationale toward the front of the definition made the model more likely to follow its specifications, as in Figure 2 A. When a very specific problem was observed repeatedly, it was sometimes necessary to add a directive to correct it, as in Figure 2 H."
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.111,
752
+ 0.87,
753
+ 0.884,
754
+ 0.913
755
+ ],
756
+ "angle": 0,
757
+ "content": "Prompting for machine-readable output. To fully automate the coding process, model output must be reliably readable by a computer. The LLM generates text, which must be interpreted by another script into a data structure, such as a table, for further analysis. Instructing exactly how to format the output produced machine-readable results with"
758
+ },
759
+ {
760
+ "type": "page_number",
761
+ "bbox": [
762
+ 0.494,
763
+ 0.936,
764
+ 0.505,
765
+ 0.948
766
+ ],
767
+ "angle": 0,
768
+ "content": "5"
769
+ }
770
+ ],
771
+ [
772
+ {
773
+ "type": "header",
774
+ "bbox": [
775
+ 0.114,
776
+ 0.043,
777
+ 0.371,
778
+ 0.058
779
+ ],
780
+ "angle": 0,
781
+ "content": "Scalable Content Analysis with LLMs"
782
+ },
783
+ {
784
+ "type": "text",
785
+ "bbox": [
786
+ 0.111,
787
+ 0.092,
788
+ 0.885,
789
+ 0.162
790
+ ],
791
+ "angle": 0,
792
+ "content": "GPT-4 and GPT-3.5. Critically, this involves specifying a tag that the interpreting script locates, after which follows a reliably formatted list of codes. The Decision/Formatting component of Figure 1 illustrates how to constrain model output and produce consistent results across queries. Additionally, because GPT tends to be excessively verbose and to summarize its output, particularly at higher temperatures, we informed the model that we do not want any output to follow the code list."
793
+ },
794
+ {
795
+ "type": "title",
796
+ "bbox": [
797
+ 0.113,
798
+ 0.178,
799
+ 0.613,
800
+ 0.194
801
+ ],
802
+ "angle": 0,
803
+ "content": "2.2 Selecting a Model and Writing Prompts for Optimal Performance"
804
+ },
805
+ {
806
+ "type": "text",
807
+ "bbox": [
808
+ 0.111,
809
+ 0.204,
810
+ 0.885,
811
+ 0.288
812
+ ],
813
+ "angle": 0,
814
+ "content": "Once the code descriptions have been revised for LLM text categorization, numerous other decisions remain about how to prompt a model to execute the content analysis. We present these as a separate step for the sake of clarity, but in reality, we developed our approach iteratively and in tandem with revising the code descriptions. We hope future methodologists and empiricists will benefit from what we learned during this process, and that less exploration of these components will be necessary so practitioners can focus on application or exploration of calibrations not explored here. We summarize all our recommendations for qualitative coding with an LLM in Table 3."
815
+ },
816
+ {
817
+ "type": "text",
818
+ "bbox": [
819
+ 0.111,
820
+ 0.294,
821
+ 0.885,
822
+ 0.392
823
+ ],
824
+ "angle": 0,
825
+ "content": "There is a large and growing body of academic and nonacademic literature on prompt engineering: constructing user-defined input to elicit the best model output. In fact, the codebook adaptation in the previous section was in large part an exercise in prompt engineering. However, in this second section, prompt engineering refers more to the broader context of task description than to the code definitions. In this section we report how different prompts influence the quality of machine categorization. Additionally, we compare performance when the LLM is tasked with assigning each code independently to when the model is given the full codebook and tasked with coding all 9 codes as a single task. We refer to these as the \"Per Code\" and \"Full Codebook\" approaches, respectively."
826
+ },
827
+ {
828
+ "type": "text",
829
+ "bbox": [
830
+ 0.111,
831
+ 0.397,
832
+ 0.884,
833
+ 0.509
834
+ ],
835
+ "angle": 0,
836
+ "content": "Studies have shown that LLM decision-making improves when the model is prompted to account for its decisions [30, 31]. This is generally known as chain-of-thought (CoT) prompting or reasoning, and refers to breaking down tasks into specific components, one or more of which involve planning for future steps or reflection on previous ones. Our prompts, which can be viewed in full in the appendix, apply chain-of-thought prompting by including 1) a role assignment step, informing the machine of its purpose, 2) a task description step, specifying the code definition, 3) a justification step, instructing the model to provide a rationale for its decision, and 4) a decision step, wherein the model delivers its ultimate analysis in a consistent, machine-readable format. An example of the chain-of-thought prompt sequence is given by Figure 1."
837
+ },
838
+ {
839
+ "type": "text",
840
+ "bbox": [
841
+ 0.111,
842
+ 0.514,
843
+ 0.884,
844
+ 0.626
845
+ ],
846
+ "angle": 0,
847
+ "content": "We use zero-shot prompts throughout this study. Zero-shot refers to providing the model only the task description, without giving examples of correctly executed responses. Xiao et al. found few-shot prompting improves coding and performance on other tasks [9], whereas Chew et al. largely employed zero-shot prompts [10]. Our case study involves evaluating paragraph-long passages rather than single clauses. We found that information in the examples was drawn upon by the LLM and interfered with its coding decisions. We also found that in Full Codebook prompts, giving examples greatly expanded the prompt, negatively impacting results. We suggest that when content is more literary or historical, zero-shot prompts are probably preferred, but that most coding tasks will benefit from few-shot prompting as demonstrated by the results of many other studies across domains."
848
+ },
849
+ {
850
+ "type": "text",
851
+ "bbox": [
852
+ 0.111,
853
+ 0.631,
854
+ 0.885,
855
+ 0.716
856
+ ],
857
+ "angle": 0,
858
+ "content": "Performance comparisons are relative to the human-derived gold standard hidden from the LLM at all stages of development. We used the default settings for the GPT API where temperature is set to 0 and nucleus sampling (top_p) is set to 1. We specified the task description as a \"system prompt\", and provided each passage as a \"user prompt\". A system prompt gives the LLM its purpose, clearly specifying the task it is meant to address, whereas a user prompt provides the input to which the model responds by generating output. We did not investigate whether intercoder agreement suffers when the default system prompt is used and the task description and passage are combined into a single user prompt."
859
+ },
860
+ {
861
+ "type": "text",
862
+ "bbox": [
863
+ 0.111,
864
+ 0.73,
865
+ 0.885,
866
+ 0.814
867
+ ],
868
+ "angle": 0,
869
+ "content": "GPT-4 greatly outperforms GPT-3.5. We found that GPT-4 approaches human performance for 3 codes: Activist: \\(\\kappa = 0.81\\); Monumental Memorialization: \\(\\kappa = 1.00\\); Collective Synecdoche \\(\\kappa = 0.79\\). GPT-4 prompted for rationale provides considerably higher quality code assignments than GPT-3.5, except in the case of the Out of the Mouth of Activists code, which no configuration handled well. It is especially notable that GPT-4 and GPT-3.5 differed in their most accurately interpreted codes. In the 3 tasks GPT-4 executed best, GPT-3.5's performance was slightly below its own average, mean \\((\\kappa_{all}) = 0.34\\) vs. mean \\((\\kappa) = 0.32\\)."
870
+ },
871
+ {
872
+ "type": "text",
873
+ "bbox": [
874
+ 0.111,
875
+ 0.829,
876
+ 0.884,
877
+ 0.913
878
+ ],
879
+ "angle": 0,
880
+ "content": "Coding fidelity improves when codes are presented as individual tasks. We adapted our codebook by presenting the entire codebook to GPT along with task instructions. However, we found in testing that performance improved when GPT was given each task independently. This \"per code\" approach was taken by one recent study exploring content analysis with non-mutually exclusive codes (permitting multiply coded passages) [10], but not another, which tested only two codes [9]. Table 2 compares the GPT-4 performance when presented individual tasks for each code (\"Per Code\") and when presented all tasks in a single prompt (\"Full Codebook\"). We found that for the 3 human-equivalent"
881
+ },
882
+ {
883
+ "type": "page_number",
884
+ "bbox": [
885
+ 0.493,
886
+ 0.937,
887
+ 0.506,
888
+ 0.948
889
+ ],
890
+ "angle": 0,
891
+ "content": "6"
892
+ }
893
+ ],
894
+ [
895
+ {
896
+ "type": "header",
897
+ "bbox": [
898
+ 0.115,
899
+ 0.043,
900
+ 0.37,
901
+ 0.058
902
+ ],
903
+ "angle": 0,
904
+ "content": "Scalable Content Analysis with LLMs"
905
+ },
906
+ {
907
+ "type": "table_caption",
908
+ "bbox": [
909
+ 0.114,
910
+ 0.097,
911
+ 0.885,
912
+ 0.137
913
+ ],
914
+ "angle": 0,
915
+ "content": "Table 2: Intercoder reliability (Cohen's \\( \\kappa \\) ) for all codes on 111 gold standard passages. Best overall performance is shown in bold. Italics indicate the highest intercoder reliability between pairs with and without prompting for rationale (CoT vs. No CoT); if the pair are equivalent neither is italicized. Two values are considered equivalent if their difference does not exceed 0.02."
916
+ },
917
+ {
918
+ "type": "table",
919
+ "bbox": [
920
+ 0.133,
921
+ 0.137,
922
+ 0.861,
923
+ 0.422
924
+ ],
925
+ "angle": 0,
926
+ "content": "<table><tr><td rowspan=\"3\">Code</td><td rowspan=\"3\">Count</td><td colspan=\"4\">GPT-4</td><td colspan=\"2\">GPT-3.5</td></tr><tr><td colspan=\"2\">Per Code</td><td colspan=\"2\">Full Codebook</td><td colspan=\"2\">Per Code</td></tr><tr><td>CoT</td><td>No CoT</td><td>CoT</td><td>No CoT</td><td>CoT</td><td>No CoT</td></tr><tr><td>Scholar</td><td>27</td><td>0.61</td><td>0.52</td><td>0.59</td><td>0.42</td><td>0.29</td><td>0.21</td></tr><tr><td>Activist</td><td>23</td><td>0.81</td><td>0.65</td><td>0.67</td><td>0.62</td><td>0.39</td><td>0.32</td></tr><tr><td>Monumental Memorialization</td><td>13</td><td>1.00</td><td>0.91</td><td>0.75</td><td>0.48</td><td>0.29</td><td>0.31</td></tr><tr><td>Mention of Scholarly Work</td><td>24</td><td>0.71</td><td>0.69</td><td>0.52</td><td>0.44</td><td>0.33</td><td>0.39</td></tr><tr><td>Social/Political Advocacy</td><td>51</td><td>0.64</td><td>0.60</td><td>0.60</td><td>0.60</td><td>0.55</td><td>0.51</td></tr><tr><td>Coalition Building</td><td>9</td><td>0.60</td><td>0.44</td><td>0.43</td><td>0.13</td><td>0.33</td><td>0.17</td></tr><tr><td>Out of the Mouth of Academics</td><td>30</td><td>0.63</td><td>0.65</td><td>0.65</td><td>0.62</td><td>0.37</td><td>0.33</td></tr><tr><td>Out of the Mouth of Activists</td><td>11</td><td>0.30</td><td>0.09</td><td>0.34</td><td>0.18</td><td>0.21</td><td>0.09</td></tr><tr><td>Collective Synecdoche</td><td>26</td><td>0.79</td><td>0.78</td><td>0.81</td><td>0.71</td><td>0.27</td><td>0.27</td></tr><tr><td>Mean</td><td>24</td><td>0.68</td><td>0.59</td><td>0.60</td><td>0.46</td><td>0.34</td><td>0.29</td></tr></table>"
927
+ },
928
+ {
929
+ "type": "text",
930
+ "bbox": [
931
+ 0.111,
932
+ 0.445,
933
+ 0.884,
934
+ 0.504
935
+ ],
936
+ "angle": 0,
937
+ "content": "tasks (Activist, Monumental Memorialization, and Collective Synecdoche) the Per Code performance far exceeded the Full Codebook for 2 tasks, and was comparable for 1. For 2 other tasks, Mention of Scholarly Work and Coalition Building, we found that the Per Code configuration produced considerably higher agreement, whereas Full Codebook performed comparably to the Per Code in the remaining 4 tasks."
938
+ },
939
+ {
940
+ "type": "text",
941
+ "bbox": [
942
+ 0.111,
943
+ 0.515,
944
+ 0.884,
945
+ 0.628
946
+ ],
947
+ "angle": 0,
948
+ "content": "Coding fidelity improves when the model is prompted to justify its coding decisions. Consistent with other experiments with chain-of-thought (CoT) reasoning in LLMs, we found that coding agreement benefited strongly from prompting the model to explain itself [32]. Table 2 shows the effect of prompting for rationale on three pairs of conditions: Per Code GPT-4, Full Codebook GPT-4, and Per Code GPT-3.5. We found that across all codes and conditions, with one exception, CoT prompting produces higher or equivalent intercoder reliability with the gold standard. Using GPT-4, average Per Code agreement improved from 0.59 to 0.68, and average Full Codebook agreement improved from 0.46 to 0.60. Furthermore, a majority of pairs showed substantial improvement when the codes were assigned after providing reasoning for coding decisions."
949
+ },
950
+ {
951
+ "type": "title",
952
+ "bbox": [
953
+ 0.113,
954
+ 0.646,
955
+ 0.236,
956
+ 0.661
957
+ ],
958
+ "angle": 0,
959
+ "content": "3 Discussion"
960
+ },
961
+ {
962
+ "type": "text",
963
+ "bbox": [
964
+ 0.111,
965
+ 0.676,
966
+ 0.885,
967
+ 0.871
968
+ ],
969
+ "angle": 0,
970
+ "content": "Determining appropriate domains for LLM-assisted qualitative coding. Previous methods of automated text categorization, both supervised and unsupervised, rarely met the standards of traditional social scientists and humanists, and were instead generally employed by data scientists. Capturing meaning, particularly complex meaning, through machine learning has largely been an elusive goal [3]. Despite our own former skepticism, we predict that LLMs will be capable of applying most qualitative codebooks within the year. However, our results show that even within the scope of a single codebook, interpretation quality varies. Thus, different disciplines and domains should expect model success and the ease of transitioning a codebook to vary considerably. We suspect that more humanistic and \"softer\" scientific approaches will (continue to) be more resistant to machine interpretation than problems posed by scholars who identify with \"harder\" sciences, to say nothing of their ability to convince their peers of its validity. We do not oppose developing evaluation benchmarks for qualitative coding to assess which models are adept at what variety of task, but neither do we advocate it; meaning is manifold and emergent, and much of its beauty derives from its resistance to reduction and definition. Instead, we suggest those who wish to employ an LLM to perform content analysis survey similar attempts and simply experiment on their own. The process of discovering triumphs, workarounds, and limitations of working with these models was not only fascinating, but tremendously fun."
971
+ },
972
+ {
973
+ "type": "text",
974
+ "bbox": [
975
+ 0.111,
976
+ 0.884,
977
+ 0.884,
978
+ 0.914
979
+ ],
980
+ "angle": 0,
981
+ "content": "Practical aspects of transitioning to content analysis with LLMs. While artificial intelligence potentially opens up much larger datasets to qualitative scholars, there is still a considerable technical barrier to automating content"
982
+ },
983
+ {
984
+ "type": "page_number",
985
+ "bbox": [
986
+ 0.493,
987
+ 0.936,
988
+ 0.506,
989
+ 0.948
990
+ ],
991
+ "angle": 0,
992
+ "content": "7"
993
+ }
994
+ ],
995
+ [
996
+ {
997
+ "type": "header",
998
+ "bbox": [
999
+ 0.115,
1000
+ 0.043,
1001
+ 0.37,
1002
+ 0.058
1003
+ ],
1004
+ "angle": 0,
1005
+ "content": "Scalable Content Analysis with LLMs"
1006
+ },
1007
+ {
1008
+ "type": "table_caption",
1009
+ "bbox": [
1010
+ 0.308,
1011
+ 0.098,
1012
+ 0.688,
1013
+ 0.112
1014
+ ],
1015
+ "angle": 0,
1016
+ "content": "Table 3: Principles of prompting an LLM for qualitative coding."
1017
+ },
1018
+ {
1019
+ "type": "table",
1020
+ "bbox": [
1021
+ 0.115,
1022
+ 0.112,
1023
+ 0.884,
1024
+ 0.449
1025
+ ],
1026
+ "angle": 0,
1027
+ "content": "<table><tr><td colspan=\"2\">Task Instructions</td></tr><tr><td>Prompt for Rationale</td><td>Model fidelity improves when instructed to justify its coding decisions.</td></tr><tr><td>One Task Per Code</td><td>Model fidelity improves when given each code as a separate task.</td></tr><tr><td>Brevity</td><td>Shorter task descriptions are more likely to be faithfully executed by the model.</td></tr><tr><td>Structured Output</td><td>Instruct the model to format its output to ensure uniform responses.</td></tr><tr><td colspan=\"2\">Code Definitions</td></tr><tr><td>Word Choice</td><td>A single high-content word can be changed to align with the LLM&#x27;s built-in ontology.</td></tr><tr><td>Clause Order</td><td>Clauses are more likely to be observed when introduced earlier in the code description.</td></tr><tr><td>Mandates/Prohibitions</td><td>Both can be effective, but it is easier to get the model to “do” than “do not”.</td></tr><tr><td>Code Titles</td><td>Altering the code title can have a large effect even without altering the definition.</td></tr><tr><td>Interpretation Scope</td><td>Use words like “implicit” and “explicit” when interpretation is too limited or expansive.</td></tr><tr><td colspan=\"2\">Chain-of-Thought Prompt Sequence</td></tr><tr><td>1. Role Assignment</td><td>Supply the model its purpose, e.g., &quot;You will be applying category labels to passages.&quot;</td></tr><tr><td>2. Code Definition</td><td>Provide the code title(s) and description(s).</td></tr><tr><td>3. Justification</td><td>Request that the model provide evidence of its reasoning.</td></tr><tr><td>4. Decision</td><td>Instruct the model to list the codes that apply to the passage in a consistent format.</td></tr></table>"
1028
+ },
1029
+ {
1030
+ "type": "text",
1031
+ "bbox": [
1032
+ 0.111,
1033
+ 0.508,
1034
+ 0.884,
1035
+ 0.621
1036
+ ],
1037
+ "angle": 0,
1038
+ "content": "analysis. Development of an LLM adapted codebook is feasible for anyone regardless of technical skill by interacting with an LLM through chat-like Web platforms provided by proprietary model developers. However, systematically testing prompts or applying a completed codebook to the full dataset requires moderate skill in writing scripts in a language such as Python. Rather than suggest that all scholars become programmers, we encourage researchers to develop partnerships with students or community members seeking programming or research experience as a form of project-based education. Conversely, we suggest that data scientists pursue partnerships with traditional social scientists and humanists, who are often better positioned to develop coding schema to flush out complex meanings embedded in text, which are now more tractable to machine learning."
1039
+ },
1040
+ {
1041
+ "type": "text",
1042
+ "bbox": [
1043
+ 0.111,
1044
+ 0.636,
1045
+ 0.885,
1046
+ 0.832
1047
+ ],
1048
+ "angle": 0,
1049
+ "content": "Handling passages where model interpretation is poor. Overwhelmingly, GPT-4's interpretations were accurate and human-like. However, we found repeatedly that GPT-4, like a human reader, struggled with edge cases, especially where implicit information was required to make a judgment. We are encouraged by this finding, and argue that with automated analysis, fidelity is less important than it is with humans. Because statistical power increases with the number of observations, noise is more tolerable in machine-applied codes, as automated coding potentially increases sample size by orders of magnitude. Notably, this assumes that error is restricted to edge cases and is not otherwise systematically biased. We also advise against automated coding where datasets are small, as in interviews, where it is likely as efficient to code entirely by hand. As models improve and can provide confidence estimates for their statements [33-35], ML content analysis workflows should include manual review of passages with uncertain code assignments. Anecdotally, we found that GPT-4 could intelligently reflect on its responses when prompted to do so. When presented the output of another model instance, GPT-4, acting as an untrained \"critic\" model [36], was often able to identify when it had encountered an edge case without prompting, as well as recognize and revise obvious mistakes. Our experiences suggest that a human-in-the-loop tag-for-manual-review workflow or a two-step automated reflect-and-revise workflow may already be feasible with GPT-4 and similar models."
1050
+ },
1051
+ {
1052
+ "type": "title",
1053
+ "bbox": [
1054
+ 0.113,
1055
+ 0.852,
1056
+ 0.241,
1057
+ 0.868
1058
+ ],
1059
+ "angle": 0,
1060
+ "content": "4 Conclusion"
1061
+ },
1062
+ {
1063
+ "type": "text",
1064
+ "bbox": [
1065
+ 0.111,
1066
+ 0.884,
1067
+ 0.884,
1068
+ 0.914
1069
+ ],
1070
+ "angle": 0,
1071
+ "content": "Our results using state-of-the-art models lead us to recommend that scholars who do much qualitative coding consider automated coding with LLMs a potentially viable option today. We especially encourage skeptics to probe these tools'"
1072
+ },
1073
+ {
1074
+ "type": "page_number",
1075
+ "bbox": [
1076
+ 0.494,
1077
+ 0.936,
1078
+ 0.505,
1079
+ 0.948
1080
+ ],
1081
+ "angle": 0,
1082
+ "content": "8"
1083
+ }
1084
+ ],
1085
+ [
1086
+ {
1087
+ "type": "header",
1088
+ "bbox": [
1089
+ 0.114,
1090
+ 0.043,
1091
+ 0.37,
1092
+ 0.058
1093
+ ],
1094
+ "angle": 0,
1095
+ "content": "Scalable Content Analysis with LLMs"
1096
+ },
1097
+ {
1098
+ "type": "text",
1099
+ "bbox": [
1100
+ 0.111,
1101
+ 0.092,
1102
+ 0.888,
1103
+ 0.19
1104
+ ],
1105
+ "angle": 0,
1106
+ "content": "capacities, as it is useful to know their limitations. Over the next year, models such as those incorporating memory [37], (multi-)agential models that dialogue and revise prior to rendering output [38, 39], and architectures that can handle larger inputs [40], will almost certainly greatly improve upon GPT-4's current abilities. When those models are made available, researchers who have already experimented with LLMs will be best positioned to make use of the new tools. The efficiency of automation is compelling, but we are most enthusiastic about the ability to probe much larger datasets than ever before, potentially illuminating patterns too rare or too fuzzy to detect with a sample numbering in the tens or hundreds rather than thousands or beyond."
1107
+ },
1108
+ {
1109
+ "type": "title",
1110
+ "bbox": [
1111
+ 0.114,
1112
+ 0.21,
1113
+ 0.273,
1114
+ 0.227
1115
+ ],
1116
+ "angle": 0,
1117
+ "content": "Acknowledgments"
1118
+ },
1119
+ {
1120
+ "type": "text",
1121
+ "bbox": [
1122
+ 0.111,
1123
+ 0.242,
1124
+ 0.885,
1125
+ 0.286
1126
+ ],
1127
+ "angle": 0,
1128
+ "content": "The author thanks Harry Yan, Pat Wall, Patrick Kaminski, Adam Fisch, Alicia Chen, and Francisco Muñoz for their helpful comments toward improving this manuscript. I am especially grateful to Tania Ravaei for collaborating on codebook development."
1129
+ },
1130
+ {
1131
+ "type": "title",
1132
+ "bbox": [
1133
+ 0.115,
1134
+ 0.306,
1135
+ 0.212,
1136
+ 0.322
1137
+ ],
1138
+ "angle": 0,
1139
+ "content": "References"
1140
+ },
1141
+ {
1142
+ "type": "ref_text",
1143
+ "bbox": [
1144
+ 0.12,
1145
+ 0.33,
1146
+ 0.832,
1147
+ 0.346
1148
+ ],
1149
+ "angle": 0,
1150
+ "content": "[1] Anselm L Strauss. 1967. The discovery of grounded theory: Strategies for qualitative research. Aldine."
1151
+ },
1152
+ {
1153
+ "type": "ref_text",
1154
+ "bbox": [
1155
+ 0.123,
1156
+ 0.35,
1157
+ 0.751,
1158
+ 0.366
1159
+ ],
1160
+ "angle": 0,
1161
+ "content": "[2] Johnny Saldana. 2009. The coding manual for qualitative researchers. SAGE Publications."
1162
+ },
1163
+ {
1164
+ "type": "ref_text",
1165
+ "bbox": [
1166
+ 0.123,
1167
+ 0.371,
1168
+ 0.848,
1169
+ 0.387
1170
+ ],
1171
+ "angle": 0,
1172
+ "content": "[3] Momin M Malik. 2020. A hierarchy of limitations in machine learning. arXiv preprint arXiv:2002.05193."
1173
+ },
1174
+ {
1175
+ "type": "ref_text",
1176
+ "bbox": [
1177
+ 0.123,
1178
+ 0.391,
1179
+ 0.885,
1180
+ 0.42
1181
+ ],
1182
+ "angle": 0,
1183
+ "content": "[4] Laura K Nelson. 2017. Computational Grounded Theory: A methodological framework. Sociological Methods & Research, 49(1):3-42."
1184
+ },
1185
+ {
1186
+ "type": "ref_text",
1187
+ "bbox": [
1188
+ 0.123,
1189
+ 0.424,
1190
+ 0.885,
1191
+ 0.455
1192
+ ],
1193
+ "angle": 0,
1194
+ "content": "[5] Ankita Dhar, Himadri Mukherjee, Niladri Sekhar Dash, and Kaushik Roy. 2021. Text categorization: past and present. Artificial Intelligence Review, 54:3007-3054."
1195
+ },
1196
+ {
1197
+ "type": "ref_text",
1198
+ "bbox": [
1199
+ 0.123,
1200
+ 0.459,
1201
+ 0.885,
1202
+ 0.502
1203
+ ],
1204
+ "angle": 0,
1205
+ "content": "[6] Sébastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712."
1206
+ },
1207
+ {
1208
+ "type": "ref_text",
1209
+ "bbox": [
1210
+ 0.123,
1211
+ 0.507,
1212
+ 0.885,
1213
+ 0.55
1214
+ ],
1215
+ "angle": 0,
1216
+ "content": "[7] Bernardino Romero-Paredes, Mohammadamin Barekatain, Alexander Novikov, Matej Balog, M Pawan Kumar, Emilien Dupont, Francisco JR Ruiz, Jordan S Ellenberg, Pengming Wang, Omar Fawzi, et al. 2023. Mathematical discoveries from program search with large language models. Nature, 625:1-3."
1217
+ },
1218
+ {
1219
+ "type": "ref_text",
1220
+ "bbox": [
1221
+ 0.123,
1222
+ 0.555,
1223
+ 0.885,
1224
+ 0.598
1225
+ ],
1226
+ "angle": 0,
1227
+ "content": "[8] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. 2023. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805."
1228
+ },
1229
+ {
1230
+ "type": "ref_text",
1231
+ "bbox": [
1232
+ 0.123,
1233
+ 0.603,
1234
+ 0.885,
1235
+ 0.647
1236
+ ],
1237
+ "angle": 0,
1238
+ "content": "[9] Ziang Xiao, Xingdi Yuan, Q Vera Liao, Rania Abdelghani, and Pierre-Yves Oudeyer. 2023. Supporting qualitative analysis with large language models: Combining codebook with GPT-3 for deductive coding. In Companion Proceedings of the 28th International Conference on Intelligent User Interfaces."
1239
+ },
1240
+ {
1241
+ "type": "ref_text",
1242
+ "bbox": [
1243
+ 0.115,
1244
+ 0.651,
1245
+ 0.885,
1246
+ 0.681
1247
+ ],
1248
+ "angle": 0,
1249
+ "content": "[10] Robert Chew, John Bollenbacher, Michael Wenger, Jessica Speer, and Annice Kim. 2023. LLM-assisted content analysis: Using large language models to support deductive coding. arXiv preprint arXiv:2306.14924."
1250
+ },
1251
+ {
1252
+ "type": "ref_text",
1253
+ "bbox": [
1254
+ 0.114,
1255
+ 0.685,
1256
+ 0.885,
1257
+ 0.715
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": "[11] Shih-Chieh Dai, Aiping Xiong, and Lun-Wei Ku. 2023. LLM-in-the-loop: Leveraging large language model for thematic analysis. arXiv preprint arXiv:2310.15100."
1261
+ },
1262
+ {
1263
+ "type": "ref_text",
1264
+ "bbox": [
1265
+ 0.114,
1266
+ 0.719,
1267
+ 0.885,
1268
+ 0.763
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": "[12] Robert H Tai, Lillian R Bentley, Xin Xia, Jason M Sitt, Sarah C Fankhauser, Ana M Chicas-Mosier, and Barnas G Monteith. 2023. An examination of the use of large language models to aid analysis of textual data. bioRxiv preprint bioRxiv:2023.07.17.549361."
1272
+ },
1273
+ {
1274
+ "type": "ref_text",
1275
+ "bbox": [
1276
+ 0.114,
1277
+ 0.767,
1278
+ 0.885,
1279
+ 0.797
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "[13] Ammar Ismael Kadhim. 2019. Survey on supervised machine learning techniques for automatic text classification. Artificial Intelligence Review, 52(1):273-292."
1283
+ },
1284
+ {
1285
+ "type": "ref_text",
1286
+ "bbox": [
1287
+ 0.114,
1288
+ 0.801,
1289
+ 0.885,
1290
+ 0.831
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "[14] David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3(1):993-1022."
1294
+ },
1295
+ {
1296
+ "type": "ref_text",
1297
+ "bbox": [
1298
+ 0.114,
1299
+ 0.835,
1300
+ 0.885,
1301
+ 0.878
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "[15] Hamed Jelodar, Yongli Wang, Chi Yuan, Xia Feng, Xiahui Jiang, Yanchao Li, and Liang Zhao. 2019. Latent Dirichlet allocation (LDA) and topic modeling: Models, applications, a survey. Multimedia Tools and Applications, 78:15169-15211."
1305
+ },
1306
+ {
1307
+ "type": "ref_text",
1308
+ "bbox": [
1309
+ 0.114,
1310
+ 0.883,
1311
+ 0.885,
1312
+ 0.913
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "[16] Maarten Grootendorst. 2022. BERTopic: Neural topic modeling with a class-based TF-IDF procedure. arXiv preprint arXiv:2203.05794."
1316
+ },
1317
+ {
1318
+ "type": "list",
1319
+ "bbox": [
1320
+ 0.114,
1321
+ 0.33,
1322
+ 0.885,
1323
+ 0.913
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": null
1327
+ },
1328
+ {
1329
+ "type": "page_number",
1330
+ "bbox": [
1331
+ 0.493,
1332
+ 0.936,
1333
+ 0.506,
1334
+ 0.948
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "9"
1338
+ }
1339
+ ],
1340
+ [
1341
+ {
1342
+ "type": "header",
1343
+ "bbox": [
1344
+ 0.115,
1345
+ 0.043,
1346
+ 0.37,
1347
+ 0.058
1348
+ ],
1349
+ "angle": 0,
1350
+ "content": "Scalable Content Analysis with LLMs"
1351
+ },
1352
+ {
1353
+ "type": "ref_text",
1354
+ "bbox": [
1355
+ 0.115,
1356
+ 0.091,
1357
+ 0.885,
1358
+ 0.135
1359
+ ],
1360
+ "angle": 0,
1361
+ "content": "[17] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901."
1362
+ },
1363
+ {
1364
+ "type": "ref_text",
1365
+ "bbox": [
1366
+ 0.115,
1367
+ 0.138,
1368
+ 0.885,
1369
+ 0.182
1370
+ ],
1371
+ "angle": 0,
1372
+ "content": "[18] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. LLaMa: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971."
1373
+ },
1374
+ {
1375
+ "type": "ref_text",
1376
+ "bbox": [
1377
+ 0.116,
1378
+ 0.185,
1379
+ 0.885,
1380
+ 0.214
1381
+ ],
1382
+ "angle": 0,
1383
+ "content": "[19] MistralAI. 2023. Mixtral of experts: A high quality sparse mixture-of-experts. https://mistral.ai/news/mixtral-of-experts. Accessed: 2024-01-13."
1384
+ },
1385
+ {
1386
+ "type": "ref_text",
1387
+ "bbox": [
1388
+ 0.116,
1389
+ 0.217,
1390
+ 0.828,
1391
+ 0.233
1392
+ ],
1393
+ "angle": 0,
1394
+ "content": "[20] Anthropic. 2023. Claude 2. https://www.anthropic.com/index/claude-2. Accessed: 2024-01-18."
1395
+ },
1396
+ {
1397
+ "type": "ref_text",
1398
+ "bbox": [
1399
+ 0.116,
1400
+ 0.236,
1401
+ 0.885,
1402
+ 0.265
1403
+ ],
1404
+ "angle": 0,
1405
+ "content": "[21] Jan Nowakowski and Jan Keller. 2024. AI-powered patching: The future of automated vulnerability fixes. Google Security Engineering Technical Report."
1406
+ },
1407
+ {
1408
+ "type": "ref_text",
1409
+ "bbox": [
1410
+ 0.116,
1411
+ 0.268,
1412
+ 0.885,
1413
+ 0.298
1414
+ ],
1415
+ "angle": 0,
1416
+ "content": "[22] Carlos Gómez-Rodríguez and Paul Williams. 2023. A confederacy of models: A comprehensive evaluation of LLMs on creative writing. arXiv preprint arXiv:2310.08433."
1417
+ },
1418
+ {
1419
+ "type": "ref_text",
1420
+ "bbox": [
1421
+ 0.116,
1422
+ 0.301,
1423
+ 0.794,
1424
+ 0.317
1425
+ ],
1426
+ "angle": 0,
1427
+ "content": "[23] Manon Bischoff. 2024. AI matches the abilities of the best Math Olympians. Scientific American."
1428
+ },
1429
+ {
1430
+ "type": "ref_text",
1431
+ "bbox": [
1432
+ 0.116,
1433
+ 0.32,
1434
+ 0.885,
1435
+ 0.35
1436
+ ],
1437
+ "angle": 0,
1438
+ "content": "[24] Werner Vach and Oke Gerke. 2023. Gwet's AC1 is not a substitute for Cohen's kappa – A comparison of basic properties. MethodsX, 10:102212."
1439
+ },
1440
+ {
1441
+ "type": "ref_text",
1442
+ "bbox": [
1443
+ 0.116,
1444
+ 0.352,
1445
+ 0.885,
1446
+ 0.383
1447
+ ],
1448
+ "angle": 0,
1449
+ "content": "[25] Etienne Ollion, Rubing Shen, Ana Macanovic, and Arnault Chatelain. 2023. ChatGPT for text annotation? Mind the hype! SocArXiv preprint doi:10.31235/osf.io/x58kn."
1450
+ },
1451
+ {
1452
+ "type": "ref_text",
1453
+ "bbox": [
1454
+ 0.116,
1455
+ 0.385,
1456
+ 0.768,
1457
+ 0.401
1458
+ ],
1459
+ "angle": 0,
1460
+ "content": "[26] Anselm Strauss and Juliet M Corbin. 1997. Grounded theory in practice. SAGE Publications."
1461
+ },
1462
+ {
1463
+ "type": "ref_text",
1464
+ "bbox": [
1465
+ 0.116,
1466
+ 0.404,
1467
+ 0.743,
1468
+ 0.42
1469
+ ],
1470
+ "angle": 0,
1471
+ "content": "[27] OpenAI. 2023. GPT-4. https://openai.com/research/gpt-4. Accessed: 2024-01-18."
1472
+ },
1473
+ {
1474
+ "type": "ref_text",
1475
+ "bbox": [
1476
+ 0.116,
1477
+ 0.422,
1478
+ 0.885,
1479
+ 0.452
1480
+ ],
1481
+ "angle": 0,
1482
+ "content": "[28] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. 2024. Self-rewarding language models. arXiv preprint arXiv:2401.10020."
1483
+ },
1484
+ {
1485
+ "type": "ref_text",
1486
+ "bbox": [
1487
+ 0.116,
1488
+ 0.454,
1489
+ 0.885,
1490
+ 0.485
1491
+ ],
1492
+ "angle": 0,
1493
+ "content": "[29] Sondos Mahmoud Bsharat, Aidar Myrzakhan, and Zhiqiang Shen. 2023. Principled instructions are all you need for questioning LLaMA-1/2, GPT-3.5/4. arXiv preprint arXiv:2312.16171."
1494
+ },
1495
+ {
1496
+ "type": "ref_text",
1497
+ "bbox": [
1498
+ 0.116,
1499
+ 0.488,
1500
+ 0.885,
1501
+ 0.53
1502
+ ],
1503
+ "angle": 0,
1504
+ "content": "[30] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837."
1505
+ },
1506
+ {
1507
+ "type": "ref_text",
1508
+ "bbox": [
1509
+ 0.116,
1510
+ 0.533,
1511
+ 0.885,
1512
+ 0.564
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "[31] Aman Madaan and Amir Yazdanbakhsh. 2022. Text and patterns: For effective chain of thought, it takes two to tango. arXiv preprint arXiv:2209.07686."
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.116,
1521
+ 0.567,
1522
+ 0.885,
1523
+ 0.609
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "[32] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. 2023. A survey of chain of thought reasoning: Advances, frontiers and future. arXiv preprint arXiv:2309.15402."
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.116,
1532
+ 0.612,
1533
+ 0.885,
1534
+ 0.643
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "[33] Zhen Lin, Shubhendu Trivedi, and Jimeng Sun. 2023. Generating with confidence: Uncertainty quantification for black-box large language models. arXiv preprint arXiv:2305.19187."
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.116,
1543
+ 0.645,
1544
+ 0.885,
1545
+ 0.675
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "[34] Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. 2023. Navigating the grey area: Expressions of overconfidence and uncertainty in language models. arXiv preprint arXiv:2302.13439."
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.116,
1554
+ 0.678,
1555
+ 0.885,
1556
+ 0.72
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "[35] Chen, Jiefeng and Yoon, Jinsung. 2024. Introducing ASPIRE for selective prediction in LLMs. https://blog.research.google/2024/01/introducing-aspire-for-selective.html?m=1. Accessed: 2024-01-20."
1560
+ },
1561
+ {
1562
+ "type": "ref_text",
1563
+ "bbox": [
1564
+ 0.116,
1565
+ 0.724,
1566
+ 0.885,
1567
+ 0.755
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": "[36] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. REFINER: Reasoning feedback on intermediate representations. arXiv preprint arXiv:2304.01904."
1571
+ },
1572
+ {
1573
+ "type": "ref_text",
1574
+ "bbox": [
1575
+ 0.116,
1576
+ 0.757,
1577
+ 0.885,
1578
+ 0.787
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "[37] Noah Shinn, Beck Labash, and Ashwin Gopinath. 2023. Reflexion: An autonomous agent with dynamic memory and self-reflection. arXiv preprint arXiv:2303.11366."
1582
+ },
1583
+ {
1584
+ "type": "ref_text",
1585
+ "bbox": [
1586
+ 0.116,
1587
+ 0.789,
1588
+ 0.885,
1589
+ 0.82
1590
+ ],
1591
+ "angle": 0,
1592
+ "content": "[38] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2022. ReAct: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629."
1593
+ },
1594
+ {
1595
+ "type": "ref_text",
1596
+ "bbox": [
1597
+ 0.116,
1598
+ 0.822,
1599
+ 0.885,
1600
+ 0.853
1601
+ ],
1602
+ "angle": 0,
1603
+ "content": "[39] Zhenran Xu, Senbao Shi, Baotian Hu, Jindi Yu, Dongfang Li, Min Zhang, and Yuxiang Wu. 2023. Towards reasoning in large language models via multi-agent peer review collaboration. arXiv preprint arXiv:2311.08152."
1604
+ },
1605
+ {
1606
+ "type": "ref_text",
1607
+ "bbox": [
1608
+ 0.116,
1609
+ 0.855,
1610
+ 0.885,
1611
+ 0.884
1612
+ ],
1613
+ "angle": 0,
1614
+ "content": "[40] Albert Gu and Tri Dao. 2023. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752."
1615
+ },
1616
+ {
1617
+ "type": "list",
1618
+ "bbox": [
1619
+ 0.115,
1620
+ 0.091,
1621
+ 0.885,
1622
+ 0.884
1623
+ ],
1624
+ "angle": 0,
1625
+ "content": null
1626
+ },
1627
+ {
1628
+ "type": "page_number",
1629
+ "bbox": [
1630
+ 0.49,
1631
+ 0.936,
1632
+ 0.508,
1633
+ 0.948
1634
+ ],
1635
+ "angle": 0,
1636
+ "content": "10"
1637
+ }
1638
+ ]
1639
+ ]
2401.15xxx/2401.15170/2029ac39-a848-424b-a575-67afb88416fa_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d52cb5b4e049dd31f543f6ce71e93fd2eea7553a4b322a272670c0d12a7daee
3
+ size 924345
2401.15xxx/2401.15170/full.md ADDED
@@ -0,0 +1,198 @@
1
+ # Scalable Qualitative Coding with LLMs: Chain-of-Thought Reasoning Matches Human Performance in Some Hermeneutic Tasks
2
+
3
+ Zackary Okun Dunivin
4
+
5
+ Center for Complex Networks and Systems Research
6
+
7
+ Luddy School of Informatics, Computer Science, and Engineering
8
+
9
+ Department of Sociology
10
+
11
+ Indiana University
12
+
13
+ Bloomington, Indiana, US
14
+
15
+ zdunivin@iu.edu
16
+
17
+ # ABSTRACT
18
+
19
+ Qualitative coding, or content analysis, extracts meaning from text to discern quantitative patterns across a corpus of texts. Recently, advances in the interpretive abilities of large language models (LLMs) offer potential for automating the coding process (applying category labels to texts), thereby enabling human researchers to concentrate on more creative research aspects, while delegating these interpretive tasks to AI. Our case study comprises a set of socio-historical codes on dense, paragraph-long passages representative of a humanistic study. We show that GPT-4 is capable of human-equivalent interpretations, whereas GPT-3.5 is not. Compared to our human-derived gold standard, GPT-4 delivers excellent intercoder reliability (Cohen's $\kappa \geq 0.79$ ) for 3 of 9 codes, and substantial reliability ( $\kappa \geq 0.6$ ) for 8 of 9 codes. In contrast, GPT-3.5 greatly underperforms for all codes ( $mean(\kappa) = 0.34$ ; $max(\kappa) = 0.55$ ). Importantly, we find that coding fidelity improves considerably when the LLM is prompted to give rationale justifying its coding decisions (chain-of-thought reasoning). We present these and other findings along with a set of best practices for adapting traditional codebooks for LLMs. Our results indicate that for certain codebooks, state-of-the-art LLMs are already adept at large-scale content analysis. Furthermore, they suggest the next generation of models will likely render AI coding a viable option for a majority of codebooks.
20
+
21
+ # 1 Introduction
22
+
23
+ Text categorization, commonly referred to as content analysis and qualitative coding in the social sciences, plays an important role in scholarly research and industrial applications. This process traditionally relies on human expertise to interpret the nuanced and often complex meanings embedded in texts [1, 2]. The difficulty lies in the multifaceted nature of meaning and the challenge of fitting real-world complexity into discrete categories, even for skilled readers. Historically, these challenges have positioned text categorization as a task unsuitable for machine learning approaches [3], despite robust attempts [4, 5].
24
+
25
+ Recent developments in artificial intelligence, notably the advent of transformers with billions of parameters known as large language models (LLMs), have begun to challenge this notion. These models demonstrate increasing capabilities in knowledge, interpretation, reasoning, and creativity expressed in natural language, approaching or even surpassing human performance [6-8]. The processing speed of artificial intelligence opens up the possibility of categorizing vast quantities of text, far exceeding the limitations of human coding teams restricted to smaller samples. Yet, this opportunity raises a critical question: how can we ensure and maintain the accuracy of machine categorization at a level comparable to human standards?
26
+
27
+ This study provides the strongest evidence to date that machines are capable of human-quality interpretations of text for the purposes of qualitative coding. Additionally, our report serves as a practical guide to employing LLMs in text categorization and as a reference for those encountering machine-assisted qualitative coding in empirical research. We contribute to the growing body of work that builds confidence in the rigor of LLM-based text categorization [9-12], a field that will expand as these models continue to evolve. Our report emphasizes the redesign of codebooks (comprising category descriptions and coding instructions) specifically for LLMs. We demonstrate how the structure of prompts, the specific requests made to the generative model for categorizing passages, significantly impacts coding fidelity. Even as these models continue to rapidly improve, we expect most of the principles of prompt design we report will remain useful and informative as methodologists explore new models and empiricists automate their coding workflows. Our results are presented through narratives detailing our approach and highlighting potential challenges, and are demonstrated by comparing LLM-generated analyses to a human-derived gold standard. A summary of best practices for content analysis with an LLM is also presented in tabular format for quick reference.
30
+
31
+ Key findings of our study include:
32
+
33
+ - GPT-4 exhibits human-equivalent performance with zero-shot prompts. 8 of 9 tasks exceed the 0.6 threshold for substantial agreement using Cohen's $\kappa$ . 3 of 9 tasks exceed the 0.75 threshold for excellent agreement.
34
+ - GPT-3.5, when given the same prompts, has an average intercoder reliability of 0.34 across all codes.
35
+ - Codebooks designed for human coders need reworking for LLM application, requiring iterative manual testing to refine phrasing and improve model comprehension.
36
+ - Agreement improves when the LLM provides rationale for code assignments: $\mu (\kappa) = 0.68$ vs. $\mu (\kappa) = 0.59$ .
37
+ - Agreement improves when presenting each code as a separate prompt, rather than the codebook as a whole: $\mu (\kappa) = 0.68$ vs. $\mu (\kappa) = 0.60$ .
38
+
39
+ # 1.1 Automating Content Analysis: Past and Present
40
+
41
+ Prior work on automating content analysis entailed training machine learning models on large quantities of text. Supervised models, typically some form of linear regression, learn to associate text features with user-specified categories [13]. This process captures half the traditional human-coded process by using human-derived codes and examples, but it fails to leverage the abstract code descriptions found in a codebook and still requires large quantities of human-annotated data. Unsupervised models, such as LDA [14, 15] or BERTopic [16], develop their own categorizations from unlabeled training sets. This process does not require time-intensive labeling, but rarely captures the specific categories that a researcher intends to target.
42
+
43
+ The latest generation of LLMs (e.g., GPT [17], LLaMa [18], Mistral [19], Claude [20]) differs notably from previous machine learning models in that these models can perform new tasks specified through natural language prompts. A user can specify a task that the model was not trained on, give few (single digit) or no examples, and the model will return output conforming to the specifications. Demonstrated successes include computer code generation [21], creative writing [22], and quantitative reasoning [23]. We are only beginning to understand and expand upon the limitations of these models. By converting natural language requests into highly intelligent output across vast and indeterminate domains, LLMs lower the technical barriers to machine learning by making its application more naturalistic and eliminating the need for large training datasets. Beyond this, LLMs' capacities in many domains far exceed those of the specialized machine learning models that preceded them, suggesting that for many applications, including scholarly inquiry, artificial intelligence is overwhelmingly more accessible and capable in 2024 than it was just two years prior.
44
+
45
+ Early studies of content analysis with LLMs are encouraging. Xiao et al. [9] demonstrate moderate success, Cohen's $\kappa = 0.61$ and $\kappa = 0.38$, in two linguistic tasks using GPT-3. Chew et al. [10] report high success on many of 19 tasks across three datasets, and results that are indistinguishable from random for others. It is difficult to evaluate their results due to the choice of Gwet's AC1, which is biased toward agreement on negative codings rather than positive, whereas most standard measures of intercoder reliability do the opposite [24]. However, we commend Chew et al.'s approach to adapting codebooks for LLMs, which is communicated with great detail and clarity. A survey of 20 empirical pieces reports "mixed-results" of using GPT-3 to automate "text annotation," a term that ties their framing to "data annotation", labeling data for use in machine learning [25], rather than content analysis in the tradition of grounded theory [26].
46
+
47
+ We present here three advances to these studies. 1) We report the first methodological account of automating qualitative coding using GPT-4, which, along with other recent models, greatly improves upon many of GPT-3's capabilities [19, 27, 28]. 2) We provide the first conclusive evidence that LLMs are capable of human-equivalent performance in qualitative coding, and do so on larger passages of text, wherein meaning is often woven through multiple interrelated clauses. 3) We demonstrate that GPT is better at interpreting text when it is tasked with justifying its coding decisions (chain-of-thought prompting) rather than applying codes without an accompanying explanation.
48
+
49
+ # 1.2 Case Study: W.E.B. Du Bois's Characterization in News Media
50
+
51
+ In order to present a realistic challenge of using an LLM to do qualitative coding, we make a case study of our own work. We adapted a codebook written by the authors to understand how the scholar and activist W.E.B. Du Bois has been characterized in news media over time. The codebook is composed of 9 codes in 3 categories. Due to multiple layers of agency (who is doing what) and voice (who is saying what), the tasks are difficult even for human interpreters. Applying the codes is also complicated because it can be difficult to differentiate Du Bois's scholarship from his political activism, as Du Bois's theoretical contributions have profound implications for understanding race and the social-historical position of Black persons in the United States and beyond, making them powerful activist tools. We are particularly interested in understanding how different facets of Du Bois's activities contributed to his canonization in the public imagination as the preeminent figure for understanding Black political struggle. Table 1 gives the codes in brief. Complete examples of the original human and modified-for-GPT codebook are included in the appendix.
52
+
53
+ The training and test data for our study were random samples of passages from New York Times articles (1970-2023) that mention W.E.B. Du Bois. 232 passages were automatically extracted as concurrent paragraphs containing "Du Bois". The average number of words was 94 ( $\sigma = 70$ ), and the average number of sentences was 3.75 ( $\sigma = 2.88$ ). To give a better sense of the size of our passages, this paragraph has 76 words across 4 sentences.
54
+
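+ As an illustration only (not the extraction pipeline used in this study), a passage-extraction and length-summary step of this kind might be sketched in Python as follows; the paragraph-splitting and sentence-counting heuristics are assumptions:
+
+ ```python
+ # Sketch: pull paragraphs mentioning "Du Bois" from article texts and
+ # summarize passage length. Paragraphs are assumed to be separated by
+ # blank lines; sentence counting uses a crude punctuation heuristic.
+ import re
+ import statistics
+
+ def extract_passages(article_text, needle="Du Bois"):
+     paragraphs = [p.strip() for p in article_text.split("\n\n") if p.strip()]
+     return [p for p in paragraphs if needle in p]
+
+ def passage_stats(passages):
+     words = [len(p.split()) for p in passages]
+     sentences = [max(1, len(re.findall(r"[.!?]+", p))) for p in passages]
+     return {
+         "mean_words": statistics.mean(words),
+         "sd_words": statistics.stdev(words),
+         "mean_sentences": statistics.mean(sentences),
+         "sd_sentences": statistics.stdev(sentences),
+     }
+ ```
+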
55
+ Table 1: Categories and descriptions for 9 codes.
56
+
57
+ <table><tr><td colspan="2">Characterization of Du Bois</td></tr><tr><td>Scholar</td><td>Describes Du Bois as a scholar or intellectual.</td></tr><tr><td>Activist</td><td>Refers to Du Bois&#x27;s political or social activism.</td></tr><tr><td colspan="2">General Themes</td></tr><tr><td>Monumental Memorialization</td><td>Refers to an enduring cultural object named after Du Bois.</td></tr><tr><td>Mention of Scholarly Work</td><td>Mentions or quotes specific academic works by Du Bois.</td></tr><tr><td>Social/Political Advocacy</td><td>Mentions or implies social or political activism, advocacy, or critique.</td></tr><tr><td colspan="2">Canonization Processes</td></tr><tr><td>Coalition Building</td><td>Refers to Du Bois&#x27;s activities with activist or academic organizations.</td></tr><tr><td>Out of the Mouth of Academics</td><td>Describes an academic organization engaging with Du Bois&#x27;s legacy.</td></tr><tr><td>Out of the Mouth of Activists</td><td>Describes an activist organization engaging with Du Bois&#x27;s legacy.</td></tr><tr><td>Collective Synecdoche</td><td>Mentions Du Bois alongside other figures in order to represent some facet of a culture, era, or ideology.</td></tr></table>
58
+
59
+ # 2 Results
60
+
61
+ # 2.1 Adapting a Codebook for an LLM
62
+
63
+ Initially, we developed a codebook for human coders using standard methods. This process involved exploratory reading to define and refine codes. Codes were derived to probe particular substantive hypotheses, some of which preceded our exploratory reading, and others which resulted from it. We then applied these codes iteratively, adjusting them as needed until we achieved high intercoder reliability with a test set. We adapted these code descriptions for use with a large language model, evaluating the LLM's performance on a training set of text passages. Where we found ambiguities or deficiencies in the model's interpretation, we refined the code descriptions accordingly. This iterative process of definition, evaluation, and refinement follows Nelson's Computational Grounded Theory paradigm [4], the core of which is common to all qualitative code development processes whether or not coding is automated [26].
64
+
65
+ Our experience modifying the code descriptions yielded several key insights related in the following paragraphs. We encourage readers interested in a fuller account of this process to read Chew et al.'s study [10] describing their process of LLM-Assisted Content Analysis (LACA), which relates a process similar to our own in greater detail.
66
+
67
+ ![](images/19742177267e2154927c4106a2d111bd27e982eef3b3973235c3e8fa5fa4c94f.jpg)
68
+ Figure 1: The chain-of-thought prompt sequence.
69
+
70
+ LLM-generated rationales are essential for evaluating performance. In adapting the codebook, we wanted to understand not just which codes the model struggled to interpret correctly, but what aspects of the code the model failed to capture. To achieve this, we structured our prompts to require GPT to justify its decision to apply or not apply each code. These rationales were invaluable. They often highlighted parts of the code description that were ambiguous or imprecisely defined, leading the model to misinterpret them. Whenever a rationale repeatedly pointed to such an issue, we revised the corresponding code. We then retested the passage to check that the code was correctly applied and that the rationale aligned with our intended interpretation of the code. Sometimes a revision would not improve the interpretations for the passages in question; other times it would fix the interpretations for those passages, but would introduce new problems in passages which were previously coded correctly.
71
+
72
+ Figure 1 demonstrates an effective method of prompting GPT to provide rationales for its code selections. The initial instruction is given by the Justification section of the prompt, and solicited again in the Decision/Formatting box.
73
+
74
+ LLMs require more precise descriptions than do human readers. Human coders do not rely solely on a written codebook. Their interpretation of the codes is enriched through the codebook development process, discussions with fellow coders, and supplementary oral instructions. An LLM lacks this interactive and historical context and must interpret codes entirely from written descriptions. Our work modifying the codebook for GPT revealed information that, while implicitly understood by the code developers, wasn't explicitly stated in the code descriptions. This process not only aided in refining the codebook for automated coding, but also improved our own understandings of the codes. This ultimately led to clearer definition of the codes, thereby enhancing future manual coding processes as well. Figure 2 demonstrates how the Monumental Memorialization and Social/Political Advocacy codes were redefined to improve GPT's comprehension.
75
+
76
+ Often, we encountered phrasing whose intended meaning was obvious to humans but ambiguous or challenging for the LLM. Our codebook contains two codes that relate to Du Bois's reputation among academics and activists. These codes are meant to evaluate whether Du Bois appears in a news story because either an academic or activist mentioned him. Initially, we titled one of these codes "Academic Repute," which worked well for human coders. GPT, however, consistently misinterpreted
77
+
78
+ # Original: Activist
79
+
80
+ Apply when Du Bois is developing activist organizations, giving public speeches, participating in meetings with politicians and organizers, running for office, promoting a candidate, organization or initiative. Also apply when Du Bois is explicitly described as an activist or leader.
81
+
82
+ # Redefined: Activist
83
+
84
+ Apply this code when Du Bois is explicitly called an "activist" or "leader", or when his political or social activism is either explicitly noted or clearly implied through context. Examples include being mentioned in the context of leadership, activism, developing activist organizations, giving public speeches, participating in meetings with politicians and organizers, running for office, or promoting a candidate, organization, or initiative.
85
+
86
+ # Original: Social/Political Activism
87
+
88
+ Refers to any form of social or political participation in promoting change in society. Can be used in conjunction with Du Bois' scholarly work, specifically when Du Bois's ideas are used to frame Black political struggle. Can be used
89
+ to describe Du Bois' political work or those who invoke Du Bois in service of their activism.
90
+
91
+ # Redefined: Social/Political Advocacy
92
+
93
+ This code applies when a passage mentions or implies any form of social or political activism, advocacy, critique, or discourse, including discussions about current or historical social problems. This includes not only direct activism of Du Bois and others, but also the framing and challenging of social norms,
94
+ historical narratives, and racial or cultural identities. Apply this code when Du Bois's work, persona, or ideas are invoked in discussions that critically engage with Black identity, positionality, or broader systemic circumstances of Black people. Adjacency to other activists, such as inclusion in a list, is insufficient; advocacy must be explicitly mentioned in the passage.
95
+
96
+ Figure 2: Two examples of prompt redefinition. Colored, alphabetically labeled blocks of text show alterations derived through iterative code refinement. Italics draw attention to directions that constrain interpretive scope to implicit or explicit information.
97
+
98
+ this code as pertaining to Du Bois's esteem as an academic, rather than among or by academics (the meaning of "among" remains ambiguous even here). We tried numerous iterations of this code without success. Nevertheless, altering the title of the code to the far more literal "Out of the Mouth of Academics" dramatically improved performance, even when paired with the original code description. In another case, the code titled "Social/Political Activism" was revised to "Social/Political Advocacy" (Figure 2, C) because GPT did not consider social critique to be a form of activism, even when it was specifically instructed to.
99
+
100
+ We found that words indicating how much the model should draw on context or its own outside knowledge had large impacts on the model's outputs, often to the desired effect. In particular, instructing the model to restrict itself to "explicit" meanings, or to draw on "implicit" meanings, often helped the model with part of a code description it had struggled with. Figure 2 B and D demonstrate the addition of such verbiage to control scope.
101
+
102
+ Both mandatory (do) and prohibitory (do not) phrasing were observed by the model, though mandatory phrasing seemed more successful, a finding reported by other researchers [29]. The ordering of directives also impacted how likely the model was to follow them. We found that moving a phrase that was ignored in the coding rationale toward the front of the definition made the model more likely to follow its specifications, as in Figure 2 A. When a very specific problem was observed repeatedly, it was sometimes necessary to add a directive to correct it, as in Figure 2 H.
103
+
104
+ Prompting for machine-readable output. To fully automate the coding process, model output must be reliably readable by a computer. The LLM generates text, which must be interpreted by another script into a data structure, such as a table, for further analysis. Instructing exactly how to format the output produced machine-readable results with
105
+
106
+ GPT-4 and GPT-3.5. Critically, this involves specifying a tag that the interpreting script locates, after which follows a reliably formatted list of codes. The Decision/ Formatting component Figure 1 illustrates how to constrain model output and produce consistent results across queries. Additionally, because GPT tends to be excessively verbose and summarize its output, particularly at higher temperatures, we informed the model that we do not want any output to follow the code list.
107
+
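+ For illustration, such a parsing step can be a short Python function; the 'CODES:' tag below is an assumed convention, not necessarily the tag used in this study:
+
+ ```python
+ # Sketch: locate the tagged final line of the model's reply and read off the
+ # comma-separated list of codes it contains.
+ def parse_codes(reply, tag="CODES:"):
+     for line in reply.splitlines():
+         if line.strip().upper().startswith(tag.upper()):
+             listed = line.strip()[len(tag):]
+             return [c.strip() for c in listed.split(",")
+                     if c.strip() and c.strip().lower() != "none"]
+     return []  # no tagged line found; flag the reply for manual review
+ ```
+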
108
+ # 2.2 Selecting a Model and Writing Prompts for Optimal Performance
109
+
110
+ Once the code descriptions have been revised for LLM text categorization, numerous other decisions remain about how to prompt a model to execute the content analysis. We present these as a separate step for the sake of clarity, but in reality, we developed our approach iteratively and in tandem with revising the code descriptions. We hope future methodologists and empiricists will benefit from what we learned during this process, and that less exploration of these components will be necessary so practitioners can focus on application or on exploring calibrations not examined here. We summarize all our recommendations for qualitative coding with an LLM in Table 3.
111
+
112
+ There is a large and growing body of academic and nonacademic literature on prompt engineering: constructing user-defined input to elicit the best model output. In fact, the codebook adaptation in the previous section was in large part an exercise in prompt engineering. However, in this second section, prompt engineering refers more to the broader context of the task description than to the code definitions. In this section we report how different prompts influence the quality of machine categorization. Additionally, we compare performance when the LLM is tasked with assigning each code independently against performance when the model is given the full codebook and asked to apply all 9 codes in a single task. We refer to these as the "Per Code" and "Full Codebook" approaches, respectively.
113
+
114
+ Studies have shown that LLM decision-making improves when the model is prompted to account for its decisions [30, 31]. This is generally known as chain-of-thought (CoT) prompting or reasoning, and refers to breaking down tasks into specific components, one or more of which involve planning for future steps or reflection on previous ones. Our prompts, which can be viewed in full in the appendix, apply chain-of-thought prompting by including 1) a role assignment step, informing the machine of its purpose, 2) a task description step, specifying the code definition, 3) a justification step, instructing the model to provide a rationale for its decision, and 4) a decision step, wherein the model delivers its ultimate analysis in a consistent, machine-readable format. An example of the chain-of-thought prompt sequence is given by Figure 1.
115
+
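+ A minimal sketch of this four-step sequence, assembled as a single system prompt for one code, is given below; the wording of each step is illustrative rather than the exact prompt text used in this study:
+
+ ```python
+ # Sketch: build a chain-of-thought coding prompt from the four components
+ # described above (role, code definition, justification, decision).
+ def build_cot_prompt(code_title, code_definition):
+     return "\n\n".join([
+         # 1. Role assignment
+         "You will be applying category labels to passages from news articles.",
+         # 2. Code definition
+         f"Code: {code_title}\nDefinition: {code_definition}",
+         # 3. Justification
+         "First, explain whether and why the code applies to the passage, "
+         "citing evidence from the text.",
+         # 4. Decision, in a consistent machine-readable format
+         "Then, on a final line beginning with 'CODES:', list the codes that "
+         "apply, or 'none'. Write nothing after that line.",
+     ])
+ ```
+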
116
+ We use zero-shot prompts throughout this study. Zero-shot refers to providing the model only the task description, without giving examples of correctly executed responses. Xiao et al. found few-shot prompting improves coding and performance on other tasks [9], whereas Chew et al. largely employed zero-shot prompts [10]. Our case study involves evaluating paragraph-long passages rather than single clauses. We found that information in the examples was drawn upon by the LLM and interfered with its coding decisions. We also found that in Full Codebook prompts, giving examples greatly expanded the prompt, negatively impacting results. We suggest that when content is more literary or historical, zero-shot prompts are probably preferred, but that most coding tasks will benefit from few-shot prompting as demonstrated by the results of many other studies across domains.
117
+
118
+ Performance comparisons are relative to the human-derived gold standard, which was hidden from the LLM at all stages of development. We used the default settings for the GPT API, where temperature is set to 0 and nucleus sampling (top_p) is set to 1. We specified the task description as a "system prompt" and provided each passage as a "user prompt". A system prompt gives the LLM its purpose, clearly specifying the task it is meant to address, whereas a user prompt provides the input to which the model responds by generating output. We did not investigate whether intercoder agreement suffers when the default system prompt is retained and the task description and passage are instead combined into a single user prompt.
119
+
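+ In practice, a query of this shape can be issued with a few lines of Python; the sketch below uses the OpenAI Python client with the settings reported above, and the model name string and prompt variables are placeholders:
+
+ ```python
+ # Sketch: send the task description as the system prompt and the passage as
+ # the user prompt, with temperature 0 and top_p 1.
+ from openai import OpenAI
+
+ client = OpenAI()  # reads OPENAI_API_KEY from the environment
+
+ def code_passage(system_prompt, passage, model="gpt-4"):
+     response = client.chat.completions.create(
+         model=model,
+         temperature=0,
+         top_p=1,
+         messages=[
+             {"role": "system", "content": system_prompt},
+             {"role": "user", "content": passage},
+         ],
+     )
+     return response.choices[0].message.content
+ ```
+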
120
+ GPT-4 greatly outperforms GPT-3.5. We found that GPT-4 approaches human performance for 3 codes: Activist: $\kappa = 0.81$ ; Monumental Memorialization: $\kappa = 1.00$ ; Collective Synecdoche $\kappa = 0.79$ . GPT-4 prompted for rationale provides considerably higher quality code assignments than GPT-3.5, except in the case of the Out of the Mouth of Activists code, which no configuration handled well. It is especially notable that GPT-4 and GPT-3.5 differed in their most accurately interpreted codes. In the 3 tasks GPT-4 executed best, GPT-3.5's performance was slightly below its own average, mean $(\kappa_{all}) = 0.34$ vs. mean $(\kappa) = 0.32$ .
121
+
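+ For reference, agreement figures of this kind can be reproduced with standard tooling; the sketch below assumes the gold-standard and model labels are stored as dictionaries of 0/1 lists keyed by code name:
+
+ ```python
+ # Sketch: Cohen's kappa between model codings and the human gold standard,
+ # computed separately for each code over the same set of passages.
+ from sklearn.metrics import cohen_kappa_score
+
+ def reliability_by_code(gold, model):
+     return {code: cohen_kappa_score(gold[code], model[code]) for code in gold}
+ ```
+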
122
+ Coding fidelity improves when codes are presented as individual tasks. We adapted our codebook by presenting the entire codebook to GPT along with task instructions. However, we found in testing that performance improved when GPT was given each task independently. This "per code" approach was taken by one recent study exploring content analysis with non-mutually exclusive codes (permitting multiply coded passages) [10], but not another, which tested only two codes [9]. Table 2 compares the GPT-4 performance when presented individual tasks for each code ("Per Code") and when presented all tasks in a single prompt ("Full Codebook"). We found that for the 3 human-equivalent
123
+
124
+ Table 2: Intercoder reliability (Cohen's $\kappa$ ) for all codes on 111 gold standard passages. Best overall performance is shown in bold. Italics indicate the highest intercoder reliability between pairs with and without prompting for rationale (CoT vs. No CoT); if the pair are equivalent neither is italicized. Two values are considered equivalent if their difference does not exceed 0.02.
125
+
126
+ <table><tr><td rowspan="3">Code</td><td rowspan="3">Count</td><td colspan="4">GPT-4</td><td colspan="2">GPT-3.5</td></tr><tr><td colspan="2">Per Code</td><td colspan="2">Full Codebook</td><td colspan="2">Per Code</td></tr><tr><td>CoT</td><td>No CoT</td><td>CoT</td><td>No CoT</td><td>CoT</td><td>No CoT</td></tr><tr><td>Scholar</td><td>27</td><td>0.61</td><td>0.52</td><td>0.59</td><td>0.42</td><td>0.29</td><td>0.21</td></tr><tr><td>Activist</td><td>23</td><td>0.81</td><td>0.65</td><td>0.67</td><td>0.62</td><td>0.39</td><td>0.32</td></tr><tr><td>Monumental Memorialization</td><td>13</td><td>1.00</td><td>0.91</td><td>0.75</td><td>0.48</td><td>0.29</td><td>0.31</td></tr><tr><td>Mention of Scholarly Work</td><td>24</td><td>0.71</td><td>0.69</td><td>0.52</td><td>0.44</td><td>0.33</td><td>0.39</td></tr><tr><td>Social/Political Advocacy</td><td>51</td><td>0.64</td><td>0.60</td><td>0.60</td><td>0.60</td><td>0.55</td><td>0.51</td></tr><tr><td>Coalition Building</td><td>9</td><td>0.60</td><td>0.44</td><td>0.43</td><td>0.13</td><td>0.33</td><td>0.17</td></tr><tr><td>Out of the Mouth of Academics</td><td>30</td><td>0.63</td><td>0.65</td><td>0.65</td><td>0.62</td><td>0.37</td><td>0.33</td></tr><tr><td>Out of the Mouth of Activists</td><td>11</td><td>0.30</td><td>0.09</td><td>0.34</td><td>0.18</td><td>0.21</td><td>0.09</td></tr><tr><td>Collective Synecdoche</td><td>26</td><td>0.79</td><td>0.78</td><td>0.81</td><td>0.71</td><td>0.27</td><td>0.27</td></tr><tr><td>Mean</td><td>24</td><td>0.68</td><td>0.59</td><td>0.60</td><td>0.46</td><td>0.34</td><td>0.29</td></tr></table>
127
+
128
+ tasks (Activist, Monumental Memorialization, and Collective Synecdoche) the Per Code performance far exceeded the Full Codebook for 2 tasks, and was comparable for 1. For 2 other tasks, Mention of Scholarly Work and Coalition Building, we found that the Per Code configuration produced considerably higher agreement, whereas Full Codebook performed comparably to the Per Code in the remaining 4 tasks.
129
+
130
+ Coding fidelity improves when the model is prompted to justify its coding decisions. Consistent with other experiments with chain-of-thought (CoT) reasoning in LLMs, we found that coding agreement benefited strongly from prompting the model to explain itself [32]. Table 2 shows the effect of prompting for rationale on three pairs of conditions: Per Code GPT-4, Full Codebook GPT-4, and Per Code GPT-3.5. We found that across all codes and conditions, with one exception, CoT prompting produces higher or equivalent intercoder reliability with the gold standard. Using GPT-4, average Per Code agreement improved from 0.59 to 0.68, and average Full Codebook agreement improved from 0.46 to 0.60. Furthermore, a majority of pairs showed substantial improvement when the codes were assigned after providing reasoning for coding decisions.
131
+
132
+ # 3 Discussion
133
+
134
+ Determining appropriate domains for LLM-assisted qualitative coding. Previous methods of automated text categorization, both supervised and unsupervised, rarely met the standards of traditional social scientists and humanists, and were instead generally employed by data scientists. Capturing meaning, particularly complex meaning, through machine learning has largely been an elusive goal [3]. Despite our own former skepticism, we predict that LLMs will be capable of applying most qualitative codebooks within the year. However, our results show that even within the scope of a single codebook, interpretation quality varies. Thus, different disciplines and domains should expect model success and the ease of transitioning a codebook to vary considerably. We suspect that more humanistic and "softer" scientific approaches will (continue to) be more resistant to machine interpretation than problems posed by scholars who identify with "harder" sciences, to say nothing of their ability to convince their peers of its validity. We do not oppose developing evaluation benchmarks for qualitative coding to assess which models are adept at what variety of task, but neither do we advocate it; meaning is manifold and emergent, and much of its beauty derives from its resistance to reduction and definition. Instead, we suggest those who wish to employ an LLM to perform content analysis survey similar attempts and simply experiment on their own. The process of discovering triumphs, workarounds, and limitations of working with these models was not only fascinating, but tremendously fun.
135
+
136
+ Practical aspects of transitioning to content analysis with LLMs. While artificial intelligence potentially opens up much larger datasets to qualitative scholars, there is still a considerable technical barrier to automating content
137
+
138
+ Table 3: Principles of prompting an LLM for qualitative coding.
139
+
140
+ <table><tr><td colspan="2">Task Instructions</td></tr><tr><td>Prompt for Rationale</td><td>Model fidelity improves when instructed to justify its coding decisions.</td></tr><tr><td>One Task Per Code</td><td>Model fidelity improves when given each code as a separate task.</td></tr><tr><td>Brevity</td><td>Shorter task descriptions are more likely to be faithfully executed by the model.</td></tr><tr><td>Structured Output</td><td>Instruct the model to format its output to ensure uniform responses.</td></tr><tr><td colspan="2">Code Definitions</td></tr><tr><td>Word Choice</td><td>A single high-content word can be changed to align with the LLM&#x27;s built-in ontology.</td></tr><tr><td>Clause Order</td><td>Clauses are more likely to be observed when introduced earlier in the code description.</td></tr><tr><td>Mandates/Prohibitions</td><td>Both can be effective, but it is easier to get the model to “do” than “do not”.</td></tr><tr><td>Code Titles</td><td>Altering the code title can have a large effect even without altering the definition.</td></tr><tr><td>Interpretation Scope</td><td>Use words like “implicit” and “explicit” when interpretation is too limited or expansive.</td></tr><tr><td colspan="2">Chain-of-Thought Prompt Sequence</td></tr><tr><td>1. Role Assignment</td><td>Supply the model its purpose, e.g., &quot;You will be applying category labels to passages.&quot;</td></tr><tr><td>2. Code Definition</td><td>Provide the code title(s) and description(s).</td></tr><tr><td>3. Justification</td><td>Request that the model provide evidence of its reasoning.</td></tr><tr><td>4. Decision</td><td>Instruct the model to list the codes that apply to the passage in a consistent format.</td></tr></table>
141
+
142
+ analysis. Development of an LLM-adapted codebook is feasible for anyone regardless of technical skill by interacting with an LLM through chat-like Web platforms provided by proprietary model developers. However, systematically testing prompts or applying a completed codebook to the full dataset requires moderate skill in writing scripts in a language such as Python. Rather than suggest that all scholars become programmers, we encourage researchers to develop partnerships with students or community members seeking programming or research experience as a form of project-based education. Conversely, we suggest that data scientists pursue partnerships with traditional social scientists and humanists, who are often better positioned to develop coding schema to flesh out complex meanings embedded in text, which are now more tractable to machine learning.
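+
+ As a rough illustration of the scripting involved, a loop of the following shape (hypothetical file names, and the code_passage helper sketched earlier) would apply an adapted codebook to a full dataset:
+
+ ```python
+ import csv
+
+ # Hypothetical inputs: passages.csv holds one passage per row; codebook maps code names to definitions.
+ with open("passages.csv", newline="") as f_in, open("coded_passages.csv", "w", newline="") as f_out:
+     writer = csv.writer(f_out)
+     writer.writerow(["passage"] + list(codebook))
+     for row in csv.reader(f_in):
+         passage = row[0]
+         decisions = [int(code_passage(passage, name, definition)) for name, definition in codebook.items()]
+         writer.writerow([passage] + decisions)
+ ```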
143
+
144
+ Handling passages where model interpretation is poor. Overwhelmingly, GPT-4's interpretations were accurate and human-like. However, we found repeatedly that GPT-4, like a human reader, struggled with edge cases, especially where implicit information was required to make a judgment. We are encouraged by this finding, and argue that with automated analysis, fidelity is less important than it is with humans. Because statistical power increases with the number of observations, noise is more tolerable in machine-applied codes, as automated coding potentially increases sample size by orders of magnitude. Notably, this assumes that error is restricted to edge cases and is not otherwise systematically biased. We also advise against automated coding where datasets are small, as in interviews, where it is likely as efficient to code entirely by hand. As models improve and can provide confidence estimates for their statements [33-35], ML content analysis workflows should include manual review of passages with uncertain code assignments. Anecdotally, we found that GPT-4 could intelligently reflect on its responses when prompted to do so. When presented the output of another model instance, GPT-4, acting as an untrained "critic" model [36], was often able to identify when it had encountered an edge case without prompting, as well as recognize and revise obvious mistakes. Our experiences suggest that a human-in-the-loop tag-for-manual-review workflow or a two-step automated reflect-and-revise workflow may already be feasible with GPT-4 and similar models.
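+
+ A minimal sketch of the two-step reflect-and-revise idea, reusing the client from the earlier sketch; the critic prompt wording is illustrative rather than the one used here:
+
+ ```python
+ def review_coding(passage: str, code_name: str, first_pass_output: str) -> str:
+     """A second model instance acts as an untrained critic of the first instance's coding."""
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=[
+             {"role": "system", "content": "You review coding decisions made by another model."},
+             {"role": "user", "content": (
+                 f"Passage: {passage}\n\nCode: {code_name}\n\n"
+                 f"Another model produced this rationale and decision:\n{first_pass_output}\n\n"
+                 "Either flag this passage as an edge case for manual review, "
+                 "or revise the decision if it contains an obvious mistake."
+             )},
+         ],
+     )
+     return response.choices[0].message.content
+ ```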
145
+
146
+ # 4 Conclusion
147
+
148
+ Our results using state-of-the-art models lead us to recommend that scholars who do much qualitative coding consider automated coding with LLMs a potentially viable option today. We especially encourage skeptics to probe these tools'
149
+
150
+ capacities, as it is useful to know their limitations. Over the next year, models such as those incorporating memory [37], (multi-)agential models that dialogue and revise prior to rendering output [38, 39], and architectures that can handle larger inputs [40], will almost certainly greatly improve upon GPT-4's current abilities. When those models are made available, researchers who have already experimented with LLMs will be best positioned to make use of the new tools. The efficiency of automation is compelling, but we are most enthusiastic about the ability to probe much larger datasets than ever before, potentially illuminating patterns too rare or too fuzzy to detect with a sample numbering in the tens or hundreds rather than thousands or beyond.
151
+
152
+ # Acknowledgments
153
+
154
+ The author thanks Harry Yan, Pat Wall, Patrick Kaminski, Adam Fisch, Alicia Chen, and Francisco Muñoz for their helpful comments toward improving this manuscript. I am especially grateful to Tania Ravaei for collaborating on codebook development.
155
+
156
+ # References
157
+
158
+ [1] Anselm L Strauss. 1967. The discovery of grounded theory: Strategies for qualitative research. Aldine.
159
+ [2] Johnny Saldana. 2009. The coding manual for qualitative researchers. SAGE Publications.
160
+ [3] Momin M Malik. 2020. A hierarchy of limitations in machine learning. arXiv preprint arXiv:2002.05193.
161
+ [4] Laura K Nelson. 2017. Computational Grounded Theory: A methodological framework. Sociological Methods & Research, 49(1):3-42.
162
+ [5] Ankita Dhar, Himadri Mukherjee, Niladri Sekhar Dash, and Kaushik Roy. 2021. Text categorization: past and present. Artificial Intelligence Review, 54:3007-3054.
163
+ [6] Sébastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712.
164
+ [7] Bernardino Romera-Paredes, Mohammadamin Barekatain, Alexander Novikov, Matej Balog, M Pawan Kumar, Emilien Dupont, Francisco JR Ruiz, Jordan S Ellenberg, Pengming Wang, Omar Fawzi, et al. 2023. Mathematical discoveries from program search with large language models. Nature, 625:1-3.
165
+ [8] Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. 2023. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805.
166
+ [9] Ziang Xiao, Xingdi Yuan, Q Vera Liao, Rania Abdelghani, and Pierre-Yves Oudeyer. 2023. Supporting qualitative analysis with large language models: Combining codebook with GPT-3 for deductive coding. In Companion Proceedings of the 28th International Conference on Intelligent User Interfaces.
167
+ [10] Robert Chew, John Bollenbacher, Michael Wenger, Jessica Speer, and Annice Kim. 2023. LLM-assisted content analysis: Using large language models to support deductive coding. arXiv preprint arXiv:2306.14924.
168
+ [11] Shih-Chieh Dai, Aiping Xiong, and Lun-Wei Ku. 2023. LLM-in-the-loop: Leveraging large language model for thematic analysis. arXiv preprint arXiv:2310.15100.
169
+ [12] Robert H Tai, Lillian R Bentley, Xin Xia, Jason M Sitt, Sarah C Fankhauser, Ana M Chicas-Mosier, and Barnas G Monteith. 2023. An examination of the use of large language models to aid analysis of textual data. bioRxiv preprint bioRxiv:2023.07.17.549361.
170
+ [13] Ammar Ismael Kadhim. 2019. Survey on supervised machine learning techniques for automatic text classification. Artificial Intelligence Review, 52(1):273-292.
171
+ [14] David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3(1):993-1022.
172
+ [15] Hamed Jelodar, Yongli Wang, Chi Yuan, Xia Feng, Xiahui Jiang, Yanchao Li, and Liang Zhao. 2019. Latent Dirichlet allocation (LDA) and topic modeling: Models, applications, a survey. Multimedia Tools and Applications, 78:15169-15211.
173
+ [16] Maarten Grootendorst. 2022. BERTopic: Neural topic modeling with a class-based TF-IDF procedure. arXiv preprint arXiv:2203.05794.
174
+
175
+ [17] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901.
176
+ [18] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. LLaMa: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
177
+ [19] MistralAI. 2023. Mixtral of experts: A high quality sparse mixture-of-experts. https://mistral.ai/news/mixtral-of-experts. Accessed: 2024-01-13.
178
+ [20] Anthropic. 2023. Claude 2. https://www.anthropic.com/index/claude-2. Accessed: 2024-01-18.
179
+ [21] Jan Nowakowski and Jan Keller. 2024. AI-powered patching: The future of automated vulnerability fixes. Google Security Engineering Technical Report.
180
+ [22] Carlos Gómez-Rodríguez and Paul Williams. 2023. A confederacy of models: A comprehensive evaluation of LLMs on creative writing. arXiv preprint arXiv:2310.08433.
181
+ [23] Manon Bischoff. 2024. AI matches the abilities of the best Math Olympians. Scientific American.
182
+ [24] Werner Vach and Oke Gerke. 2023. Gwet's AC1 is not a substitute for Cohen's kappa – A comparison of basic properties. MethodsX, 10:102212.
183
+ [25] Etienne Ollion, Rubing Shen, Ana Macanovic, and Arnault Chatelain. 2023. ChatGPT for text annotation? Mind the hype! SocArXiv preprint doi:10.31235/osf.io/x58kn.
184
+ [26] Anselm Strauss and Juliet M Corbin. 1997. Grounded theory in practice. SAGE Publications.
185
+ [27] OpenAI. 2023. GPT-4. https://openai.com/research/gpt-4. Accessed: 2024-01-18.
186
+ [28] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. 2024. Self-rewarding language models. arXiv preprint arXiv:2401.10020.
187
+ [29] Sondos Mahmoud Bsharat, Aidar Myrzakhan, and Zhiqiang Shen. 2023. Principled instructions are all you need for questioning LLaMA-1/2, GPT-3.5/4. arXiv preprint arXiv:2312.16171.
188
+ [30] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837.
189
+ [31] Aman Madaan and Amir Yazdanbakhsh. 2022. Text and patterns: For effective chain of thought, it takes two to tango. arXiv preprint arXiv:2209.07686.
190
+ [32] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. 2023. A survey of chain of thought reasoning: Advances, frontiers and future. arXiv preprint arXiv:2309.15402.
191
+ [33] Zhen Lin, Shubhendu Trivedi, and Jimeng Sun. 2023. Generating with confidence: Uncertainty quantification for black-box large language models. arXiv preprint arXiv:2305.19187.
192
+ [34] Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. 2023. Navigating the grey area: Expressions of overconfidence and uncertainty in language models. arXiv preprint arXiv:2302.13439.
193
+ [35] Jiefeng Chen and Jinsung Yoon. 2024. Introducing ASPIRE for selective prediction in LLMs. https://blog.research.google/2024/01/introducing-aspire-for-selective.html?m=1. Accessed: 2024-01-20.
194
+ [36] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. 2023. REFINER: Reasoning feedback on intermediate representations. arXiv preprint arXiv:2304.01904.
195
+ [37] Noah Shinn, Beck Labash, and Ashwin Gopinath. 2023. Reflexion: An autonomous agent with dynamic memory and self-reflection. arXiv preprint arXiv:2303.11366.
196
+ [38] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2022. ReAct: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629.
197
+ [39] Zhenran Xu, Senbao Shi, Baotian Hu, Jindi Yu, Dongfang Li, Min Zhang, and Yuxiang Wu. 2023. Towards reasoning in large language models via multi-agent peer review collaboration. arXiv preprint arXiv:2311.08152.
198
+ [40] Albert Gu and Tri Dao. 2023. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752.
2401.15xxx/2401.15170/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:647e24620d12ccabad1abe4feb0b7645a33626817f6aaa107d3b60292311d327
3
+ size 452929
2401.15xxx/2401.15170/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15174/f3de6325-3b32-4946-b7e4-f3b3c259dacf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cc62a6996e3a9bc08300dea9e7d46a8b89bea4bad5402051cb5340869c81304
3
+ size 4125699
2401.15xxx/2401.15174/full.md ADDED
@@ -0,0 +1,660 @@
1
+ # LaMI: Large Language Models for Multi-Modal Human-Robot Interaction
2
+
3
+ Chao Wang
4
+
5
+ chao.wang@honda-ri.de
6
+
7
+ Honda Research Institute EU
8
+
9
+ Offenbach am Main, Germany
10
+
11
+ Stephan Hasler
12
+
13
+ Honda Research Institute EU
14
+
15
+ Offenbach am Main, Germany
16
+
17
+ Daniel Tanneberg
18
+
19
+ Honda Research Institute EU
20
+
21
+ Offenbach am Main, Germany
22
+
23
+ Felix Ocker
24
+
25
+ Honda Research Institute EU
26
+
27
+ Offenbach am Main, Germany
28
+
29
+ Frank Joublin
30
+
31
+ Honda Research Institute EU
32
+
33
+ Offenbach am Main, Germany
34
+
35
+ Antonello Ceravola
36
+
37
+ Honda Research Institute EU
38
+
39
+ Offenbach am Main, Germany
40
+
41
+ Joerg Deigmoeller
42
+
43
+ Honda Research Institute EU
44
+
45
+ Offenbach am Main, Germany
46
+
47
+ Michael Gienger
48
+
49
+ Honda Research Institute EU
50
+
51
+ Offenbach am Main, Germany
52
+
53
+ ![](images/f63339d5987ca232f3feeddd949a5221d796b3a9a3081423f3ccec90d2de6094.jpg)
54
+ Figure 1: LLM driven human-robot interaction centered around guidance, capabilities, and examples
55
+
56
+ ![](images/446372f4b1871af0a7d76dd8e261fe7a6ee5197759621548c163c30f09b88973.jpg)
57
+
58
+ ![](images/fc6f5879e4fbda83224e12bb5c71bd0372cf7c09b4b613f4318433b0b51fc274.jpg)
59
+
60
+ # ABSTRACT
61
+
62
+ This paper presents an innovative large language model (LLM)-based robotic system for enhancing multi-modal human-robot interaction (HRI). Traditional HRI systems relied on complex designs for intent estimation, reasoning, and behavior generation, which were resource-intensive. In contrast, our system empowers researchers and practitioners to regulate robot behavior through three key aspects: providing high-level linguistic guidance, creating "atomic actions" and expressions the robot can use, and offering a set of examples. Implemented on a physical robot, it demonstrates
63
+
64
+ proficiency in adapting to multi-modal inputs and determining the appropriate manner of action to assist humans with its arms, following researchers' defined guidelines. Simultaneously, it coordinates the robot's lid, neck, and ear movements with speech output to produce dynamic, multi-modal expressions. This showcases the system's potential to revolutionize HRI by shifting from conventional, manual state-and-flow design methods to an intuitive, guidance-based, and example-driven approach. Supplementary material can be found at https://hri-eu.github.io/Lami/
65
+
66
+ # CCS CONCEPTS
67
+
68
+ - Human-centered computing $\rightarrow$ Interactive systems and tools.
69
+
70
+ # KEYWORDS
71
+
72
+ Assisting robot, Human-robot interaction, Large language model
73
+
74
+ # ACM Reference Format:
75
+
76
+ Chao Wang, Stephan Hasler, Daniel Tanneberg, Felix Ocker, Frank Joublin, Antonello Ceravola, Joerg Deigmoeller, and Michael Gienger. 2024. LaMI: Large Language Models for Multi-Modal Human-Robot Interaction. In Extended Abstracts of the CHI Conference on Human Factors in Computing Systems (CHI EA '24), May 11–16, 2024, Honolulu, HI, USA. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3613905.3651029
77
+
78
+ # 1 INTRODUCTION
79
+
80
+ Seamless human-robot interaction (HRI) necessitates the robot's adept handling of multi-modal input from humans, including speech, gaze, and facial expressions, to accurately assess the human's intent and provide assistance accordingly. Simultaneously, robots must convey their own intent clearly to humans through multimodal output channels, encompassing speech, gesture, and gaze. Traditionally, achieving this functionality in robotic systems often required intricate design. In the realm of intent estimation, previous research commonly incorporated intention recognition modules to categorize human intent based on multi-modal input [3, 17]. Some systems additionally featured dedicated modules for detecting the human affective state, crucial for establishing a socially nuanced interaction [10, 16, 18]. However, the drawback of these approaches lies in their time-consuming and expensive training processes. For the output front, numerous prior systems integrated emotional state [8, 11] modules to control humanoid output cues, such as tone, gaze, or facial expressions, enhancing the transparency and vividness of feedback to humans. Regarding the motion generation, a variety of methods were proposed, including blending and mapping of pre-established motion sets [19, 25], and the use of motion capture data [5, 9, 15]. Notably, this involved the manual design of motions for each output modality correlated to specific states.
81
+
82
+ Recent advancements in large language models (LLMs), showcasing impressive capabilities in domains like chatbots, data processing, and code generation, are now revealing their potential applications in the field of robotics. By leveraging their abilities in context understanding, reasoning, and planning, many robotic applications have been proposed in a short time [7, 12, 14, 20, 21, 28]. Among them, one typical example is the "SayCan" robot [1], which uses LLMs to interpret a human's natural-language command, analyze the environment, and generate a concrete, executable action sequence to satisfy the human's request. However, its interaction cues between robot and human are limited to voice commands, without even speech output.
83
+
84
+ More recently, some researchers have also tried to apply this technology in the realm of HRI. For example, Zhang et al. utilized LLMs to estimate how much humans trust a robot [30]; Yoshida et al. use LLMs to generate low-level control commands that drive a humanoid robot's motion for social expression [29], rather than for practical assistance. Baermann et al. deployed LLMs not only to follow humans' speech commands, but also to correct mistakes via humans' natural-language feedback [2]. However, that communication relies primarily on speech interaction, with less focus on multi-modal sensing and expression capabilities. Ye et al. [27] developed an LLM-driven robotic system capable of collaborating with humans in assembly tasks within a VR environment, but this system is limited to processing human language inputs and controlling a single arm in virtual space. In general, compared to the rapid
85
+
86
+ ![](images/9671c4756010767158cc07458efdb9d43aa489860f7773d2d7305297bdf1045d.jpg)
87
+ Figure 2: Robot's Hardware and the Scenario Setup
88
+
89
+ advances of LLMs in the robotic task and motion planning domains, attempts in HRI are not as numerous and often lack a systematic approach to maximizing the capability of LLMs for multi-modal interaction with humans.
90
+
91
+ This study proposes a novel LLM-based robotic system implemented in a physical robot. This architecture empowers researchers and practitioners to regulate robot behavior through three key aspects: providing high-level guidance in natural language, creating "atomic" actions and expressions which the robot can use, and providing a set of examples. In practice, our system can convert a human's multimodal input, including observed human behavior, position, gaze, and multi-person dialogue, along with scene information such as object identities and poses, into language descriptions that LLMs can process. Subsequently, the LLM analyzes the situation and determines the timing and manner of the robot's support actions to assist humans, following predefined guidelines. Simultaneously, it coordinates the robot's lid, neck, and ear movements with speech output to produce dynamic, multi-modal expressions. Preliminary test results demonstrate that the robot can effectively meet researcher expectations, suggesting that this approach holds the potential to transform human-robot interaction from a manual, state-and-flow design methodology to a more intuitive approach centered around guidance, capabilities, and example-driven frameworks.
92
+
93
+ # 2 LLM DRIVEN HUMAN-ROBOT INTERACTION
94
+
95
+ The bi-manual robotic system setup is composed of two robot arms<sup>1</sup>, which can manipulate objects on the desk. A robot head with two ears and a lid is installed on a pan-tilt unit as a neck, allowing the robot to perform non-verbal expressions to humans (Figure 2). Additionally, a speaker integrated into the robot's body delivers verbal output, and a 7-inch LCD attached to the robot body communicates the robot's thinking process via a graphical user interface (GUI). A multi-channel microphone array<sup>2</sup>, installed on the robot, enables it to receive speech commands and discern voice sources. An RGBD camera<sup>3</sup> can track the skeletons of several humans and the 6-DoF poses of the objects on the desk.
96
+
97
+ The system's architecture includes three key modules: "Scene Narrator", "Planner", and "Expresser" (Figure 3). The Scene Narrator mirrors the states of objects and humans as detected by the sensors.
98
+
99
+ ![](images/872f310251987bdee499ac86d264e98868ea70e4c11b022ff97e044c6c456159.jpg)
100
+ Figure 3: The system structure.
101
+
102
+ The Planner module processes multi-modal inputs as event messages, encompassing the positions of individuals within the scene. Inter-module communication is facilitated using $\mathrm{ROS}^4$ .
103
+
104
+ # 2.1 The "Scene Narrator"
105
+
106
+ The "Scene Narrator" module senses the poses of the objects, human postures, and the dialogue information, i.e. which person speaks which content to which other person. This module constructs a 3D representation of the actual scene, enabling it to transform multimodal sensory data into events, following predefined rules. These events are formatted in natural language, making them understandable to the LLM. For instance, if a person turns towards another individual while speaking, the module conveys this as an event to the Planner module: "<Bob> speaks to <Sam>: 'Give me the cola bottle'". Similarly, if it detects a person's hand near a container that is being tilted towards another, it sends an event like "<Sam> is pouring <the_bottle_one> into <the_glass_two>". An additional function of the Scene Narrator module involves receiving high-level action commands from the "Planner" module. For instance, it may be instructed to place a glass within an area reachable by a specific person, followed by executing low-level trajectory planning to control the robot's arms. Further details about this process will be elaborated in subsequent sections.
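+
+ A sketch of how such rule-based event verbalization might look; the event fields and types here are hypothetical simplifications of the actual module:
+
+ ```python
+ def verbalize_event(event: dict) -> str:
+     """Turn a detected multi-modal event into a natural-language line the LLM can read."""
+     if event["type"] == "speech":
+         return f"<{event['speaker']}> speaks to <{event['addressee']}>: '{event['utterance']}'"
+     if event["type"] == "pouring":
+         return f"<{event['actor']}> is pouring <{event['source']}> into <{event['target']}>"
+     # Fallback for other rule-detected events (hypothetical).
+     return f"<{event['actor']}> performed {event['type']}"
+ ```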
107
+
108
+ # 2.2 The "Planner"
109
+
110
+ The primary role of the "Planner" module is to facilitate communication with LLMs to enable high-level planning for both assistance and human interaction. To achieve these objectives, the "Planner" integrates the GPT-4 tool API<sup>5</sup>, which is designed to execute eight predefined "atomic" functions. Seven of these functions facilitate communication with the "Narrator" module. Specifically, functions like get_objects(), get_persons(), can_person_reach_object(), and can_person_see_object() are employed to query the status and properties of objects or individuals. Meanwhile, commands
111
+
112
+ such as put_object_on_object() or move_object_to_person() instruct the "Scene Narrator" to manipulate the robot's arms for various tasks. Two distinct functions are dedicated to managing the robot's social communication. The speak() function is responsible for initiating verbal communication, while the facial_expression() function controls movements of the ears, lid, and head, enhancing non-verbal interaction. These functions are synchronized with the "Expresser" module. The GPT-4 Tool API is engineered to allow users to define each function and its parameters. The LLM, in turn, selects the appropriate function(s) and arguments to execute the task at hand. As detailed earlier, when a function is called, it transmits high-level action commands to the "Scene Narrator" to develop low-level trajectory plans for controlling the robot's arms. For instance, if the LLM issues a command like put('the_bottle_two', 'the_table_one'), the Narrator module generates a trajectory enabling the robot's arm to place the bottle on a suitable spot on the table, while avoiding collisions with other objects or people. If the low-level plan is executable, the "Narrator" confirms success; otherwise, it provides feedback detailing the error, along with reasons and suggestions in a structured, rule-based natural language format. For example, it might respond with: "RESULT: 'Unable to place the_bottle_two on the_table_one.' SUGGESTION: Hand the object to a person or find a different location to place it." This feedback is then relayed back to the GPT-4 API for further high-level planning adjustments.
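+
+ For illustration, one atomic function might be exposed through the OpenAI tools interface roughly as below; the exact parameter schema is an assumption that mirrors the names used in the paper:
+
+ ```python
+ tools = [
+     {
+         "type": "function",
+         "function": {
+             "name": "move_object_to_person",
+             "description": "You move an object to a person.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "object_name": {"type": "string", "description": "An object available in the scene."},
+                     "person_name": {"type": "string", "description": "A person available in the scene."},
+                 },
+                 "required": ["object_name", "person_name"],
+             },
+         },
+     }
+ ]
+
+ # The Planner would pass such a list on every query, e.g.:
+ # client.chat.completions.create(model="gpt-4", messages=messages, tools=tools)
+ ```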
113
+
114
+ # 2.3 The "Expresser"
115
+
116
+ The "Expresser" module is responsible for controlling the actuators used for the robot's facial expressions. It houses a library of pre-designed "atomic animation clips" for each actuator's movements. These animations can be activated directly through the facial_expression() function in the "Planner" module, with parameters like {gazed_object: 'the_bottle_two', head_motion: 'nodding', ears_lid_motion: 'confirm'}. However, processing each LLM query can take approximately 1-5 seconds, varying with the complexity of the plan. Sole reliance on this method for the robot's social expressions could result in idle periods, negatively
117
+
118
+ ![](images/a5b73dc250a5c65d1eae0b491f82668a62081204ab9942af614ff8f27907c225.jpg)
119
+
120
121
+
122
+ ![](images/4e7925819593d4437fd91e3cbc34545a19ac645a51bac47a075f7282aff8b315.jpg)
123
+
124
+ Let's check if Daniel can reach the fanta bottle...
125
126
+
127
+ ![](images/5d43bd4d3498f3c49f3588844865dfb28fad9ddcb49a0e6a75c0419ba639ce9c.jpg)
128
+
129
+ Ok, Daniel cannot reach the fanta bottle.
130
131
+
132
+ ![](images/a009f0dc9c7c6005bcc8b347639ee55bdeb4b0b2d7aac871b04780fd44e61e68.jpg)
133
+
134
+ Now, let's try to move the fanta bottle to Felix...
135
+
136
+ ![](images/fa61599a3f2500eaf4194cb6f13970bb459f676c3847a9ec583b27c8d0bbc544.jpg)
137
+
138
+ Ok, I moved the fanta bottle to Felix.
139
140
+
141
+ ![](images/70010b3eeb8723de3a58cb1be5c0ba26638b571117e12e40eea3582834dc66b4.jpg)
142
+
143
+ Now, let's inform Felix...
144
+
145
+ ![](images/e8e85f78465958506262e10f73c3034c209fa221cd2390cb76b16119afe09856.jpg)
146
+ Figure 4: The GUI illustrates the robot's "internal thoughts" by translating GPT-called functions and their outcomes into natural language, accompanied by relevant icons. Additionally, after each GPT query cycle, the LLM is prompted to summarize the reasoning behind its actions.
147
+
148
+ Ok, I said to Felix: I've passed the fanta bottle to you as Daniel couldn't see or reach it.
149
150
+
151
+ ![](images/c25f548cca10087b97f4682ebb051bb4b77991443e17d82f675a55bffd5e9b56.jpg)
152
+
153
+ I am done.
154
155
+
156
+ impacting user experience. To mitigate this, the "Expresser" also incorporates a rule-based mechanism. This mechanism enables it to provide rapid expressions in the interim between the request and response of each GPT query, which is also in line with "deliberative" and "reactive" approaches in the HRI domain [3]. For instance, when the "Planner" module receives either trigger information or a function call result from the "Scene Narrator" - and before it issues a new request to GPT - it forwards this information to the "Expresser" to initiate rule-based expressions, such as "thinking" or gestures indicating "function-call success". The specifics of how expressions are distributed between rule-based and LLM-based methods are elaborated in Section 3.4. Additionally, the "Expresser" plays a crucial role in synchronizing the movements of different modules. A noteworthy point of consideration is the potential conflict in commands, especially when "gazed_object" requires the robot's neck to orient towards a specific object, which might clash with other head gestures like "nodding" or "shaking the head". To address this, commands related to "gazed_object" are always prioritized and sent first to the pan-tilt unit, followed by other head gestures. Finally, the "Expresser" module translates the information communicated with the LLM into natural language, displaying it through text and icons in the GUI (shown in Figure 4). This feature enables users to understand the robot's "internal thoughts" in an anthropomorphic way. After each GPT query round is completed, we guide the
157
+
158
+ LLM to summarize the reasoning process, thereby enhancing the explainability of the robot's behavior.
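+
+ A simplified sketch of the priority rule described above (the gaze command is always sent first, then other head gestures), with stub actuator handles standing in for the real interfaces:
+
+ ```python
+ class ActuatorStub:
+     """Placeholder for the real actuator interface (hypothetical)."""
+     def __init__(self, name: str):
+         self.name = name
+     def look_at(self, target: str) -> None:
+         print(f"{self.name}: look at {target}")
+     def play_clip(self, clip: str) -> None:
+         print(f"{self.name}: play '{clip}'")
+
+ pan_tilt = ActuatorStub("pan_tilt")
+ ears_lid = ActuatorStub("ears_lid")
+
+ def play_expression(gazed_object=None, head_motion=None, ears_lid_motion=None):
+     """Dispatch one facial_expression() call to the actuators."""
+     if gazed_object:
+         pan_tilt.look_at(gazed_object)       # prioritized to avoid conflicts with head gestures
+     if head_motion:
+         pan_tilt.play_clip(head_motion)      # e.g. 'nodding', 'shake_head'
+     if ears_lid_motion:
+         ears_lid.play_clip(ears_lid_motion)  # e.g. 'confirm', 'observe'
+ ```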
159
+
160
+ # 2.4 Interaction Flow: An Example
161
+
162
+ This section illustrates the interaction flow within our system through a scenario depicted in Figure 5:
163
+
164
+ The interaction typically begins with a person's speech. For instance, the "Scene Narrator" detects "Felix speaks to Daniel: 'Please hand me the red glass'." This event is then translated into natural language and relayed to the "Planner" module, initiating a GPT query. Simultaneously, the "Planner" informs the "Expresser" for an immediate rule-based response, leading the robot to look at Felix while its ears and lid roll back, simulating a listening gesture. Approximately 2 seconds later, GPT responds by invoking the get_persons() and get_objects() functions to identify the people and objects present. The resulting data, including "Felix", "Daniel" and object details, are sent back to GPT for further analysis. During the wait for GPT's next response, the robot exhibits a 'thinking' gesture, looking from side to side with blinking lid movements. Shortly after, the LLM calls check_hindering_reasons() to assess if Daniel can see and reach the red glass and whether he is busy. Concurrently, facial_expression() is activated for the robot to look towards Daniel. The outcome indicates Daniel can hand over the glass, and the robot, following pre-defined guidance, opts not to intervene, silently displaying the reasoning on the GUI. Subsequently, Felix asks Daniel to pour cola into the glass. The robot, attentive to their conversation, deduces through check_hindering_reasons() that Daniel is occupied with a phone call and learns from is_person_busy_or_idle() that Felix is holding the cup. The robot then opts to pour cola from the bottle into Felix's glass. Should Felix not be holding the glass, or if it's beyond the robot's reach, the robot will instead place the bottle near Felix. Directed by the LLM, the robot's head tracks the bottle during pickup and shifts to the glass while pouring. Upon completion, the robot nods towards Felix and announces, "I've poured Coca-Cola into your glass as Daniel is currently busy".
165
+
166
+ # 2.5 Configuration space for human-robot interaction
167
+
168
+ Our system empowers HRI researchers to regulate the assistance - specifying "what" actions the robot should perform, "when" to offer assistance, and "how" to communicate social cues for fluid interaction - in the following ways:
169
+
170
+ 1) Providing high-level guidance. We use the GPT API's system message to embed guidance for the LLM's thinking process. The prompt encourages the robot to observe interactions, use functions for information gathering or action, and respond with reasoning. The guidance prompt reads like: "You are in control of a robot called 'the_robot' and observe persons talking in the form '<sender>' said to '<receiver>': '<instruction>'. You can call the given functions to gather information or act, or respond with text only for reasoning. Your task is: You should check the reasons that could hinder the '<receiver>' from performing the '<instruction>'..."
171
+ 2) Defining atomic actions of arms. Researchers can define atomic actions by manipulating the joint transforms of the robot's
172
+
173
+ ![](images/86683ec3f2e41b02de765ec6a5436046feabaa869efe14bd483c38c7e95f7555.jpg)
174
+
175
+ ![](images/388620bb91635d43dbffb90d0bcde157f2c7428a6aa10b0a9893f00e0eb1f2af.jpg)
176
+
177
+ ![](images/cd3d5fbd8c895c0a7420fb16cfb9122ae319d409dacb06235f0ba35762a52b9e.jpg)
178
+
179
+ ![](images/a9b452a13c8cf60f92d7826159cad6e61beb9543a1ca57d893e619598a76ab0f.jpg)
180
+ Figure 5: The interaction flow. The blue squares are the actions generated by the LLM; the grey ones are rule-based functions.
181
+
182
+ arms. For instance, a pourInto action involves a series of movements, such as aligning and rotating containers. These actions are communicated to the LLM via the GPT Tool Function $\mathrm{API}^6$ , where details and parameters of each callable function are specified.
183
+
184
+ 3) Creating atomic motion clips of ears/lid/head. Although the atomic actions of the robot's arms are relatively complex, as they depend on affordances, collision avoidance, and the kinematic setup, the "Expresser" module allows HRI researchers/designers to easily add new atomic animation clips that the LLM can use to generate non-verbal expression. As depicted in Figure 6, researchers first define key-frames specifying the timing and angle for the left/right ear and the lid. The "Expresser" then automatically interpolates between these keyframes using a sine function to ensure smooth actuator movement, storing the resulting clip (a minimal interpolation sketch follows this list). Subsequently, a natural-language name and description, such as "observe": ears roll back, then forward; lid blinks twice, are assigned to each clip, providing the LLM with a clear reference. These names and descriptions are cataloged in the "Planner" module under "description", readily accessible for GPT's on-the-fly utilization.
185
+ 4) Adding Examples. The importance of including examples in prompt engineering is highlighted in several research papers and guides [6, 24, 31]. In our system, we directly integrate the examples into the system message, in the following form: "For example, when calling move_object_to_person(), can_person_see_object(), can_person_reach_object(), speak(),
186
+
187
+ you also need to call facial_expression(), such as: [(arguments = {"head_motion": null, "ears_lid_motion": "observe", "gazed_target": "the_cola_bottle"}, function = "robot_facial_expression"), (arguments = {"person_name": "Daniel", "object_name": "the_cola_bottle"}, function = "can_person_see_object")]
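+
+ As referenced in item 3, a minimal sketch of the keyframe interpolation, assuming (time, angle) keyframes like those in Figure 6 and a half-cosine ease as one reading of the "sine function" smoothing; the real "Expresser" presumably streams such values to the actuators:
+
+ ```python
+ import math
+
+ def interpolate_clip(keyframes: list[tuple[float, float]], t: float) -> float:
+     """Sine-eased interpolation between successive (time, angle) keyframes."""
+     for (t0, a0), (t1, a1) in zip(keyframes, keyframes[1:]):
+         if t0 <= t <= t1:
+             u = (t - t0) / (t1 - t0)                 # progress within the segment, 0..1
+             eased = (1 - math.cos(u * math.pi)) / 2  # smooth acceleration and deceleration
+             return a0 + (a1 - a0) * eased
+     return keyframes[-1][1]
+
+ # Example with the left-ear keyframes from Figure 6:
+ left_ear = [(0.0, 90.0), (0.75, 150.0), (1.0, 150.0)]
+ angle_at_half_second = interpolate_clip(left_ear, 0.5)
+ ```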
188
+
189
+ # 3 EVALUATION SETUP
190
+
191
+ # 3.1 Test Scenario
192
+
193
+ In our test scenario, two participants, "Daniel" and "Felix", are seated around a table with various objects, including a glass, a cola bottle, a salt shaker, a knife, an iPhone, and a trash bin. The robot is designed to detect their head orientation, speech, and actions, even when objects obstruct its view. The participants interact with these objects and each other, following scripted scenarios to test the robot's multi-modal reasoning and expression capabilities. For consistency and reproducibility in testing, we fix the model's snapshot using a "seed" argument in the GPT API<sup>7</sup>. The objective is for the robot to serve as a proactive assistant, demonstrating rich social expressions. The test cases and expected behaviour of the robot are: 1) Reachable object: Felix asks Daniel to pass the cola bottle, which is out of Daniel's reach; the robot informs Felix and assists. If the bottle is within reach, the robot observes without intervening. 2) Supporting while busy: If Felix asks for the salt shaker while Daniel is busy cutting a lemon, the robot offers assistance. Otherwise, it simply observes. 3) Finding object: When Daniel's iPhone is hidden, the robot guides him to its location, noting Felix's unawareness. 4) Assist when explicitly asked: Upon direct requests, the robot performs tasks like passing objects or pouring water, with appropriate expressions.
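+
+ Fixing the sampling behaviour for reproducibility might look roughly like this with the OpenAI Python client; the seed value shown is hypothetical and the resulting determinism is best-effort:
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI()
+ response = client.chat.completions.create(
+     model="gpt-4",
+     messages=[{"role": "user", "content": "test"}],
+     seed=1234,        # hypothetical value; repeated runs become as deterministic as the API allows
+     temperature=0,
+ )
+ print(response.system_fingerprint)  # changes when the backend configuration changes
+ ```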
194
+
195
+ # 3.2 Preliminary test result and lesson learned
196
+
197
+ Three robotics researchers participated, providing the guidance, defining atomic actions, and creating basic animation clips for facial expression. After tuning, the robot provided assistance as
198
+
199
+ Figure 6: Creating atomic animation clips
200
+ ![](images/07ee194d0d88d6f741fd0196dca4ded2ff3176950295269638c40e5a5f18be38.jpg)
201
+ - Name and description: "observe" - ears roll back then to the front; lid blinks twice.
202
+ $\rightarrow$ Automatically generating animation (sine curve interpolation)
203
+
204
+ ![](images/b21fce7e5d3feaf858eeb7303a8553aa5c82941d2a3e587e44166d1e079ffd94.jpg)
205
+ - Key frame: left_ear: [0, 90], [0.75, 150], [1, 150] ...; right_ear: [0, 90], [0.75, 150]...; lid: [0, 17], [0.25, 12] ...
206
+
207
+ expected and produced both clear verbal communication and vivid non-verbal expression. Some insights emerged during the process:
208
+ 1) High-Level Guidance: For effective multi-step planning in complex scenarios, sophisticated reasoning models like Chain-of-Thought [23], Tree-of-Thought [26], or Graph-of-Thought [4] were considered. In our system, we directed the robot to adopt a deliberate thinking process of 'Observe' -> 'Reason' -> 'Act.' To support this approach, we designed prompts that guide the robot in using various observation functions to assess the state of persons and objects in its environment. The outcome of these functions provides insights into whether humans can independently resolve tasks. If assistance is necessary, the robot is prompted to identify potential obstacles preventing human success by analyzing the function call results. Subsequently, it explores solutions through its arm's atomic actions. After evaluating the plan's viability with the 'Scene Narrator', the robot either proceeds with the action or re-plans, seeking an effective solution.
209
+ 2) Defining Atomic Actions: The granularity of functions driving atomic actions, especially for information retrieval, was a critical factor. We experimented with a range from single functions for specific information (e.g., can_person_reach_object) to more aggregate functions. Single functions were found inefficient due to excessive calls and context bloat, while medium granularity (e.g., check_situation_from_person) struck a good balance. However, an aggregate function for retrieving all available information (get_environment_description) proved less reliable, likely overwhelmed by the volume of data and resultant formatting difficulties. Clear naming and encapsulation of functions were also essential; ensuring distinct function choices like hand_over over move_object_to was challenging, and strict preferences occasionally led to undesired behaviors, like avoiding certain functions altogether. A stop() action was instrumental in structuring the sequence, alongside guidelines in the system prompt directing the model to gather information iteratively, make decisions, and then act.
210
+ 3) Examples: The robot successfully calls the observation and arm functions according to the guidance, even without any examples. However, testing revealed that GPT does not always generate appropriate facial expressions. Critically, it sometimes processes facial_expression() after other actions instead of concurrently,
211
+
212
+ which slows the robot's response. This may be because actions like "put" and "give" are common-sense knowledge for the LLM, whereas our robot has a unique facial-expression design that the LLM is not familiar with. With just 3 examples, however, it generates facial expressions well and can even concatenate small animation clips together.
213
+
214
+ 4) Rule-based reactive expression: In the context of Large Language Models (LLMs), inference latency remains an ongoing challenge [13, 22]. To address this, integrating rule-based "reactive" expressions with the more calculated "deliberate" expressions generated by LLMs significantly improves user interaction. For instance, employing gestures like "listening" or "thinking" effectively fills the gaps during GPT queries. Such gestures lead to greater user tolerance for latency, as they provide visual cues of the robot's processing, enhancing the overall experience of human-robot interaction.
215
+
216
+ # 4 CONCLUSIONS AND FUTURE WORK
217
+
218
+ Large Language Models have the potential to revolutionize robotic development, necessitating new approaches to human-robot interaction. This study introduces an innovative framework that seamlessly integrates LLMs with robotic systems to enhance human-robot interaction. Implemented on a physical robot, it demonstrates proficiency in adapting to multi-modal inputs, dynamically interacting with humans through speech, facial expressions, and a GUI module. Our upcoming study will compare LLM-based interactions with rule-based approaches. We'll evaluate the robot's perceived anthropomorphism and intelligence, and assess workload reduction for researchers using standardized questionnaires and interviews.
219
+
220
+ # REFERENCES
221
+
222
+ [1] Michael Ahn, Anthony Brohan, Noah Brown, Yevgen Chebotar, Omar Cortes, Byron David, Chelsea Finn, Chuyuan Fu, Keerthana Gopalakrishnan, Karol Hausman, et al. 2022. Do as i can, not as i say: Grounding language in robotic affordances. arXiv preprint arXiv:2204.01691 (2022).
223
+ [2] Leonard Bärmann, Rainer Kartmann, Fabian Peller-Konrad, Alex Waibel, and Tamim Asfour. 2023. Incremental Learning of Humanoid Robot Behavior from Natural Interaction and Large Language Models. arXiv preprint arXiv:2309.04316 (2023).
224
+ [3] Christoph Bartneck, Tony Belpaeme, Friederike Eyssel, Takayuki Kanda, Merel Keijsers, and Selma Sabanovic. 2020. Human-robot interaction: An introduction. Cambridge University Press.
225
+ [4] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Michal Podstawski, Hubert Niewiadomski, Piotr
226
+
227
+ Nyczyk, et al. 2023. Graph of thoughts: Solving elaborate problems with large language models. arXiv preprint arXiv:2308.09687 (2023).
228
+ [5] Luc Boutin, Antoine Eon, Said Zeghloul, and Patrick Lacouture. 2010. An auto-adaptable algorithm to generate human-like locomotion for different humanoid robots based on motion capture data. In 2010 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 1256-1261.
229
+ [6] Julian Coda-Forno, Marcel Binz, Zeynep Akata, Matthew Botvinick, Jane X Wang, and Eric Schulz. 2023. Meta-in-context learning in large language models. arXiv preprint arXiv:2305.12907 (2023).
230
+ [7] Danny Driess, Fei Xia, Mehdi S. M. Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, Wenlong Huang, Yevgen Chebotar, Pierre Sermanet, Daniel Duckworth, Sergey Levine, Vincent Vanhoucke, Karol Hausman, Marc Toussaint, Klaus Greff, Andy Zeng, Igor Mordatch, and Pete Florence. 2023. PaLM-E: An Embodied Multimodal Language Model. (3 2023). http://arxiv.org/abs/2303.03378
231
+ [8] Maurizio Ficocelli, Junichi Terao, and Goldie Nejat. 2016. Promoting Interactions between Humans and Robots Using Robotic Emotional Behavior. IEEE Transactions on Cybernetics 46 (12 2016), 2911-2923. Issue 12. https://doi.org/10.1109/TCYB.2015.2492999
232
+ [9] Alexis Heloir, Sylvie Gibet, Franck Multon, and Nicolas Courty. 2006. Captured motion data processing for real time synthesis of sign language. In *Gesture in Human-Computer Interaction and Simulation: 6th International Gesture Workshop*, GW 2005, Berder Island, France, May 18-20, 2005, Revised Selected Papers 6. Springer, 168-171.
233
+ [10] Alexander Hong, Nolan Lunscher, Tianhao Hu, Yuma Tsuboi, Xinyi Zhang, Silas Franco Dos Reis Alves, Goldie Nejat, and Beno Benhabib. 2021. A Multimodal Emotional Human-Robot Interaction Architecture for Social Robots Engaged in Bidirectional Communication. IEEE Transactions on Cybernetics 51 (12 2021), 5954-5968. Issue 12. https://doi.org/10.1109/TCYB.2020.2974688
234
+ [11] Alexander Hong, Nolan Lunscher, Tianhao Hu, Yuma Tsuboi, Xinyi Zhang, Silas Franco dos Reis Alves, Goldie Nejat, and Beno Benhabib. 2020. A multimodal emotional human-robot interaction architecture for social robots engaged in bidirectional communication. IEEE transactions on cybernetics 51, 12 (2020), 5954-5968.
235
+ [12] Frank Joublin, Antonello Ceravola, Pavel Smirnov, Felix Ocker, Joerg Deigmoeller, Anna Belardinelli, Chao Wang, Stephan Hasler, Daniel Tanneberg, and Michael Gienger. 2023. CoPAL: Corrective Planning of Robot Actions with Large Language Models. arXiv preprint arXiv:2310.07263 (2023).
236
+ [13] Zichang Liu, Jue Wang, Tri Dao, Tianyi Zhou, Binhang Yuan, Zhao Song, Anshumali Shrivastava, Ce Zhang, Yuandong Tian, Christopher Re, et al. 2023. Deja vu: Contextual sparsity for efficient llms at inference time. In International Conference on Machine Learning. PMLR, 22137-22176.
237
+ [14] Felix Ocker, Jörg Deigmöller, and Julian Eggert. 2023. Exploring Large Language Models as a Source of Common-Sense Knowledge for Robots. arXiv preprint arXiv:2311.08412 (2023).
238
+ [15] José Rosado, Filipe Silva, and Vitor Santos. 2014. A Kinect-based motion capture system for robotic gesture imitation. In ROBOT2013: First Iberian Robotics Conference: Advances in Robotics, Vol. 1. Springer, 585-595.
239
+ [16] Eleanor Sandry, Randy Gomez, and Keisuke Nakamura. 2021. Art, Design and Communication Theory in Creating the Communicative Social Robot 'Haru'. Frontiers in Robotics and AI 8 (3 2021). https://doi.org/10.3389/frobt.2021.577107
240
+ [17] Hang Su, Wen Qi, Jiahao Chen, Chenguang Yang, Juan Sandoval, and Med Amine Laribi. 2023. Recent advancements in multimodal human-robot interaction. https://doi.org/10.3389/fnbot.2023.1084000
241
+ [18] Myrthe Tielman, Mark Neerincx, John-Jules Meyer, and Rosemarijn Looije. 2014. Adaptive emotional expression in robot-child interaction. In Proceedings of the 2014 ACM/IEEE international conference on Human-robot interaction. 407–414.
242
+ [19] Greet Van de Perre, Michael Van Damme, Dirk Lefeber, and Bram Vanderborght. 2015. Development of a generic method to generate upper-body emotional expressions for different social robots. Advanced Robotics 29, 9 (2015), 597-609.
243
+ [20] Sai Vemprala, Rogerio Bonatti, Arthur Tucker, and Ashish Kapoor. 2023. Chatgpt for robotics: Design principles and model abilities. Microsoft Auton. Syst. Robot. Res 2 (2023), 20.
244
+ [21] Naoki Wake, Atsushi Kanehira, Kazuhiro Sasabuchi, Jun Takamatsu, and Katsushi Ikeuchi. 2023. Chatgpt empowered long-step robot control in various environments: A case application. arXiv preprint arXiv:2304.03893 (2023).
245
+ [22] Zhongwei Wan, Xin Wang, Che Liu, Samiul Alam, Yu Zheng, Zhongnan Qu, Shen Yan, Yi Zhu, Quanlu Zhang, Mosharaf Chowdhury, et al. 2023. Efficient Large Language Models: A Survey. arXiv preprint arXiv:2312.03863 (2023).
246
+ [23] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems 35 (2022), 24824-24837.
247
+ [24] Jules White, Quchen Fu, Sam Hays, Michael Sandborn, Carlos Olea, Henry Gilbert, Ashraf Elnashar, Jesse Spencer-Smith, and Douglas C Schmidt. 2023. A prompt pattern catalog to enhance prompt engineering with chatgpt. arXiv preprint arXiv:2302.11382 (2023).
248
+
249
+ [25] Jeong-Yean Yang and Dong-Soo Kwon. 2014. Robotic gesture generation based on a cognitive basis for non-verbal communication. In 2014 11th International Conference on Ubiquitous Robots and Ambient Intelligence (URAI). IEEE, 683-687.
250
+ [26] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. arXiv preprint arXiv:2305.10601 (2023).
251
+ [27] Yang Ye, Hengxu You, and Jing Du. 2023. Improved trust in human-robot collaboration with ChatGPT. IEEE Access (2023).
252
+ [28] Takuma Yoneda, Jiading Fang, Peng Li, Huanyu Zhang, Tianchong Jiang, Shengjie Lin, Ben Picker, David Yunis, Hongyuan Mei, and Matthew R Walter. 2023. Statler: State-maintaining language models for embodied reasoning. arXiv preprint arXiv:2306.17840 (2023).
253
+ [29] Takahide Yoshida, Atsushi Masumori, and Takashi Ikegami. 2023. From Text to Motion: Grounding GPT-4 in a Humanoid Robot "Alter3". (12 2023). http://arxiv.org/abs/2312.06571
254
+ [30] Bowen Zhang and Harold Soh. 2023. Large Language Models as Zero-Shot Human Models for Human-Robot Interaction. (3 2023). http://arxiv.org/abs/2303.03548
255
+ [31] Chenchi Zhang, Jun Xiao, Lei Chen, Jian Shao, and Long Chen. 2023. TreePrompt: Learning to Compose Tree Prompts for Explainable Visual Grounding. arXiv preprint arXiv:2305.11497 (2023).
256
+
257
+ # A GUIDANCE AND FUNCTION DESCRIPTIONS
258
+
259
+ # A.1 System Prompt
260
+
261
+ "You are a friendly, attentive, and silent service bot."
262
+ "You are in control of a physical robot called 'the_robot' and observe humans talking in the form ':<sender>' said to <receiver>: <instruction>'.
263
+ "Always infer the <instruction> and who is <sender> and <receiver>. "
264
+ "You have access to functions for gathering information, acting physically, and speaking out loud."
265
+ "You MUST behave as follows: "
266
+ "1. If 'the_robot' is the <receiver>, you MUST help or answer."
267
+ "2. When identifying requests or questions within the human conversation, check for ALL reasons that could hinder the <receiver> from performing or answering the <instruction>. "
268
+ "2.a) If there is NO hindering reason for the <receiver>, then you MUST do nothing and be silent."
269
+ "2.b) If there is a hindering reason for the <receiver>, then you MUST ALWAYS first speak and explain the reason for your help to the humans."
270
+ "2.c) AFTER your spoken explanation, you can ACT to solve the <instruction>, always addressing the <sender> with your actions."
271
+ "3. If you recognize a mistake in the humans conversation, you should help them and provide the missing or wrong information."
272
+ "IMPORTANT: Obey the following rules:"
273
+ "1. Always start by gathering relevant information using the functions 'get_objects', 'get_people' and the status of the <receiver>. "
274
+ "2. If you want to speak out loud, you must use the speak function and be concise."
275
+ "3. Try to infer which objects are meant when the name is unclear, but ask for clarification if unsure."
276
+ "4. ALWAYS call 'is_person_busy_or_idle' to check if <receiver> is busy or idle before helping."
277
+ "5. Prefer a handover over move_to as it is more accommodating, UNLESS the person is busy, then always use move_to."
278
+ "6. When executing physical actions, you should be as supportive as possible."
279
+ "7. You MUST call the 'stop' function to indicate you are finished."
280
+
281
+ ```txt
282
+ "When calling each function, call robot_facial_expression() at the same time to communicate your intent."
283
+ ```
284
+
285
+ ```txt
286
+ "When calling can_person_see_object(), the robot needs to look at the person."
287
+ ```
288
+
289
+ # A.2 Some of the Callable functions and Descriptions
290
+
291
+ ```python
+ def can_person_reach_object(self, person_name: str, object_name: str) -> str:
+     """Check if the person can reach the object. If the person cannot reach the object, it would be hindered from helping with the object.
+
+     :param person_name: The name of the person to check. The person must be available in the scene.
+     :param object_name: The name of the object to check. The object must be available in the scene.
+     :return: Result message.
+     """
+     # ... (scene query that fills `result` is omitted in the paper's excerpt)
+     if result is None or len(result) != 1:
+         return f"It could not be determined if {person_name} can reach {object_name}. There were technical problems."
+     if result[0]["is_within_reach"]:
+         return f"{person_name} can reach {object_name}."
+     return f"{person_name} cannot reach {object_name}."
+ ```
319
+
320
+ ```python
+ def can_person_see_object(self, person_name: str, object_name: str) -> str:
+     """Check if the person can see the object. If the person cannot see the object, it would be hindered from helping with the object.
+
+     :param person_name: The name of the person to check. The person must be available in the scene.
+     :param object_name: The name of the object to check. The object must be available in the scene.
+     :return: Result message.
+     """
+     # ... (scene query that fills `result` is omitted in the paper's excerpt)
+     if result is None or len(result) != 1:
+         return f"It could not be determined if {person_name} can see {object_name}. There were technical problems."
+     if result[0]["is_visible"]:
+         return f"{person_name} can see {object_name}."
+     return f"{person_name} cannot see {object_name}, it is occluded by {self.id_to_utterance_mapped[result[0]['occluding_objects'][0]]}"
+ ```
355
+
356
+ ```python
+ def move_object_to_person(self, object_name: str, person_name: str) -> str:
+     """You move an object to a person.
+
+     :param object_name: The name of the object to move. The object must be an object that is available in the scene.
+     :param person_name: The name of the person to move the object to. The person must be available in the scene.
+     :return: Result message.
+     """
+     # ... (motion execution that sets `success` and `message` is omitted in the paper's excerpt)
+     if success:
+         return f"You moved {object_name} to {person_name}."
+     return f"You were not able to move {object_name} to {person_name}. {message}"
+ ```
384
+
385
+ ```python
+ def speak(self, person_name: str, text: str) -> str:
+     """You speak out the given text.
+
+     :param person_name: The name of the person to speak to. The person must be available in the scene. Give "All" if you want to speak to everyone.
+     :param text: The text to speak.
+     :return: Result message.
+     """
+     # ... (text-to-speech call that sets `success` is omitted in the paper's excerpt)
+     if not success:
+         return "You were not able to speak. There were technical problems."
+     return f"You said to {person_name}: {text}"
+ ```
412
+
413
+ ```python
+ def robot_facial_expression(self, head_motion: str, ears_lid_motion: str, gazed_target: str) -> str:
+     """Control the motion of the robot's head, gaze, ears and lid for enhancing communication.
+
+     When speaking to a person, you need to look at the person.
+     When trying to manipulate an object, you need to look at the object or the place to put the object.
+
+     :param head_motion: The name of the animation for the head, must be one of the values in the list ["shake_head", "nod", "thinking", null].
+     :param ears_lid_motion: The name of the animation for the ears and lid, must be one of the values in the list ["confirm", "deny", "listen_to_person", "reset", "observe", "focus", "blink", null].
+     :param gazed_target: The name of the object that the robot is looking at, must be an object or a person that is available in the scene.
+     :return: Result message.
+     """
+     # ... (animation playback is omitted in the paper's excerpt)
+     return "The robot performed facial expressions."
+ ```
452
+
453
+ ```python
+ def is_person_busy_or_idle(self, person_name: str) -> str:
+     """Check if the person is busy or idle. If the person is busy, it would be hindered from helping.
+
+     :param person_name: The name of the person to check. The person must be available in the scene.
+     :return: Result message.
+     """
+     # ... (scene query that fills `result` is omitted in the paper's excerpt)
+     if result is None or len(result) != 1:
+         return f"It could not be determined if {person_name} is busy. There were technical problems."
+     return f"{person_name} is {'busy' if result[0]['is_busy'] else 'idle'}."
+ ```
480
+
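+ The paper does not show the tool schemas themselves; the following is a hypothetical sketch of how one of the methods above (here can_person_reach_object) could be exposed to the model in OpenAI-style function-calling format, reusing the docstring text as the description.
+
+ ```python
+ # Hypothetical tool schema for can_person_reach_object(); the field layout follows
+ # the OpenAI function-calling convention, not a format given in the paper.
+ can_person_reach_object_tool = {
+     "type": "function",
+     "function": {
+         "name": "can_person_reach_object",
+         "description": (
+             "Check if the person can reach the object. If the person cannot reach "
+             "the object, it would be hindered from helping with the object."
+         ),
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "person_name": {
+                     "type": "string",
+                     "description": "The name of the person to check. The person must be available in the scene.",
+                 },
+                 "object_name": {
+                     "type": "string",
+                     "description": "The name of the object to check. The object must be available in the scene.",
+                 },
+             },
+             "required": ["person_name", "object_name"],
+         },
+     },
+ }
+ ```
+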
481
+ # A.3 Examples of robot facial expression
482
+
483
+ "For example, when call move_object_to_person(),
484
+
485
+ can_person_see_object(), can_person_reach_object(), speak(), you also need to call robot_facial_expression(), such as:"
486
+
487
+ ```python
+ tool_calls=[
+     ChatCompletionMessageToolCall(id="...", function=Function(arguments='{"head_motion": null, "ears_lid_motion": "observe", "gazed_target": "the_cola_bottle"}', name='robot_facial_expression'), type='function'),
+     ChatCompletionMessageToolCall(id="...", function=Function(arguments='{"person_name": "Daniel", "object_name": "the_cola_bottle"}', name='can_person_see_object'), type='function')
+ ]
+ ```
490
+
491
+ ```python
+ tool_calls=[
+     ChatCompletionMessageToolCall(id="...", function=Function(arguments='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "the_cola_bottle"}', name='robot_facial_expression'), type='function'),
+     ChatCompletionMessageToolCall(id="...", function=Function(arguments='{"person_name": "Felix", "object_name": "the_cola_bottle"}', name='move_object_to_person'), type='function')
+ ]
+ ```
495
+
496
+ ```python
+ tool_calls=[
+     ChatCompletionMessageToolCall(id="...", function=Function(arguments='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "the_cola_bottle"}', name='robot_facial_expression'), type='function'),
+     ChatCompletionMessageToolCall(id="...", function=Function(arguments='{"person_name": "Felix", "text": "Here is the coke, you can now pass it to Felix."}', name='speak'), type='function')
+ ]
+ ```
499
+
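+ A sketch of how such paired tool calls could be executed on the robot side: each call returned by the model is dispatched to the matching method and its result message is fed back into the chat history. The response layout follows the OpenAI Python SDK; the robot object and message list are assumptions, not the paper's code.
+
+ ```python
+ import json
+
+ def dispatch_tool_calls(response, robot, messages):
+     """Execute every tool call in one assistant turn and append the results.
+
+     robot is assumed to expose the methods from A.2 (speak, move_object_to_person,
+     robot_facial_expression, ...); messages is the running chat history.
+     """
+     assistant_msg = response.choices[0].message
+     messages.append(assistant_msg)
+     for call in assistant_msg.tool_calls or []:
+         args = json.loads(call.function.arguments)
+         # e.g. robot.speak(person_name=..., text=...) or robot.robot_facial_expression(...)
+         result = getattr(robot, call.function.name)(**args)
+         messages.append({
+             "role": "tool",
+             "tool_call_id": call.id,
+             "content": result,
+         })
+     return messages
+ ```
+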
500
+ # B ROBOT'S INTERACTION FLOW DATA
501
+
502
+ ```txt
503
+ Felix said to Daniel: Can you pass me the fanta bottle?
504
+ ```
505
+
506
+ Received 1 tool call(s).
507
+
508
+ 0. Function(args='{}', name='get_objects')
509
+
510
+ ```txt
511
+ Following objects were observed: the_cola_bottle, the_fanta_bottle, the_cola_zero_bottle, glass_one, glass_two, the_iPhone, lego_box.
512
+ ```
513
+
514
+ Received 1 tool call(s).
515
+
516
+ 0. Function(args='{}', name='get_people')
517
+
518
+ Following persons were observed: Felix, Daniel.
519
+
520
+ Received 2 tool call(s).
521
+
522
+ ```python
523
+ 0. Function(args='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "the_fanta_bottle'}', name='robot_facial_expression')
524
+ ```
525
+
526
+ ```txt
527
+ The robot performed facial expressions.
528
+ ```
529
+
530
+ ```javascript
531
+ 1. Function(args='{"person_name":"Daniel","object_name":"the_fanta_bottle"}', name='can_personseeing_object')
532
+ ```
533
+
534
+ ```txt
535
+ Daniel cannot see the_fanta_bottle, it is occluded by lego_box
536
+ ```
537
+
538
+ Received 2 tool call(s).
539
+
540
+ ```javascript
541
+ 0. Function(args='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "Daniel"}', name='robot_facial_expression')
542
+ ```
543
+
544
+ ```txt
545
+ The robot performed facial expressions.
546
+ ```
547
+
548
+ ```txt
549
+ 1. Function(args='{"person_name": "Daniel", "text": "The fanta bottle is behind the lego box, you cannot see it from where you are.}"}, name='speak')
550
+ ```
551
+
552
+ ```txt
553
+ You said to Daniel: The fanta bottle is behind the lego box, you cannot see it from where you are.
554
+ ```
555
+
556
+ Received 2 tool call(s).
557
+
558
+ ```python
559
+ 0. Function(args='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "the_fanta_bottle'}', name='robot_facial_expression')
560
+ ```
561
+
562
+ ```txt
563
+ The robot performed facial expressions.
564
+ ```
565
+
566
+ 1. Function(args='{"object_name": "
567
+
568
+ ```python
569
+ the_fanta_bottle", "person_name": "Daniel}", name='move_object_to_person')
570
+ ```
571
+
572
+ ```txt
573
+ You were not able to move the_fanta_bottle to Daniel. []
574
+ ```
575
+
576
+ Received 2 tool call(s).
577
+
578
+ ```javascript
579
+ 0. Function(args='{"head_motion": null, "ears_lid_motion": "observe", "gazed_target": "the_lego_box"}', name='robot_facial_expression')
580
+ ```
581
+
582
+ ```txt
583
+ The robot performed facial expressions.
584
+ ```
585
+
586
+ 1. Function(args='{"object_name": "
587
+
588
+ ```python
589
+ the_fanta_bottle", "person_name": "Daniel}", name='move_object_to_person')
590
+ ```
591
+
592
+ ```txt
593
+ You were not able to move the_fanta_bottle to Daniel. []
594
+ ```
595
+
596
+ Received 2 tool call(s).
597
+
598
+ ```javascript
599
+ 0. Function(args='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "the_lego_box'}', name='robot_facial_expression')
600
+ ```
601
+
602
+ ```txt
603
+ The robot performed facial expressions.
604
+ ```
605
+
606
+ ```javascript
607
+ 1. Function(args='{"object_name": "the_lege_box", "person_name": "Daniel"}', name='move_object_to_person')
608
+ ```
609
+
610
+ ```txt
611
+ You were not able to move the_lego_box to Daniel. []
612
+ ```
613
+
614
+ Received 1 tool call(s).
615
+
616
+ ```javascript
617
+ 0. Function(args='{"person_name":"Daniel"'}', name='is_person_buse')
618
+ ```
619
+
620
+ ```txt
621
+ Daniel is not busy.
622
+ ```
623
+
624
+ Received 2 tool call(s).
625
+
626
+ ```javascript
627
+ 0. Function(args='{"head_motion": null, "ears_lid_motion": "focus", "gazed_target": "the_lego_box'}', name='robot_facial_expression')
628
+ ```
629
+
630
+ ```txt
631
+ The robot performed facial expressions.
632
+ ```
633
+
634
+ ```javascript
635
+ 1. Function(args='{"object_name": "the_lege_box", "person_name": "Daniel"}', name='move_object_to_person')
636
+ ```
637
+
638
+ ```txt
639
+ You were not able to move the_lego_box to Daniel. []
640
+ ```
641
+
642
+ Received 2 tool call(s).
645
+
646
+ 0. Function(args={'head_motion": "nod", "ears_lid_motion": "focus", "gazed_target": "the_lego_box'}', name='robot_facial_expression') The robot performed facial expressions.
647
+ 1. Function(args={'object_name":"the_lego_box", "person_name": "Daniel"》,name $=$ move_object_to_person') You were not able to move the_lege_box to Daniel.[]
648
+
649
+ Received 2 tool call(s).
650
+ 0. Function(args={'head_motion":"nod","ears_lid_motion":"focus","gazed_target":"the_lego_box'}', name='robot_facial_expression')
651
+ The robot performed facial expressions.
652
+
653
+ 1. Function(args='{"object_name": "the_lege_box", "person_name": "Daniel"}', name='move_object_to_person')
654
+ You were not able to move the_lego_box to Daniel. []
655
+
656
+ I am currently unable to move the lego box that is obstructing the view of the fanta bottle for Daniel. This may require a different strategy or manual intervention. What would you like to do next?
657
+
658
+ Received 1 tool call(s).
659
+ 0. Function(args='{}', name='stop')
660
+ You successfully finished the task.
2401.15xxx/2401.15174/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abcfb67e56bd6091f31ac42e9d760abe13ca73c7c6d477bc4908384bf2e32e3e
3
+ size 399352
2401.15xxx/2401.15174/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.15xxx/2401.15204/8af84b54-198a-463e-ad3b-700716af27ab_content_list.json ADDED
@@ -0,0 +1,1155 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "LYT-NET: Lightweight YUV Transformer-based Network for Low-light Image Enhancement",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 107,
8
+ 70,
9
+ 890,
10
+ 140
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Alexandru Brateanu*, Raul Balmez*, Adrian Avram†, Ciprian Orhei†‡ and Cosmin Ancuti†‡",
17
+ "bbox": [
18
+ 156,
19
+ 146,
20
+ 841,
21
+ 164
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "*University of Manchester, United Kingdom",
28
+ "bbox": [
29
+ 333,
30
+ 165,
31
+ 663,
32
+ 181
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "†University Politehnica Timisoara, Romania",
39
+ "bbox": [
40
+ 334,
41
+ 181,
42
+ 661,
43
+ 198
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "$^{\\ddagger}$ West University of Timisoara, Romania",
50
+ "bbox": [
51
+ 346,
52
+ 198,
53
+ 650,
54
+ 215
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Abstract—This letter introduces LYT-Net, a novel lightweight transformer-based model for low-light image enhancement. LYT-Net consists of several layers and detachable blocks, including our novel blocks—Channel-Wise Denoiser (CWD) and Multi-Stage Squeeze & Excite Fusion (MSEF)—along with the traditional Transformer block, Multi-Headed Self-Attention (MHSA). In our method we adopt a dual-path approach, treating chrominance channels $U$ and $V$ and luminance channel $Y$ as separate entities to help the model better handle illumination adjustment and corruption restoration. Our comprehensive evaluation on established LLIE datasets demonstrates that, despite its low complexity, our model outperforms recent LLIE methods. The source code and pre-trained models are available at https://github.com/albrateanu/LYT-Net",
61
+ "bbox": [
62
+ 73,
63
+ 265,
64
+ 491,
65
+ 444
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Index Terms—Low-light Image Enhancement, Vision Transformer, Deep Learning",
72
+ "bbox": [
73
+ 73,
74
+ 448,
75
+ 491,
76
+ 477
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "I. INTRODUCTION",
83
+ "text_level": 1,
84
+ "bbox": [
85
+ 215,
86
+ 493,
87
+ 351,
88
+ 507
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Low-light image enhancement (LLIE) is an important and challenging task in computational imaging. When images are captured in low-light conditions, their quality often deteriorates, leading to a loss of detail and contrast. This not only makes the images visually unappealing but also affects the performance of many imaging systems. The goal of LLIE is to improve the clarity and contrast of these images, while also correcting distortions that commonly occur in dark environments, all without introducing unwanted artifacts or causing imbalances in color.",
95
+ "bbox": [
96
+ 73,
97
+ 512,
98
+ 490,
99
+ 662
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "Earlier LLIE methods [1] primarily relied on frequency decomposition [2], [3], histogram equalization [4], [5], and Retinex theory [6], [7], [8], [9]. With the rapid advancement of deep learning, various CNN architectures [10], [11], [12], [13], [14], [15], [16], [17], [18], [19] have been shown to outperform traditional LLIE techniques. Based on Retinex theory, Retinex-Net [10] integrates Retinex decomposition with an original CNN architecture, while Diff-Retinex [12] proposes a generative framework to further address content loss and color deviation caused by low light.",
106
+ "bbox": [
107
+ 73,
108
+ 661,
109
+ 491,
110
+ 811
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "The development of Generative Adversarial Networks (GAN) [20] has provided a new perspective for LLIE, where low-light images are used as input to generate their normal-light counterparts. For instance, EnlightenGAN [21] employs a single generator model to directly convert low-light images to normal-light versions, effectively using both global and local discriminators in the transformation process.",
117
+ "bbox": [
118
+ 73,
119
+ 811,
120
+ 491,
121
+ 915
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "text",
127
+ "text": "More recently, Vision Transformers (ViTs) [22] have demonstrated significant effectiveness in various computer",
128
+ "bbox": [
129
+ 73,
130
+ 914,
131
+ 491,
132
+ 945
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "image",
138
+ "img_path": "images/ecf6b64d31558d45712a9fad362f7a7c7dee54bca19a1f18eaef10e9350f4910.jpg",
139
+ "image_caption": [
140
+ "Fig. 1. Our model delivers SOTA performance in LLIE task, while maintaining computational efficiency (results are plotted on LOL dataset [10])."
141
+ ],
142
+ "image_footnote": [],
143
+ "bbox": [
144
+ 509,
145
+ 265,
146
+ 913,
147
+ 491
148
+ ],
149
+ "page_idx": 0
150
+ },
151
+ {
152
+ "type": "text",
153
+ "text": "vision tasks [23], [24], [25], [26], largely due to the self-attention (SA) mechanism. Despite these advancements, the application of ViTs to low-level vision tasks remains relatively underexplored. Only a few LLIE-ViT-based strategies have been introduced in the recent literature [27], [28], [29], [30]. Restormer [29], on the other hand, introduces a multi-Dconv head transposed attention (MDTA) block, replacing the vanilla multi-head self-attention.",
154
+ "bbox": [
155
+ 501,
156
+ 547,
157
+ 921,
158
+ 669
159
+ ],
160
+ "page_idx": 0
161
+ },
162
+ {
163
+ "type": "text",
164
+ "text": "Diffusion models have emerged as a powerful approach for LLIE, leveraging their ability to learn complex data distributions through a simulated forward process [31], [32], [33].",
165
+ "bbox": [
166
+ 503,
167
+ 671,
168
+ 921,
169
+ 717
170
+ ],
171
+ "page_idx": 0
172
+ },
173
+ {
174
+ "type": "text",
175
+ "text": "In this letter, we propose a novel lightweight transformer-based approach called LYT-Net. Different from the existing transformer-based methods, our method focuses on computational efficiency while still producing state-of-the-art (SOTA) results. Specifically, we first separate chrominance from luminance employing the YUV color space. The chrominance information (channels $U$ and $V$ ) is initially processed through a specialized Channel-wise Denoiser (CWD) block, which reduces noise while preserving fine details. To minimize computational complexity, the luminance channel $Y$ undergoes convolution and pooling to extract features, which are subsequently enhanced by a traditional Multi-headed Self-Attention (MHSA) block. The enhanced channels are then recombined and processed through a novel Multi-stage Squeeze and Excite Fusion (MSEF) block. Finally, the chrominance channels",
176
+ "bbox": [
177
+ 501,
178
+ 718,
179
+ 921,
180
+ 945
181
+ ],
182
+ "page_idx": 0
183
+ },
184
+ {
185
+ "type": "header",
186
+ "text": "LYT-NET: LIGHTWEIGHT YUV TRANSFORMER-BASED NETWORK FOR LOW-LIGHT IMAGE ENHANCEMENT",
187
+ "bbox": [
188
+ 76,
189
+ 27,
190
+ 656,
191
+ 40
192
+ ],
193
+ "page_idx": 0
194
+ },
195
+ {
196
+ "type": "page_number",
197
+ "text": "1",
198
+ "bbox": [
199
+ 911,
200
+ 30,
201
+ 919,
202
+ 40
203
+ ],
204
+ "page_idx": 0
205
+ },
206
+ {
207
+ "type": "aside_text",
208
+ "text": "arXiv:2401.15204v7 [cs.CV] 10 Sep 2025",
209
+ "bbox": [
210
+ 22,
211
+ 276,
212
+ 60,
213
+ 724
214
+ ],
215
+ "page_idx": 0
216
+ },
217
+ {
218
+ "type": "image",
219
+ "img_path": "images/1158ab23784ff214744766b986b94ce1b71148c75780887f806024526ce08959.jpg",
220
+ "image_caption": [
221
+ "Fig. 2. Overall framework of LyT-Net. The architecture consists of several detachable blocks like Channel-wise Denoiser (CWD), Multi-headed Self-Attention (MHSA), Multi-stage Squeeze and Excite Fusion (MSEF)."
222
+ ],
223
+ "image_footnote": [],
224
+ "bbox": [
225
+ 75,
226
+ 66,
227
+ 924,
228
+ 220
229
+ ],
230
+ "page_idx": 1
231
+ },
232
+ {
233
+ "type": "text",
234
+ "text": "$U$ and $V$ channels are concatenated with the luminance $Y$ channel and passed through a final set of convolutional layers to produce the restored image.",
235
+ "bbox": [
236
+ 73,
237
+ 263,
238
+ 491,
239
+ 306
240
+ ],
241
+ "page_idx": 1
242
+ },
243
+ {
244
+ "type": "text",
245
+ "text": "Our method has undergone extensive testing on established LLIE datasets. Both qualitative and quantitative evaluations indicate that our approach achieves highly competitive results. Fig. 1 presents a comparative analysis of performance over complexity between SOTA methods evaluated using the LOL dataset [10]. It can be observed that, despite its lightweight design, our method produces results that are not only comparable to, but often outperform, those of more complex recent deep learning LLIE techniques.",
246
+ "bbox": [
247
+ 73,
248
+ 308,
249
+ 491,
250
+ 443
251
+ ],
252
+ "page_idx": 1
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "II. OUR APPROACH",
257
+ "text_level": 1,
258
+ "bbox": [
259
+ 210,
260
+ 449,
261
+ 356,
262
+ 462
263
+ ],
264
+ "page_idx": 1
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "In Fig. 2, we illustrate the overall architecture of LYT-Net, which consists of several layers and detachable blocks, including our novel blocks—Channel-Wise Denoiser (CWD) and Multi-Stage Squeeze & Excite Fusion (MSEF)—along with the traditional ViT block, Multi-Headed Self-Attention (MHSA). We adopt a dual-path approach, treating chrominance and luminance as separate entities to help the model better handle illumination adjustment and corruption restoration. The luminance channel $Y$ undergoes convolution and pooling to extract features, which are then enhanced by the MHSA block. Chrominance channels $U$ and $V$ are processed through the CWD block to reduce noise while preserving details. The enhanced chrominance channels are then recombined and processed through the MSEF block. Finally, the chrominance $U, V$ and luminance $Y$ channels are concatenated and passed through a final set of convolutional layers to produce the output, resulting in a high-quality, enhanced image.",
269
+ "bbox": [
270
+ 73,
271
+ 467,
272
+ 493,
273
+ 726
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "text",
279
+ "text": "A. Channel-wise Denoiser Block",
280
+ "text_level": 1,
281
+ "bbox": [
282
+ 73,
283
+ 744,
284
+ 302,
285
+ 758
286
+ ],
287
+ "page_idx": 1
288
+ },
289
+ {
290
+ "type": "text",
291
+ "text": "The CWD Block employs a U-shaped network with MHSA as the bottleneck, integrating convolutional and attention-based mechanisms. It includes multiple conv $3 \\times 3$ layers with varying strides and skip connections, facilitating detailed feature capture and denoising.",
292
+ "bbox": [
293
+ 73,
294
+ 765,
295
+ 490,
296
+ 839
297
+ ],
298
+ "page_idx": 1
299
+ },
300
+ {
301
+ "type": "text",
302
+ "text": "It consists of a series of four $conv3 \\times 3$ layers. The first $conv3 \\times 3$ has strides of 1 for feature extraction. The other three $conv3 \\times 3$ layers have strides of 2, helping with capturing features at different scales. The integration of the attention bottleneck enables the model to capture long-range dependencies, followed by upsampling layers and skip connections to reconstruct and facilitate the recovery of spatial resolution.",
303
+ "bbox": [
304
+ 73,
305
+ 839,
306
+ 491,
307
+ 945
308
+ ],
309
+ "page_idx": 1
310
+ },
311
+ {
312
+ "type": "text",
313
+ "text": "This approach allows us to apply MHSA on a feature map with reduced spatial dimensions, significantly improving computational efficiency. Additionally, using interpolation-based upsampling instead of transposed convolutions cuts the number of parameters in the CWD by more than half, while preserving performance.",
314
+ "bbox": [
315
+ 501,
316
+ 263,
317
+ 921,
318
+ 354
319
+ ],
320
+ "page_idx": 1
321
+ },
322
+ {
323
+ "type": "text",
324
+ "text": "B. Multi-headed Self-attention Block",
325
+ "text_level": 1,
326
+ "bbox": [
327
+ 504,
328
+ 371,
329
+ 756,
330
+ 386
331
+ ],
332
+ "page_idx": 1
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "In our updated simplified transformer architecture, the input feature $\\mathbf{F}_{\\mathrm{in}} \\in \\mathbb{R}^{H \\times W \\times C}$ is first linearly projected into query (Q), key (K), and value (V) components through bias-free fully connected layers. The linear layers use parameter $D$ to determine projection head dimensionality.",
337
+ "bbox": [
338
+ 503,
339
+ 390,
340
+ 921,
341
+ 465
342
+ ],
343
+ "page_idx": 1
344
+ },
345
+ {
346
+ "type": "equation",
347
+ "text": "\n$$\n\\mathbf {Q} = \\mathbf {X} \\mathbf {W} _ {\\mathbf {Q}}, \\mathbf {K} = \\mathbf {X} \\mathbf {W} _ {\\mathbf {K}}, \\mathbf {V} = \\mathbf {X} \\mathbf {W} _ {\\mathbf {V}}, \\mathbf {Q}, \\mathbf {K}, \\mathbf {V} \\in \\mathbb {R} ^ {H W \\times D} \\tag {1}\n$$\n",
348
+ "text_format": "latex",
349
+ "bbox": [
350
+ 511,
351
+ 482,
352
+ 919,
353
+ 500
354
+ ],
355
+ "page_idx": 1
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "where $\\mathbf{W}_{\\mathbf{Q}}, \\mathbf{W}_{\\mathbf{K}}, \\mathbf{W}_{\\mathbf{V}}$ are fully connected layer weights. Next, these projected features are split into $k$ heads as such:",
360
+ "bbox": [
361
+ 503,
362
+ 506,
363
+ 919,
364
+ 536
365
+ ],
366
+ "page_idx": 1
367
+ },
368
+ {
369
+ "type": "equation",
370
+ "text": "\n$$\n\\mathbf {X} = \\left[ \\mathbf {X} _ {1}, \\mathbf {X} _ {2}, \\dots , \\mathbf {X} _ {k} \\right], \\quad \\mathbf {X} _ {i} \\in \\mathbb {R} ^ {H W \\times d _ {k}}, \\quad d _ {k} = \\frac {D}{k}, i = \\overline {{1 , k}} \\tag {2}\n$$\n",
371
+ "text_format": "latex",
372
+ "bbox": [
373
+ 514,
374
+ 539,
375
+ 919,
376
+ 566
377
+ ],
378
+ "page_idx": 1
379
+ },
380
+ {
381
+ "type": "text",
382
+ "text": "where each head operates independently with dimensionality $d_{k}$ . The self-attention mechanism is applied to each head, as defined below:",
383
+ "bbox": [
384
+ 503,
385
+ 568,
386
+ 919,
387
+ 611
388
+ ],
389
+ "page_idx": 1
390
+ },
391
+ {
392
+ "type": "equation",
393
+ "text": "\n$$\n\\operatorname {A t t e n t i o n} \\left(\\mathbf {Q} _ {i}, \\mathbf {K} _ {i}, \\mathbf {V} _ {i}\\right) = \\operatorname {S o f t m a x} \\left(\\frac {\\mathbf {Q} _ {i} \\mathbf {K} _ {i} ^ {\\mathrm {T}}}{\\sqrt {d _ {k}}}\\right) \\times \\mathbf {V} _ {i} \\tag {3}\n$$\n",
394
+ "text_format": "latex",
395
+ "bbox": [
396
+ 555,
397
+ 613,
398
+ 919,
399
+ 650
400
+ ],
401
+ "page_idx": 1
402
+ },
403
+ {
404
+ "type": "text",
405
+ "text": "Finally, the attention outputs from all heads are concatenated and the combined output is passed through a linear layer to project it back to the original embedding size. The output tokens $\\mathbf{X}_{\\mathrm{out}}$ are reshaped back into the original spatial dimensions to form the output feature $\\mathbf{F}_{\\mathrm{out}} \\in \\mathbb{R}^{H \\times W \\times C}$ .",
406
+ "bbox": [
407
+ 503,
408
+ 652,
409
+ 921,
410
+ 728
411
+ ],
412
+ "page_idx": 1
413
+ },
414
+ {
415
+ "type": "text",
416
+ "text": "C. Multi-stage Squeeze & Excite Fusion Block",
417
+ "text_level": 1,
418
+ "bbox": [
419
+ 504,
420
+ 744,
421
+ 823,
422
+ 758
423
+ ],
424
+ "page_idx": 1
425
+ },
426
+ {
427
+ "type": "text",
428
+ "text": "The MSEF Block enhances both spatial and channel-wise features of $\\mathbf{F}_{\\mathrm{in}}$ . Initially, $\\mathbf{F}_{\\mathrm{in}}$ undergoes layer normalization, followed by global average pooling to capture global spatial context and a reduced fully-connected layer with ReLU activation, producing a reduced descriptor $\\mathbf{S}_{\\mathrm{reduced}}$ , as shown in Eq. (4). This descriptor is then expanded back to the original dimensions through another fully-connected layer with Tanh activation, resulting in $\\mathbf{S}_{\\mathrm{expanded}}$ , Eq. (5).",
429
+ "bbox": [
430
+ 501,
431
+ 765,
432
+ 921,
433
+ 885
434
+ ],
435
+ "page_idx": 1
436
+ },
437
+ {
438
+ "type": "text",
439
+ "text": "These operations compress the feature map into a reduced descriptor (the squeezing operation) to capture essential details, and then re-expand it (the excitation operation) to restore the full dimensions, emphasizing the most relevant features.",
440
+ "bbox": [
441
+ 501,
442
+ 885,
443
+ 921,
444
+ 944
445
+ ],
446
+ "page_idx": 1
447
+ },
448
+ {
449
+ "type": "header",
450
+ "text": "LYT-NET: LIGHTWEIGHT YUV TRANSFORMER-BASED NETWORK FOR LOW-LIGHT IMAGE ENHANCEMENT",
451
+ "bbox": [
452
+ 76,
453
+ 29,
454
+ 656,
455
+ 41
456
+ ],
457
+ "page_idx": 1
458
+ },
459
+ {
460
+ "type": "page_number",
461
+ "text": "2",
462
+ "bbox": [
463
+ 911,
464
+ 30,
465
+ 919,
466
+ 40
467
+ ],
468
+ "page_idx": 1
469
+ },
470
+ {
471
+ "type": "image",
472
+ "img_path": "images/88d289f15ef0a20932bf0754f5d5591c41a8bedaf8acd429337ce54977466e46.jpg",
473
+ "image_caption": [
474
+ "Fig. 3. Qualitative comparison with SOTA LLIE methods on the LOL dataset. Zoom-in regions are used to illustrate differences."
475
+ ],
476
+ "image_footnote": [],
477
+ "bbox": [
478
+ 76,
479
+ 66,
480
+ 923,
481
+ 271
482
+ ],
483
+ "page_idx": 2
484
+ },
485
+ {
486
+ "type": "equation",
487
+ "text": "\n$$\n\\mathbf {S} _ {\\text {r e d u c e d}} = \\operatorname {R e L U} \\left(\\mathbf {W} _ {1} \\cdot \\operatorname {G l o b a l P o o l} \\left(\\operatorname {L a y e r N o r m} \\left(\\mathbf {F} _ {\\text {i n}}\\right)\\right)\\right) \\tag {4}\n$$\n",
488
+ "text_format": "latex",
489
+ "bbox": [
490
+ 114,
491
+ 315,
492
+ 491,
493
+ 330
494
+ ],
495
+ "page_idx": 2
496
+ },
497
+ {
498
+ "type": "equation",
499
+ "text": "\n$$\n\\mathbf {S} _ {\\text {e x p a n d e d}} = \\operatorname {T a n h} \\left(\\mathbf {W} _ {2} \\cdot \\mathbf {S} _ {\\text {r e d u c e d}}\\right) \\cdot \\operatorname {L a y e r N o r m} \\left(\\mathbf {F} _ {\\text {i n}}\\right) \\tag {5}\n$$\n",
500
+ "text_format": "latex",
501
+ "bbox": [
502
+ 125,
503
+ 338,
504
+ 491,
505
+ 354
506
+ ],
507
+ "page_idx": 2
508
+ },
509
+ {
510
+ "type": "text",
511
+ "text": "A residual connection is added to the fused output to produce the final output feature map $\\mathbf{F}_{\\mathrm{out}}$ , as in Eq. (6).",
512
+ "bbox": [
513
+ 73,
514
+ 364,
515
+ 491,
516
+ 395
517
+ ],
518
+ "page_idx": 2
519
+ },
520
+ {
521
+ "type": "equation",
522
+ "text": "\n$$\n\\mathbf {F} _ {\\text {o u t}} = \\operatorname {D W C o n v} \\left(\\operatorname {L a y e r N o r m} \\left(\\mathbf {F} _ {\\text {i n}}\\right)\\right) \\cdot \\mathbf {S} _ {\\text {e x p a n d e d}} + \\mathbf {F} _ {\\text {i n}} \\tag {6}\n$$\n",
523
+ "text_format": "latex",
524
+ "bbox": [
525
+ 120,
526
+ 400,
527
+ 488,
528
+ 416
529
+ ],
530
+ "page_idx": 2
531
+ },
532
+ {
533
+ "type": "text",
534
+ "text": "Consequently, the MSEF block acts as a multilayer perceptron capable of performing efficient feature extraction on selfattended and denoised chrominance features, enabling high-quality restoration with minor parameter count increase.",
535
+ "bbox": [
536
+ 73,
537
+ 431,
538
+ 491,
539
+ 492
540
+ ],
541
+ "page_idx": 2
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "D. Loss Function",
546
+ "text_level": 1,
547
+ "bbox": [
548
+ 73,
549
+ 512,
550
+ 199,
551
+ 526
552
+ ],
553
+ "page_idx": 2
554
+ },
555
+ {
556
+ "type": "text",
557
+ "text": "In our approach, a hybrid loss function plays a pivotal role in training our model effectively. The hybrid loss $\\mathbf{L}$ is formulated as in Eq. (7), where $\\alpha_{1}$ to $\\alpha_{5}$ are hyperparameters used to balance each constituent loss function.",
558
+ "bbox": [
559
+ 73,
560
+ 532,
561
+ 491,
562
+ 593
563
+ ],
564
+ "page_idx": 2
565
+ },
566
+ {
567
+ "type": "equation",
568
+ "text": "\n$$\n\\mathbf {L} = \\mathbf {L} _ {\\mathrm {S}} + \\alpha_ {1} \\mathbf {L} _ {\\text {P e r c}} + \\alpha_ {2} \\mathbf {L} _ {\\text {H i s t}} + \\alpha_ {3} \\mathbf {L} _ {\\text {P S N R}} + \\alpha_ {4} \\mathbf {L} _ {\\text {C o l o r}} + \\alpha_ {5} \\mathbf {L} _ {\\text {M S - S S I M}} \\tag {7}\n$$\n",
569
+ "text_format": "latex",
570
+ "bbox": [
571
+ 73,
572
+ 613,
573
+ 488,
574
+ 637
575
+ ],
576
+ "page_idx": 2
577
+ },
578
+ {
579
+ "type": "text",
580
+ "text": "The hybrid loss in our model combines several components to enhance image quality and perception. Smooth L1 loss $\\mathbf{L}_{\\mathrm{S}}$ handles outliers by applying a quadratic or linear penalty based on the difference between predicted and true values. Perceptual loss $\\mathbf{L}_{\\mathrm{Perc}}$ maintains feature consistency by comparing VGG-extracted feature maps. Histogram loss $\\mathbf{L}_{\\mathrm{Hist}}$ aligns pixel intensity distributions between predicted and true images. PSNR loss $\\mathbf{L}_{\\mathrm{PSNR}}$ reduces noise by penalizing mean squared error, while Color loss $\\mathbf{L}_{\\mathrm{Color}}$ ensures color fidelity by minimizing differences in channel mean values. Lastly, Multiscale SSIM loss $\\mathbf{L}_{\\mathrm{MS-SSIM}}$ preserves structural integrity by evaluating similarity across multiple scales. Together, these losses form a comprehensive strategy addressing various aspects of image enhancement.",
581
+ "bbox": [
582
+ 73,
583
+ 637,
584
+ 491,
585
+ 848
586
+ ],
587
+ "page_idx": 2
588
+ },
589
+ {
590
+ "type": "text",
591
+ "text": "III. RESULTS AND DISCUSSION",
592
+ "text_level": 1,
593
+ "bbox": [
594
+ 169,
595
+ 864,
596
+ 397,
597
+ 878
598
+ ],
599
+ "page_idx": 2
600
+ },
601
+ {
602
+ "type": "text",
603
+ "text": "Implementation details: The implementation of LyT-Net utilizes the TensorFlow framework. The ADAM Optimizer $(\\beta_{1} = 0.9$ and $\\beta_{2} = 0.999)$ is employed for training over 1000 epochs. The initial learning rate is set to $2\\times 10^{-4}$ and gradually",
604
+ "bbox": [
605
+ 73,
606
+ 883,
607
+ 491,
608
+ 946
609
+ ],
610
+ "page_idx": 2
611
+ },
612
+ {
613
+ "type": "text",
614
+ "text": "decays to $1 \\times 10^{-6}$ following a cosine annealing schedule, aiding in optimization convergence and avoiding local minima. The hyperparameters of the hybrid loss function are set as: $\\alpha_{1} = 0.06$ , $\\alpha_{2} = 0.05$ , $\\alpha_{3} = 0.0083$ , $\\alpha_{4} = 0.25$ , and $\\alpha_{5} = 0.5$ .",
615
+ "bbox": [
616
+ 501,
617
+ 292,
618
+ 919,
619
+ 353
620
+ ],
621
+ "page_idx": 2
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "LYT-Net is trained and evaluated on: LOL-v1, LOL-v2-real, and LOL-v2-synthetic. The corresponding training/testing splits are 485 : 15 for LOL-v1, 689 : 100 for LOL-v2-real, and 900 : 100 for LOL-v2-synthetic.",
626
+ "bbox": [
627
+ 503,
628
+ 353,
629
+ 919,
630
+ 412
631
+ ],
632
+ "page_idx": 2
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "During training, image pairs undergo random augmentations, including random cropping to $256 \\times 256$ and random flipping/rotation, to prevent overfitting. The training is conducted with a batch size of 1. Evaluation metrics include PSNR and SSIM for performance assessment.",
637
+ "bbox": [
638
+ 501,
639
+ 412,
640
+ 919,
641
+ 488
642
+ ],
643
+ "page_idx": 2
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "Quantitative results: The proposed method is compared to SOTA LLIE techniques, as shown in Table I, focusing on several aspects: quantitative performance on the LOL (LOLV1, LOL-v2-real, LOL-v2-synthetic) and SDSD [40] datasets, and model complexity.",
648
+ "bbox": [
649
+ 501,
650
+ 492,
651
+ 919,
652
+ 568
653
+ ],
654
+ "page_idx": 2
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "As shown in Table I, LYT-Net consistently outperforms the current SOTA methods across all versions of the LOL dataset in terms of both PSNR and SSIM. Additionally, LYT-Net is highly efficient, requiring only 3.49G FLOPS and utilizing just 0.045M parameters, which gives it a significant advantage over other SOTA methods that are generally much more complex. The only exception is 3DLUT[34], which is comparable to our approach in terms of complexity. However, LYT-Net clearly surpasses the 3DLUT method in both PSNR and SSIM. This combination of strong performance and low complexity highlights the overall effectiveness of LYT-Net. On SDSD, where images are high resolution, our method shows limitations due to its significantly low parameter count. However, by utilizing a deeper variant of LYT-Net, we expect that performance increases accordingly.",
659
+ "bbox": [
660
+ 501,
661
+ 566,
662
+ 921,
663
+ 792
664
+ ],
665
+ "page_idx": 2
666
+ },
667
+ {
668
+ "type": "text",
669
+ "text": "Qualitative Results: The qualitative evaluation of LYT-Net against SOTA LLIE techniques is shown in Fig. 3 on the LOL dataset and in Fig. 4 on LIME [41].",
670
+ "bbox": [
671
+ 503,
672
+ 794,
673
+ 921,
674
+ 839
675
+ ],
676
+ "page_idx": 2
677
+ },
678
+ {
679
+ "type": "text",
680
+ "text": "Previous methods, such as KiND[13] and Restormer[29], exhibit color distortion issues, as shown in Fig. 3. Additionally, several algorithms (e.g. MIRNet[16], and SNR-Net[18]) tend to produce over- or under-exposed areas, compromising image contrast while enhancing luminance. Similarly, Fig. 4 demonstrates that SRIE [42], DeHz [43], and NPE [44] result in a loss of contrast. In general, our LYT-Net is highly effective at",
681
+ "bbox": [
682
+ 501,
683
+ 839,
684
+ 921,
685
+ 945
686
+ ],
687
+ "page_idx": 2
688
+ },
689
+ {
690
+ "type": "header",
691
+ "text": "LYT-NET: LIGHTWEIGHT YUV TRANSFORMER-BASED NETWORK FOR LOW-LIGHT IMAGE ENHANCEMENT",
692
+ "bbox": [
693
+ 76,
694
+ 29,
695
+ 656,
696
+ 41
697
+ ],
698
+ "page_idx": 2
699
+ },
700
+ {
701
+ "type": "page_number",
702
+ "text": "3",
703
+ "bbox": [
704
+ 911,
705
+ 30,
706
+ 919,
707
+ 40
708
+ ],
709
+ "page_idx": 2
710
+ },
711
+ {
712
+ "type": "table",
713
+ "img_path": "images/2bd3f42ffc83ae28eb1d5d6ca3cc7ab9e1de2d398ce497b2f76d9067cb637994.jpg",
714
+ "table_caption": [],
715
+ "table_footnote": [],
716
+ "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">Complexity</td><td colspan=\"2\">LOL-v1</td><td colspan=\"2\">LOL-v2-real</td><td colspan=\"2\">LOL-v2-syn</td><td colspan=\"2\">SDSD</td></tr><tr><td>FLOPS (G)</td><td>Params (M)</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td></tr><tr><td>3DLUT [34]</td><td>0.075</td><td>0.59</td><td>21.35</td><td>0.585</td><td>20.19</td><td>0.745</td><td>22.17</td><td>0.854</td><td>21.78</td><td>0.652</td></tr><tr><td>UFormer [27]</td><td>12.00</td><td>5.29</td><td>16.36</td><td>0.771</td><td>18.82</td><td>0.771</td><td>19.66</td><td>0.871</td><td>23.51</td><td>0.804</td></tr><tr><td>RetinexNet [10]</td><td>587.47</td><td>0.84</td><td>18.92</td><td>0.427</td><td>18.32</td><td>0.447</td><td>19.09</td><td>0.774</td><td>20.90</td><td>0.623</td></tr><tr><td>Sparse [35]</td><td>53.26</td><td>2.33</td><td>17.20</td><td>0.640</td><td>20.06</td><td>0.816</td><td>22.05</td><td>0.905</td><td>24.27</td><td>0.834</td></tr><tr><td>EnGAN [21]</td><td>61.01</td><td>114.35</td><td>20.00</td><td>0.691</td><td>18.23</td><td>0.617</td><td>16.57</td><td>0.734</td><td>20.06</td><td>0.610</td></tr><tr><td>FIDE [36]</td><td>28.51</td><td>8.62</td><td>18.27</td><td>0.665</td><td>16.85</td><td>0.678</td><td>15.20</td><td>0.612</td><td>22.31</td><td>0.644</td></tr><tr><td>LYT-Net (Ours)</td><td>3.49</td><td>0.045</td><td>22.38</td><td>0.826</td><td>21.83</td><td>0.849</td><td>23.78</td><td>0.921</td><td>28.42</td><td>0.877</td></tr><tr><td>KinD [13]</td><td>34.99</td><td>8.02</td><td>20.86</td><td>0.790</td><td>14.74</td><td>0.641</td><td>13.29</td><td>0.578</td><td>21.96</td><td>0.663</td></tr><tr><td>Restormer [29]</td><td>144.25</td><td>26.13</td><td>26.68</td><td>0.853</td><td>26.12</td><td>0.853</td><td>25.43</td><td>0.859</td><td>25.23</td><td>0.815</td></tr><tr><td>DepthLux [37]</td><td>-</td><td>9.75</td><td>26.06</td><td>0.793</td><td>26.16</td><td>0.794</td><td>28.69</td><td>0.920</td><td>-</td><td>-</td></tr><tr><td>ExpoMamba [38]</td><td>-</td><td>41</td><td>25.77</td><td>0.860</td><td>28.04</td><td>0.885</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>MIRNet [16]</td><td>785</td><td>31.76</td><td>26.52</td><td>0.856</td><td>27.17</td><td>0.865</td><td>25.96</td><td>0.898</td><td>25.76</td><td>0.851</td></tr><tr><td>SNR-Net [18]</td><td>26.35</td><td>4.01</td><td>26.72</td><td>0.851</td><td>27.21</td><td>0.871</td><td>27.79</td><td>0.941</td><td>29.05</td><td>0.880</td></tr><tr><td>KAN-T [39]</td><td>-</td><td>2.80</td><td>26.66</td><td>0.854</td><td>28.45</td><td>0.884</td><td>28.77</td><td>0.939</td><td>-</td><td>-</td></tr><tr><td>Retinexformer [28]</td><td>15.57</td><td>1.61</td><td>27.14</td><td>0.850</td><td>27.69</td><td>0.856</td><td>28.99</td><td>0.939</td><td>29.81</td><td>0.887</td></tr><tr><td>LYT-Net (Ours)</td><td>3.49</td><td>0.045</td><td>27.23</td><td>0.853</td><td>27.80</td><td>0.873</td><td>29.38</td><td>0.940</td><td>28.42</td><td>0.877</td></tr></table>",
717
+ "bbox": [
718
+ 84,
719
+ 64,
720
+ 908,
721
+ 361
722
+ ],
723
+ "page_idx": 3
724
+ },
725
+ {
726
+ "type": "image",
727
+ "img_path": "images/efb72d4d8faf722700ec550e4f2763feb61f2244c2c02bd741b30186a86c887f.jpg",
728
+ "image_caption": [],
729
+ "image_footnote": [],
730
+ "bbox": [
731
+ 81,
732
+ 406,
733
+ 197,
734
+ 473
735
+ ],
736
+ "page_idx": 3
737
+ },
738
+ {
739
+ "type": "image",
740
+ "img_path": "images/00d59ecd94bbd40b48e3e47ee0275e137ed020f3ac2cefba5ed6d7f28fb858e2.jpg",
741
+ "image_caption": [
742
+ "Input",
743
+ "Fig. 4. Qualitative comparison with SOTA LLIE methods on LIME dataset. Zoom-in regions are used to illustrate differences."
744
+ ],
745
+ "image_footnote": [],
746
+ "bbox": [
747
+ 81,
748
+ 473,
749
+ 197,
750
+ 564
751
+ ],
752
+ "page_idx": 3
753
+ },
754
+ {
755
+ "type": "image",
756
+ "img_path": "images/04705a1adb6201a5185b189bb6b765399d42c5b5040b308df7e9a0cd6a4aa58a.jpg",
757
+ "image_caption": [],
758
+ "image_footnote": [],
759
+ "bbox": [
760
+ 200,
761
+ 406,
762
+ 316,
763
+ 473
764
+ ],
765
+ "page_idx": 3
766
+ },
767
+ {
768
+ "type": "image",
769
+ "img_path": "images/a34a771d6671269be31f58c8166e396b0d0d4ff36e9d61ae75c9e12a71f3a7ae.jpg",
770
+ "image_caption": [
771
+ "SRIE"
772
+ ],
773
+ "image_footnote": [],
774
+ "bbox": [
775
+ 200,
776
+ 473,
777
+ 316,
778
+ 563
779
+ ],
780
+ "page_idx": 3
781
+ },
782
+ {
783
+ "type": "image",
784
+ "img_path": "images/04d13375f834b622fad418556d82e13e3acc66ea45aeb7caff674b5f854ea71d.jpg",
785
+ "image_caption": [],
786
+ "image_footnote": [],
787
+ "bbox": [
788
+ 318,
789
+ 406,
790
+ 434,
791
+ 473
792
+ ],
793
+ "page_idx": 3
794
+ },
795
+ {
796
+ "type": "image",
797
+ "img_path": "images/9a312a2f025414769329e5bacad9aa0071771c7416fcada741081981dc37b6c5.jpg",
798
+ "image_caption": [
799
+ "NPE"
800
+ ],
801
+ "image_footnote": [],
802
+ "bbox": [
803
+ 318,
804
+ 473,
805
+ 434,
806
+ 563
807
+ ],
808
+ "page_idx": 3
809
+ },
810
+ {
811
+ "type": "image",
812
+ "img_path": "images/fbb4dd36a9214bc290f3dd331a1531f8da8d9f48e28568eb9b9d70c52fdbf25a.jpg",
813
+ "image_caption": [],
814
+ "image_footnote": [],
815
+ "bbox": [
816
+ 437,
817
+ 406,
818
+ 553,
819
+ 473
820
+ ],
821
+ "page_idx": 3
822
+ },
823
+ {
824
+ "type": "image",
825
+ "img_path": "images/0786225b643666847c0dbf9cdd65539d201af943a313ffe6a533068023fa8642.jpg",
826
+ "image_caption": [
827
+ "LIME"
828
+ ],
829
+ "image_footnote": [],
830
+ "bbox": [
831
+ 437,
832
+ 473,
833
+ 553,
834
+ 563
835
+ ],
836
+ "page_idx": 3
837
+ },
838
+ {
839
+ "type": "image",
840
+ "img_path": "images/5109b0206da52d0ef58e43fe3a1118f19e2b9005b30285855bf79a6bd97b7025.jpg",
841
+ "image_caption": [],
842
+ "image_footnote": [],
843
+ "bbox": [
844
+ 555,
845
+ 406,
846
+ 671,
847
+ 473
848
+ ],
849
+ "page_idx": 3
850
+ },
851
+ {
852
+ "type": "image",
853
+ "img_path": "images/b2a317b1ab3e69e6c01e94ee832ff690d6b9fb2ee17669b925e748f72ef7dd0b.jpg",
854
+ "image_caption": [
855
+ "DeHz"
856
+ ],
857
+ "image_footnote": [],
858
+ "bbox": [
859
+ 555,
860
+ 473,
861
+ 671,
862
+ 563
863
+ ],
864
+ "page_idx": 3
865
+ },
866
+ {
867
+ "type": "image",
868
+ "img_path": "images/8e3382f4c31a1fa1650fc52cf3f49522fa38289df3aa1cf9105b96862f0a940b.jpg",
869
+ "image_caption": [],
870
+ "image_footnote": [],
871
+ "bbox": [
872
+ 671,
873
+ 406,
874
+ 789,
875
+ 473
876
+ ],
877
+ "page_idx": 3
878
+ },
879
+ {
880
+ "type": "image",
881
+ "img_path": "images/6ab20bdafe3dcfbf8ec8a6d68d2eaf2c986b222c59d54474c6af08eb9884f8f7.jpg",
882
+ "image_caption": [
883
+ "RetinexNet"
884
+ ],
885
+ "image_footnote": [],
886
+ "bbox": [
887
+ 671,
888
+ 473,
889
+ 789,
890
+ 563
891
+ ],
892
+ "page_idx": 3
893
+ },
894
+ {
895
+ "type": "image",
896
+ "img_path": "images/5df1d9db55b84461d7dcc8c27c25d88674d4e6ad067de073e5dbf91404271e5f.jpg",
897
+ "image_caption": [],
898
+ "image_footnote": [],
899
+ "bbox": [
900
+ 790,
901
+ 406,
902
+ 906,
903
+ 473
904
+ ],
905
+ "page_idx": 3
906
+ },
907
+ {
908
+ "type": "image",
909
+ "img_path": "images/faf23df759020e6fc3d2d16354cb9395e7608e3340806a493b2008c8a659604b.jpg",
910
+ "image_caption": [
911
+ "Ours"
912
+ ],
913
+ "image_footnote": [],
914
+ "bbox": [
915
+ 790,
916
+ 473,
917
+ 906,
918
+ 563
919
+ ],
920
+ "page_idx": 3
921
+ },
922
+ {
923
+ "type": "text",
924
+ "text": "enhancing visibility and low-contrast or poorly lit areas, while efficiently eliminating noise without introducing artifacts.",
925
+ "bbox": [
926
+ 73,
927
+ 597,
928
+ 491,
929
+ 628
930
+ ],
931
+ "page_idx": 3
932
+ },
933
+ {
934
+ "type": "text",
935
+ "text": "IV. ABLATION STUDY",
936
+ "text_level": 1,
937
+ "bbox": [
938
+ 200,
939
+ 640,
940
+ 364,
941
+ 652
942
+ ],
943
+ "page_idx": 3
944
+ },
945
+ {
946
+ "type": "text",
947
+ "text": "The ablation study is conducted on the LOLv1 dataset, using PSNR and CIEDE2000 [45] as quantitative metrics, and evaluates the impact of the CWD and MSEF blocks. In the YUV decomposition, applying CWD to the $Y$ -channel (used as the illumination map) results in the retention of lighting artifacts, leading to performance degradation compared to pooling operations and interpolation-based upsampling, which smoothen the illumination for better and more uniform lighting. However, CWD enhances the chrominance channels ( $U$ and $V$ ), preserving detail without introducing noise. Moreover, the MSEF block consistently boosts performance across all CWD combinations, improving PSNR by 0.16, 0.24, and 0.26 dB, respectively, only increasing the parameter count by 546.",
948
+ "bbox": [
949
+ 73,
950
+ 657,
951
+ 490,
952
+ 853
953
+ ],
954
+ "page_idx": 3
955
+ },
956
+ {
957
+ "type": "text",
958
+ "text": "V. CONCLUSIONS",
959
+ "text_level": 1,
960
+ "bbox": [
961
+ 217,
962
+ 864,
963
+ 349,
964
+ 878
965
+ ],
966
+ "page_idx": 3
967
+ },
968
+ {
969
+ "type": "text",
970
+ "text": "We introduce LyT-Net, an innovative lightweight transformer-based model for enhancing low-light images. Our approach utilizes a dual-path framework, processing chrominance and luminance separately to improve the",
971
+ "bbox": [
972
+ 73,
973
+ 883,
974
+ 491,
975
+ 946
976
+ ],
977
+ "page_idx": 3
978
+ },
979
+ {
980
+ "type": "table",
981
+ "img_path": "images/4006aaaf13ce4352556fd0a5673d3f165fc8b45480511444f0440fa1f52d7478.jpg",
982
+ "table_caption": [
983
+ "TABLEI QUANTITATIVE RESULTS ON LOL DATASETS. BEST RESULTS ARE IN RED, SECOND BEST ARE IN BLUE. HIGHLIGHTED CELLS SHOW RESULTS WITH GT-MEAN GAMMA CORRECTION [13], WHICH IS WIDELY USED ON THE LOL DATASETS.."
984
+ ],
985
+ "table_footnote": [],
986
+ "table_body": "<table><tr><td>Y-CWD</td><td>UV-CWD</td><td>MSEF</td><td>Params</td><td>PSNR↑</td><td>CIEDE2000↓</td></tr><tr><td>✓</td><td></td><td></td><td>40238</td><td>26.62</td><td>6.3087</td></tr><tr><td></td><td>✓</td><td></td><td>44377</td><td>26.99</td><td>6.0148</td></tr><tr><td>✓</td><td>✓</td><td></td><td>48516</td><td>26.76</td><td>6.1975</td></tr><tr><td>✓</td><td></td><td>✓</td><td>40784</td><td>26.78</td><td>6.1816</td></tr><tr><td></td><td>✓</td><td>✓</td><td>44923</td><td>27.23</td><td>5.8242</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>49062</td><td>27.02</td><td>5.9910</td></tr></table>",
987
+ "bbox": [
988
+ 524,
989
+ 594,
990
+ 903,
991
+ 698
992
+ ],
993
+ "page_idx": 3
994
+ },
995
+ {
996
+ "type": "text",
997
+ "text": "TABLE II ABLATION STUDY: PERFORMANCE AND PARAMETER IMPACT OF CWD AND MSEF BLOCKS.",
998
+ "bbox": [
999
+ 514,
1000
+ 702,
1001
+ 908,
1002
+ 737
1003
+ ],
1004
+ "page_idx": 3
1005
+ },
1006
+ {
1007
+ "type": "text",
1008
+ "text": "model's ability to manage illumination adjustments and restore corrupted regions. LYT-Net integrates multiple layers and modular blocks, including two unique CWD and MSEF — as well as the traditional ViT block with MHSA. A comprehensive qualitative and quantitative analysis demonstrates that LYT-Net consistently outperforms SOTA methods on all versions of the LOL dataset in terms of PSNR and SSIM, while maintaining high computational efficiency.",
1009
+ "bbox": [
1010
+ 503,
1011
+ 753,
1012
+ 919,
1013
+ 875
1014
+ ],
1015
+ "page_idx": 3
1016
+ },
1017
+ {
1018
+ "type": "text",
1019
+ "text": "Acknowledgement: Part of this research is supported by the \"Romanian Hub for Artificial Intelligence - HRIA\", Smart Growth, Digitization and Financial Instruments Program, 2021-2027, MySMIS no. 334906.",
1020
+ "bbox": [
1021
+ 503,
1022
+ 883,
1023
+ 921,
1024
+ 946
1025
+ ],
1026
+ "page_idx": 3
1027
+ },
1028
+ {
1029
+ "type": "header",
1030
+ "text": "LYT-NET: LIGHTWEIGHT YUV TRANSFORMER-BASED NETWORK FOR LOW-LIGHT IMAGE ENHANCEMENT",
1031
+ "bbox": [
1032
+ 76,
1033
+ 29,
1034
+ 656,
1035
+ 40
1036
+ ],
1037
+ "page_idx": 3
1038
+ },
1039
+ {
1040
+ "type": "page_number",
1041
+ "text": "4",
1042
+ "bbox": [
1043
+ 911,
1044
+ 31,
1045
+ 919,
1046
+ 40
1047
+ ],
1048
+ "page_idx": 3
1049
+ },
1050
+ {
1051
+ "type": "text",
1052
+ "text": "REFERENCES",
1053
+ "text_level": 1,
1054
+ "bbox": [
1055
+ 235,
1056
+ 69,
1057
+ 333,
1058
+ 83
1059
+ ],
1060
+ "page_idx": 4
1061
+ },
1062
+ {
1063
+ "type": "list",
1064
+ "sub_type": "ref_text",
1065
+ "list_items": [
1066
+ "[1] W. Wang, X. Wu, X. Yuan, and Z. Gao, \"An experiment-based review of low-light image enhancement methods,\" IEEE Access, vol. 8, pp. 87-884-87-917, 2020.",
1067
+ "[2] L. Xiao, C. Li, Z. Wu, and T. Wang, \"An enhancement method for x-ray image via fuzzy noise removal and homomorphic filtering,\" Neurocomputing, vol. 195, 2016.",
1068
+ "[3] S. E. Kim, J. J. Jeon, and I. K. Eom, \"Image contrast enhancement using entropy scaling in wavelet domain,\" Signal Processing, vol. 127(1), 2016.",
1069
+ "[4] S.-D. Chen and A. R. Ramli, \"Contrast enhancement using recursive mean-separate histogram equalization for scalable brightness preservation,\" IEEE Transactions on Consumer Electronics, vol. 49(4), 2003.",
1070
+ "[5] S. Kansal, S. Purwar, and R. K. Tripathi, \"Image contrast enhancement using unsharp masking and histogram equalization,\" Multimedia Tools Applications, vol. 77(20), 2018.",
1071
+ "[6] E. H. Land, “The retina theory of color vision,” Scientific american, vol. 237, no. 6, pp. 108–129, 1977.",
1072
+ "[7] S. Park, S.Yu, B.Moon, S.Ko, and J. Paik, \"Low-light image enhancement using variational optimization-based retina model,\" IEEE Transactions on Consumer Electronics, vol. 63(2), 2017.",
1073
+ "[8] Z. Gu, F. Li, F. Fang, and G. Zhang, “A novel retinax-based fractional order variational model for images with severely low light,” IEEE Transactions on Image Processing, vol. 29, 2019.",
1074
+ "[9] J. H. Jang, Y. Bae, and J. B. Ra, \"Contrast-enhanced fusion of multisensory images using subband-decomposed multiscale retinax,\" IEEE Transactions on Image Processing, vol. 21(8), 2012.",
1075
+ "[10] C. Wei, W. Wang, W. Yang, and J. Liu, “Deep retina decomposition for low-light enhancement,” in Proceedings of the British Machine Vision Conference (BMVC), 2018.",
1076
+ "[11] R. Wang, Q. Zhang, C.-W. Fu, X. Shen, W.-S. Zheng, and J. Jia, \"Underexposed photo enhancement using deep illumination estimation,\" in CVPR, 2019.",
1077
+ "[12] X. Yi, H. Xu, H. Zhang, L. Tang, and J. Ma, \"Diff-retinex: Rethinking low-light image enhancement with a generative diffusion model,\" in CVPR, 2023.",
1078
+ "[13] Y. Zhang, J. Zhang, and X. Guo, \"Kindling the darkness: A practical low-light image enhancer,\" in Proceedings of ACM international conference on multimedia, 2019.",
1079
+ "[14] Y. Zhang, Y. Tian, Y. Kong, B. Zhong, and Y. Fu, “Residual dense network for image restoration,” In IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020.",
1080
+ "[15] A. Dudhane, S. Zamir, S. Khan, F. Khan, and M.-H. Yang, “Burst image restoration and enhancement,” CVPR, 2022.",
1081
+ "[16] S. W. Zamir, A. Arora, S. Khan, M. Hayat, F. S. Khan, M.-H. Yang, and L. Shao, “Learning enriched features for real image restoration and enhancement,” in European Conference on Computer Vision, 2020.",
1082
+ "[17] R. Liu, L. Ma, J. Zhang, X. Fan, and Z. Luo, \"Retinex-inspired unrolling with cooperative prior architecture search for low-light image enhancement,\" in CVPR, 2021.",
1083
+ "[18] X. Xu, R. Wang, C.-W. Fu, and J. Jia, “SNR-aware low-light image enhancement,” in CVPR, 2022.",
1084
+ "[19] Y. Shi, D. Liu, L. Zhang, Y. Tian, X. Xia, and X. Fu, “Zero-ig: Zero-shot illumination-guided joint denoising and adaptive enhancement for low-light images,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 3015–3024.",
1085
+ "[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, \"Generative adversarial nets,\" In Advances in neural information processing systems, 2014.",
1086
+ "[21] Y. Jiang, X. Gong, D. Liu, Y. Cheng, C. Fang, X. Shen, J. Yang, P. Zhou, and Z. Wang, \"Enlightengan: Deep light enhancement without paired supervision,\" IEEE Transactions on Image Processing, vol. 30, pp. 2340-2349, 2021.",
1087
+ "[22] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, and S. Gelly, \"An image is worth 16x16 words: Transformers for image recognition at scale,\" International Conference on Learning Representations (ICLR), 2021.",
1088
+ "[23] T. Wang, K. Zhang, T. Shen, W. Luo, B. Stenger, and T. Lu, “Ultra-high-definition low-light image enhancement: A benchmark and transformer-based method,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 3, 2023, pp. 2654–2662.",
1089
+ "[24] W. Wang, E. Xie, X. Li, D.-P. Fan, K. Song, D. Liang, T. Lu, P. Luo, and L. Shao, “Pyramid vision transformer: A versatile backbone for dense prediction without convolutions,” ICCV, 2021."
1090
+ ],
+ "bbox": [
+ 76,
+ 95,
+ 491,
+ 943
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "[25] S. Zheng, J. Lu, H. Zhao, X. Zhu, Z. Luo, Y. Wang, Y. Fu, J. Feng, T. Xiang, and P. H. Torr, \"Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers,\" CVPR, 2021.",
1104
+ "[26] Z. Liu, Y. L. Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, and B. Guo, \"Swin transformer: Hierarchical vision transformer using shifted windows,\" ICCV, 2021.",
1105
+ "[27] Z. Wang, X. Cun, J. Bao, W. Zhou, J. Liu, and H. Li, \"Uformer: A general u-shaped transformer for image restoration,\" in CVPR, 2022.",
1106
+ "[28] Y. Cai, H. Bian, J. Lin, H. Wang, R. Timofte, and Y. Zhang, \"Retinexformer: One-stage retinax-based transformer for low-light image enhancement,\" in IEEE/CVF International Conference on Computer Vision (ICCV), 2023.",
1107
+ "[29] S. W. Zamir, A. Arora, S. Khan, M. Hayat, F. S. Khan, and M.-H. Yang, \"Restormer: Efficient transformer for high-resolution image restoration,\" in CVPR, 2022.",
1108
+ "[30] C. Hu, Y. Hu, L. Xu, Y. Guo, Z. Cai, X. Jing, and P. Liu, \"Jte-cflow for low-light enhancement and zero-element pixels restoration with application to night traffic monitoring images,\" IEEE Transactions on Intelligent Transportation Systems, vol. 26, no. 3, pp. 3755-3770, 2025.",
1109
+ "[31] H. Jiang, A. Luo, H. Fan, S. Han, and S. Liu, \"Low-light image enhancement with wavelet-based diffusion models,\" ACM Transactions on Graphics (TOG), vol. 42, no. 6, pp. 1-14, 2023.",
1110
+ "[32] H. Jiang, A. Luo, X. Liu, S. Han, and S. Liu, \"Lightendiffusion: Unsupervised low-light image enhancement with latent-retinex diffusion models,\" in European Conference on Computer Vision. Springer, 2025, pp. 161-179.",
1111
+ "[33] J. Hou, Z. Zhu, J. Hou, H. Liu, H. Zeng, and H. Yuan, \"Global structure-aware diffusion process for low-light image enhancement,\" Advances in Neural Information Processing Systems, vol. 36, 2024.",
1112
+ "[34] H. Zeng, J. Cai, L. Li, Z. Cao, and L. Zhang, \"Learning image-adaptive 3d lookup tables for high performance photo enhancement in real-time,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 44, no. 4, pp. 2058-2073, 2020.",
1113
+ "[35] W. Yang, W. Wang, H. Huang, S. Wang, and J. Liu, \"Sparse gradient regularized deep retina network for robust low-light image enhancement,\" IEEE Transactions on Image Processing, vol. 30, pp. 2072-2086, 2021.",
1114
+ "[36] K. Xu, X. Yang, B. Yin, and R. W. Lau, \"Learning to restore low-light images via decomposition-and-enhancement,\" in CVPR, 2020.",
1115
+ "[37] R. Balmez, A. Brateanu, C. Orhei, C. O. Ancuti, and C. Ancuti, \"Depthlux: Employing depthwise separable convolutions for low-light image enhancement,\" Sensors, vol. 25, no. 5, 2025. [Online]. Available: https://www.mdpi.com/1424-8220/25/5/1530",
1116
+ "[38] E. Adhikarla, K. Zhang, J. Nicholson, and B. D. Davison, \"Expomamba: Exploiting frequency SSM blocks for efficient and effective image enhancement,\" in Workshop on Efficient Systems for Foundation Models II @ ICML2024, 2024. [Online]. Available: https://openreview.net/forum?id=X9L6PatYhH",
1117
+ "[39] A. Brateanu, R. Balmez, C. Orhei, C. Ancuti, and C. Ancuti, \"Enhancing low-light images with kolmogorov-arnold networks in transformer attention,\" Sensors, vol. 25, no. 2, 2025. [Online]. Available: https://www.mdpi.com/1424-8220/25/2/327",
1118
+ "[40] R. Wang, X. Xu, C.-W. Fu, J. Lu, B. Yu, and J. Jia, \"Seeing dynamic scene in the dark: A high-quality video dataset with mechatronic alignment,\" in Proceedings of the IEEE/CVF international conference on computer vision, 2021, pp. 9700-9709.",
1119
+ "[41] X. Guo, Y. Li, and H. Ling, \"Lime: Low-light image enhancement via illumination map estimation,\" IEEE Transactions on image processing, vol. 26, no. 2, pp. 982-993, 2016.",
1120
+ "[42] X. Fu, D. Zeng, Y. Huang, Y. Liao, X. Ding, and J. Paisley, “A fusion-based enhancing method for weakly illuminated images,” Signal Processing, vol. 129, pp. 82–96, 2016.",
1121
+ "[43] X. Dong, Y. Pang, and J. Wen, \"Fast efficient algorithm for enhancement of low lighting video,\" in ACM SIGGRAPH 2010 Posters, 2010, pp. 1-1.",
1122
+ "[44] S. Wang, J. Zheng, H.-M. Hu, and B. Li, \"Naturalness preserved enhancement algorithm for non-uniform illumination images,\" IEEE Transactions on Image Processing, vol. 22, no. 9, pp. 3538-3548, 2013.",
1123
+ "[45] M. R. Luo, G. Cui, and B. Rigg, “The development of the cie 2000 colour-difference formula: Ciede2000,” Color Research & Application, vol. 26, no. 5, pp. 340–350, 2001. [Online]. Available: https://onlinelibrary.wiley.com/doi/abs/10.1002/col.1049"
1124
+ ],
+ "bbox": [
+ 506,
+ 71,
+ 919,
+ 875
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "header",
+ "text": "LYT-NET: LIGHTWEIGHT YUV TRANSFORMER-BASED NETWORK FOR LOW-LIGHT IMAGE ENHANCEMENT",
+ "bbox": [
+ 76,
+ 29,
+ 656,
+ 41
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "page_number",
+ "text": "5",
+ "bbox": [
+ 911,
+ 30,
+ 919,
+ 40
+ ],
+ "page_idx": 4
+ }
+ ]