SlowGuess committed
Commit 0b4344c · verified · 1 Parent(s): 843b1d0

Add Batch 254ccce2-41c1-4700-866a-e92ef8ca9490

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_content_list.json +865 -0
  3. 2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_model.json +1305 -0
  4. 2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_origin.pdf +3 -0
  5. 2401.17xxx/2401.17234/full.md +171 -0
  6. 2401.17xxx/2401.17234/images.zip +3 -0
  7. 2401.17xxx/2401.17234/layout.json +0 -0
  8. 2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_content_list.json +0 -0
  9. 2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_model.json +0 -0
  10. 2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_origin.pdf +3 -0
  11. 2401.17xxx/2401.17256/full.md +0 -0
  12. 2401.17xxx/2401.17256/images.zip +3 -0
  13. 2401.17xxx/2401.17256/layout.json +0 -0
  14. 2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_content_list.json +0 -0
  15. 2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_model.json +0 -0
  16. 2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_origin.pdf +3 -0
  17. 2401.17xxx/2401.17263/full.md +0 -0
  18. 2401.17xxx/2401.17263/images.zip +3 -0
  19. 2401.17xxx/2401.17263/layout.json +0 -0
  20. 2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_content_list.json +0 -0
  21. 2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_model.json +0 -0
  22. 2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_origin.pdf +3 -0
  23. 2401.17xxx/2401.17264/full.md +471 -0
  24. 2401.17xxx/2401.17264/images.zip +3 -0
  25. 2401.17xxx/2401.17264/layout.json +0 -0
  26. 2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_content_list.json +0 -0
  27. 2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_model.json +0 -0
  28. 2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_origin.pdf +3 -0
  29. 2401.17xxx/2401.17270/full.md +421 -0
  30. 2401.17xxx/2401.17270/images.zip +3 -0
  31. 2401.17xxx/2401.17270/layout.json +0 -0
  32. 2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_content_list.json +0 -0
  33. 2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_model.json +0 -0
  34. 2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_origin.pdf +3 -0
  35. 2401.17xxx/2401.17377/full.md +527 -0
  36. 2401.17xxx/2401.17377/images.zip +3 -0
  37. 2401.17xxx/2401.17377/layout.json +0 -0
  38. 2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_content_list.json +0 -0
  39. 2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_model.json +0 -0
  40. 2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_origin.pdf +3 -0
  41. 2401.17xxx/2401.17409/full.md +0 -0
  42. 2401.17xxx/2401.17409/images.zip +3 -0
  43. 2401.17xxx/2401.17409/layout.json +0 -0
  44. 2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_content_list.json +2205 -0
  45. 2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_model.json +0 -0
  46. 2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_origin.pdf +3 -0
  47. 2401.17xxx/2401.17461/full.md +431 -0
  48. 2401.17xxx/2401.17461/images.zip +3 -0
  49. 2401.17xxx/2401.17461/layout.json +0 -0
  50. 2401.17xxx/2401.17464/61117e09-cb04-4043-868a-02a2e7fb24fe_content_list.json +0 -0
.gitattributes CHANGED
@@ -10006,3 +10006,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  2402.06xxx/2402.06646/1e39a434-08bf-4a11-954e-d2f5bfb231c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
  2402.18xxx/2402.18587/f20176a3-5723-423f-aa58-05ee1baee788_origin.pdf filter=lfs diff=lfs merge=lfs -text
  2405.00xxx/2405.00010/9922d98b-54ac-40ee-bc7f-11b45ecea51b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17464/61117e09-cb04-4043-868a-02a2e7fb24fe_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17486/b3ea26da-b4af-4dc0-a4ac-2ca85b3ff4a0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17538/fa1af4b5-3ff0-4589-828c-4b068f004e1a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17548/c24d26c3-9653-4800-a677-6d455d9fc0db_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17583/434a9977-ca64-4f86-9774-c3565cc6a22f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17592/f4d06ad4-94f7-4089-bef1-6e16d764b88c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17600/89b69201-08fc-469c-8699-2fe4f75cc01f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17630/e56f4cf9-d7fe-40df-a660-62d2c156f97f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17633/10a9ee91-6d2e-42df-a4c4-e849cf4f262c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17644/2a6dcf09-2d41-4509-b5b9-9a451f5252bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17661/2b7bd083-3c3a-4164-a2d0-a1897a4e3ea0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17671/d81eab9c-db5c-4a88-8a94-d3cb06bb5c04_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17690/6b89228f-0108-416d-a83a-5dbf1806305b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17698/591ed71f-84b7-4330-8913-8132a91668fb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17759/ebbaabc5-2afa-4cb4-8ac7-41825dc490a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17807/9eb48bdc-e0d9-4f2b-90f5-5abffe8742e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17868/2919a191-ad58-40c0-ba86-84ec06c2aafb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17879/68fb8c99-6e52-4c86-8463-446f4b8ccc6d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17880/863fcd27-5d38-43ad-84e5-635a7510d3e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17904/3e296326-ee99-4c1e-af03-ca37dbca74da_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.17xxx/2401.17958/3a158b19-940b-44e7-bc39-de941b4d8413_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18006/fa2ea757-2338-46b4-9a07-d2f8e882292b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18018/2e8ea970-0587-4717-8bc9-3c4067a363d7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18039/1a17231b-4b3b-4791-85d2-136a648b84d9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18058/d6692f7f-807a-47e2-a0b3-a99a3466e95d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18059/18f3980e-b696-436c-b54d-403d0643e976_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18079/3c9f7f71-ea1f-46f0-8574-2d2bd9923cba_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18084/e27c0733-2f01-420b-8a13-59c914325be2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2401.18xxx/2401.18085/d1801a75-1c6f-4aae-8008-9e828e89ec00_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00093/8750a11e-e032-4cba-9ee4-7b95d58212bc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00097/e1cdfc1c-16ba-48f1-aacd-3c392bb9642a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00126/ba67bc7d-4cdd-412e-b097-52bad3542a94_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00157/1ca8f510-a136-4680-ae34-c34db006a313_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00159/4020421e-8a39-4737-bdd0-dd533343d3bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00247/7969272d-461c-4976-b0bb-79a259379f36_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00253/488fcb1a-787c-4b82-81e9-9506a591455b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00282/b9183c80-e472-499f-b091-59d5703f4b92_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00326/8d468b2d-f6a2-4dd2-b0c1-44fd36967e62_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00341/b3e15312-9dff-46ef-95f3-ada2377073f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00351/d1ecda9a-8a64-4a57-b782-c77b4d16e87c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00356/2c2220cf-36b5-4d6e-b98f-4056b766ab7d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00357/e979508a-82b8-49ea-9a91-4ae2189ecb20_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00367/d61f87ef-6b94-4c72-b924-89d9d8cadd8d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00371/c05e38c5-96e9-440f-88aa-fcac22723ab0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00386/19fc67ab-5817-4af6-adf4-2877750aa249_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00396/124c7827-de6f-4c3a-9977-93cdc0435a8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00422/e73db499-9133-4d42-a188-1494fd0e8c03_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00433/69114aa2-21dd-4e1f-b82e-3bf81ba9ba20_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00444/b9bd7d1f-6322-4df4-9e4c-27eb99ccfd86_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00898/558b3888-4334-45d0-8df7-d1271ce7c53e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.00xxx/2402.00905/ebfe14fe-a715-4b31-933e-99c5e73f0b44_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.01xxx/2402.01763/bc8dffea-1e0c-4073-9902-f0d1965e0d53_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.01xxx/2402.01766/ef26bb47-b14a-4858-a3b6-d213ca6635f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.01xxx/2402.01771/fc4f38a7-633f-4f83-8407-da7fea884c62_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.03xxx/2402.03367/49da6673-15fb-40db-a51b-1ea3826140e4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2402.09xxx/2402.09444/f207e906-0076-4a00-a46d-59cd7251f2a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_content_list.json ADDED
@@ -0,0 +1,865 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Asynchronous Distributed Genetic Algorithms with Javascript and JSON",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 92,
8
+ 78,
9
+ 929,
10
+ 125
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Juan Julián Merelo-Guervós, Pedro A. Castillo, JLJ Laredo, A. Mora García, A. Prieto",
17
+ "bbox": [
18
+ 176,
19
+ 146,
20
+ 843,
21
+ 161
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—In a connected world, spare CPU cycles are up for grabs, if you only make its obtention easy enough. In this paper we present a distributed evolutionary computation system that uses the computational capabilities of the ubiquitous web browser. Using Asynchronous Javascript and JSON (Javascript Object Notation, a serialization protocol) allows anybody with a web browser (that is, mostly everybody connected to the Internet) to participate in a genetic algorithm experiment with little effort, or none at all. Since, in this case, computing becomes a social activity and is inherently unpredictable, in this paper we will explore the performance of this kind of virtual computer by solving simple problems such as the Royal Road function and analyzing how many machines and evaluations it yields. We will also examine possible performance bottlenecks and how to solve them, and, finally, issue some advice on how to set up this kind of experiments to maximize turnout and, thus, performance.",
28
+ "bbox": [
29
+ 80,
30
+ 191,
31
+ 502,
32
+ 384
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "I. INTRODUCTION",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 221,
42
+ 391,
43
+ 361,
44
+ 404
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Application-level networks (ALNs), are configured as a set of clients/server (servers) that can provide their spare CPU cycles by means of a downloadable application, establishing a distributed computation network which can provide ad hoc computational power. Some ALN like SETI@Home have been quite successful [1], creating a virtual computer that has processed a good amount of teraflops, while other experiments such as Popular Power (and most others, in fact) have not [?].",
51
+ "bbox": [
52
+ 80,
53
+ 410,
54
+ 502,
55
+ 536
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "The key feature of these application-level networks is the simplicity of use: we believe that the best way to obtain the participation of as many users as possible is to make it as very simple. In particular, it will be easier if they do not need to download a special application (such as a screen-saver) to participate, as is needed in BOINC, the successor to SETI@Home. For this reason, we are exploring the use of applications that are commonly installed in the user's computer, such as the web browser, which is available even in PDAs and some cellular phones<sup>1</sup>. Moreover, most browsers natively include a JavaScript interpreter [2], [3], [4] or virtual machine. JavaScript is an interpreted language<sup>2</sup>, initially proposed by Netscape, and later adopted as an ECMA standard [5], [6], [7], [8]. In this way, most browsers are compatible, at least at a language level (not always at the level of browser objects, where there exists a reasonable compatibility, anyway). Most browser also include elements such as a Java virtual machine and a Flash plugin, which,",
62
+ "bbox": [
63
+ 80,
64
+ 538,
65
+ 502,
66
+ 794
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "list",
72
+ "sub_type": "text",
73
+ "list_items": [
74
+ "The authors are with the Departamento de Arquitectura y Tecnología de Computadores, University of Granada (Spain), corresponding email jj@merelo.net",
75
+ "<sup>1</sup>Whose computing power is similar to four-year-old desktop machines",
76
+ "2which has nothing to do with Java, other than the name and its syntactic similarity"
77
+ ],
78
+ "bbox": [
79
+ 80,
80
+ 804,
81
+ 500,
82
+ 872
83
+ ],
84
+ "page_idx": 0
85
+ },
86
+ {
87
+ "type": "text",
88
+ "text": "with ActionScript, has more or less the same capabilities. However, there are several disadvantages to these: they might or might not be present (they are not native), they are noisy in the sense that, since they act as plugins, their execution is always noted by the user, their programs are more heavyweight than simple text code, and, finally, its integration with the browser is more awkward than the seamless integration that JavaScript offers. In any case, most things said here for JavaScript also apply to these and other plugins.",
89
+ "bbox": [
90
+ 515,
91
+ 191,
92
+ 937,
93
+ 332
94
+ ],
95
+ "page_idx": 0
96
+ },
97
+ {
98
+ "type": "text",
99
+ "text": "By itself, an interpreted language is not enough for creating a metacomputer if there is no way to convey information back from the client to the server in a seamless way. The ability to use the virtual machine included in browsers for distributed computing appeared with the XMLHttpRequest object, which allows asynchronous petitions to the server, in what has been called AJAX, Asynchronous JavaScript and XML [9]. AJAX is just one of the possible ways to perform asynchronous client-server communication, the others being AJAX (Asynchronous Javascript and JSON), and remoting using applets or embedded objects. However, it is quite popular, and a wide user base and documentation is available for it, using any of these asynchronous client/server communication protocols. The traditional client/server model becomes then more egalitarian, or closer to a peer to peer model, since a bidirectional communication line appears: the browser can make calls to the server, do some computation and later send the results to the server.",
100
+ "bbox": [
101
+ 515,
102
+ 333,
103
+ 937,
104
+ 587
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "text",
110
+ "text": "AJAX (and AJAJ, which differ only in the way data is serialized) works as follows: the XMLHttpRequest is provided with a request to the server and a pointer to a callback function. The request generates an event, which is asynchronously activated when a reply is received making use of the callback function. Following this approach the browser is not locked, providing a way to program applications that are similar to the ones used at the desktop, in the sense that they do not have to wait for the application response to be loaded and rendered on the screen every time a request is made. It also means that a user clicking on the Submit button is no longer needed to initiate communication with the server; any JavaScript thread can do so, with the constraint that the only server they can communicate with is the one that hosts the page the script is included in. On the other side, this provides a way to use the browser for application level networks that create distributed computing systems, since the request-response loop does not need the user participation in a fashion very similar to any other distributed computing application; these ALN can be",
111
+ "bbox": [
112
+ 515,
113
+ 587,
114
+ 937,
115
+ 872
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "text",
121
+ "text": "controlled from the server with any programming language. Of course, it can also be combined with other distributed programming frameworks based on OpenGrid [10] or other distributed computing paradigms.",
122
+ "bbox": [
123
+ 80,
124
+ 68,
125
+ 499,
126
+ 124
127
+ ],
128
+ "page_idx": 1
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "We previously used Ruby on Rails for making this kind of distributed AJAX application [11]; however, performance and scaling behavior were not too satisfactory mainly for two reasons: the nature of the Ruby on Rails server, which required the setup of a load-balancing server, and the (probably determinant) fact that part of the genetic algorithm was done on the server, with a low degree of parallelism and thus a high impact on performance. Latest experiments after publication yielded a maximum of 40 chromosome evaluation per second<sup>3</sup>.",
133
+ "bbox": [
134
+ 80,
135
+ 125,
136
+ 499,
137
+ 264
138
+ ],
139
+ "page_idx": 1
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "In this paper, following the same concept of distributed evolutionary computation on the browser via AJAX, we have redesigned the application using Perl and PostgreSQL (on the server) and Javascript, as before, on the client, and renamed it AGAJAJ (pronounce it A-gah-yai), which stands for Asynchronous Genetic Algorithm with Javascript and JSON; in this case, the genetic algorithm (solving the well known Royal Road problem) is carried out only on the clients, with the server used just for interchange of information among them. We will perform several experimentes in which clients donate computing power by just loading a web page to find out what kind of performance we can expect from this kind of setup, from the number of machines that will be made available by their users to the number of evaluations each one of them can perform; in these experiments, we have improved two orders of magnitude the performance achieved in the previous experiments which used Ruby on Rails, and also the number of concurrent machines available to perform them, showing that this kind of setup is ready to take more computing-intensive experiments without the need of an expensive server setup.",
144
+ "bbox": [
145
+ 80,
146
+ 266,
147
+ 499,
148
+ 564
149
+ ],
150
+ "page_idx": 1
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "This paper follows our group's line of work on distributing evolutionary computation applications, which has already been adapted to several parallel and distributed computing paradigms (for example, Jini [12], JavaSpaces [13], Java with applets [14], service oriented architectures [15] and P2P systems [16], [17]). Evolutionary computation is quite adequate for this kind of distributed environment for several reasons: it is a population based method, so computation can be distributed among nodes (via distribution of population) in many different ways; besides, some works suggest that there are synergies among evolutionary algorithms and parallelization: isolated populations that are connected only eventually avoid the loss of diversity and produce better solutions in fewer time obtaining, in some cases, superlinear accelerations [18].",
155
+ "bbox": [
156
+ 80,
157
+ 565,
158
+ 499,
159
+ 776
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "Of course, with a suitable work division method, many other algorithms could be adapted to browser-based distributed computation; however, in this paper will solve only genetic algorithms, and concentrate on raw performance, rather than algorithmic behavior.",
166
+ "bbox": [
167
+ 80,
168
+ 777,
169
+ 499,
170
+ 847
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "The rest of the paper is organized as follows: next section concentrates on the application of volunteer/involuntary computing to evolutionary computation; the setup is described in section III. Experiments and results are shown in section IV and discussed in V, along with future lines of work.",
177
+ "bbox": [
178
+ 517,
179
+ 68,
180
+ 937,
181
+ 138
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "II. STATE OF THE ART",
188
+ "text_level": 1,
189
+ "bbox": [
190
+ 643,
191
+ 155,
192
+ 811,
193
+ 167
194
+ ],
195
+ "page_idx": 1
196
+ },
197
+ {
198
+ "type": "text",
199
+ "text": "So called volunteer computing [19], [20] systems are application-level networks set up so that different people can donate CPU cycles for a joint computing effort. The best known project is SETI@home $^4$ , which, from the user's point of view, is a screen-saver which has to be downloaded and installed; when the user's CPU is not busy it performs several signal analysis operations. Some companies related to volunteer computing, such as Popular Power (and others; they are referenced, for example, in [21]) did some experimentation with Java based clients, but none has had commercial success; on the other hand, the SETI@Home program has been open-sourced and extended as the BOINC (Berkeley Open Infrastructure for Network Computing) framework [22]. This kind of volunteer computing has been adapted to evolutionary computation in several occasions, using frameworks such as DREAM [23], which includes a Java-based virtual machine, GOLEM@Home, Electric Sheep [24] and G2-P2P [25]. Both approaches acknowledge that to achieve massive scalability, a peer to peer (P2P) approach is advisable, since it eliminates bottlenecks and single points of failure.",
200
+ "bbox": [
201
+ 515,
202
+ 178,
203
+ 936,
204
+ 461
205
+ ],
206
+ "page_idx": 1
207
+ },
208
+ {
209
+ "type": "text",
210
+ "text": "There are mainly two problems in this kind of volunteer networks: first of all, it is important not to abuse the CPU resources of volunteers; secondly, a sufficient number of users is needed in order to be able to do the required computation, which can be a problem on its own if there are too many of them, bringing the network, or at least the solution-collecting node, to its knees. A third problem is that performance prediction is difficult when neither the number of participants nor their individual node performances are known in advance. Finally, fault-tolerance [26] and cheating [27] are also important issues; if the environment is competitive, or any single computation is important, they will have to be taken into account.",
211
+ "bbox": [
212
+ 515,
213
+ 463,
214
+ 936,
215
+ 646
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "In any case, we believe that the best way to obtain a good amount of users is to make it easy for them to participate, using technologies available in their computers, as the browser is. In fact, some suggestions were published (for example, the one of Jim Culbert in his blog [28], and in some mailing lists), and, besides our own [11], there have been some recent papers and reports on similar setups. For instance, W. Langdon has been running for some time an interactive evolution experiment using Javascript in the browser [29], which was mainly intended for achieving high diversity in a fractal snowflake design than high performance. Even more recently, Klein and Spector [30] present a system based on the Push3 language, which is compiled to JavaScript",
222
+ "bbox": [
223
+ 515,
224
+ 648,
225
+ 936,
226
+ 833
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "page_footnote",
232
+ "text": "The published figure was even lower.",
233
+ "bbox": [
234
+ 95,
235
+ 859,
236
+ 312,
237
+ 871
238
+ ],
239
+ "page_idx": 1
240
+ },
241
+ {
242
+ "type": "page_footnote",
243
+ "text": "4See http://setiathome.berkeley.edu/ for downloading the software and some reports.",
244
+ "bbox": [
245
+ 517,
246
+ 847,
247
+ 937,
248
+ 872
249
+ ],
250
+ "page_idx": 1
251
+ },
252
+ {
253
+ "type": "text",
254
+ "text": "in the browser. This system would be the closest to what we are presenting in this paper.",
255
+ "bbox": [
256
+ 82,
257
+ 68,
258
+ 500,
259
+ 96
260
+ ],
261
+ "page_idx": 2
262
+ },
263
+ {
264
+ "type": "text",
265
+ "text": "The proposed approach could also be considered as parasitic computing since, as stated in Section I, the only participation from the user will be to load a web page and click on a button; in fact, any AJAX-based could use these resources without his acquiescence (and, in any case, it would be desirable to run without causing much trouble). The concept was introduced by Barabási in [31], and followed by others (for instance, Kohring in [32]). In that work they proposed to use the Internet routers to compute a checksum by means of a set of specially crafted packets, whose aggregated result would be used to solve the SAT problem. Anyway, although the concept is interesting, there seems not to be a continuation for this work (at least openly), probably due to its inherent dangers (as analyzed in a paper by Lam et al. [33]).",
266
+ "bbox": [
267
+ 80,
268
+ 96,
269
+ 500,
270
+ 308
271
+ ],
272
+ "page_idx": 2
273
+ },
274
+ {
275
+ "type": "text",
276
+ "text": "The virtual machine embedded into the browser provides a way to easily do that kind of sneaky/parasitic computing, but JavaScript faces the handicap of being an interpreted language, which means that the efficiency of different implementations varies wildly. Moreover, it is not optimized for numerical computation but for object tree management (the so called DOM, document object model) and strings. Nevertheless its wide availability makes us think about considering it, at least as a possibility.",
277
+ "bbox": [
278
+ 80,
279
+ 309,
280
+ 500,
281
+ 437
282
+ ],
283
+ "page_idx": 2
284
+ },
285
+ {
286
+ "type": "text",
287
+ "text": "III. METHODOLOGY AND EXPERIMENTAL SETUP",
288
+ "text_level": 1,
289
+ "bbox": [
290
+ 112,
291
+ 442,
292
+ 472,
293
+ 456
294
+ ],
295
+ "page_idx": 2
296
+ },
297
+ {
298
+ "type": "text",
299
+ "text": "For this experiments we have designed and implemented a client-server program written in Perl (server-side) and Javascript (client-side), communicating using JSON via the XMLHttpRequest object. This object requires that the website and the AJAX requests are served from the same host, which is a constraint. Code for both is available, under the GPL, from http://rubyforge.org/projects/dconrails/. The algorithm runs on the client for a fixed number of generations, as shown in figure 1; running parameters are set from the server and are downloaded from it along with the webpage from which the experiment is run. A preset number of generations is run on the client, after which a request is made to the server with the best individual in the last generation. The algorithm stops and waits for the answer from the server. The server receives the request, stores it in a database, and sends back the best individual stored in the server. This individual is incorporated in the client population, which starts again to run. Several clients acting at the same time make requests asynchronously, using the facilities of the standard Apache web server. The server is thus used as a clearinghouse for interchange of information among the different clients; however, there's no explicit communication or topology among the different nodes running the genetic algorithm. Besides, the fact that the server always contains the best individuals generated so far guarantees that the best solution (with a fixed number of evaluations resolution) available so far is always kept. The server also sends back the number of generations the client should run; which is usually the same number as before,",
300
+ "bbox": [
301
+ 80,
302
+ 461,
303
+ 502,
304
+ 872
305
+ ],
306
+ "page_idx": 2
307
+ },
308
+ {
309
+ "type": "text",
310
+ "text": "but turns to 0, thus stopping the client, when the stopping condition is met.",
311
+ "bbox": [
312
+ 517,
313
+ 68,
314
+ 937,
315
+ 95
316
+ ],
317
+ "page_idx": 2
318
+ },
319
+ {
320
+ "type": "text",
321
+ "text": "Clients leave the experiment by the expeditive method of surfing away to another page or closing the web browser; in tabbed browsers (most browsers nowadays), a tab (or several) can run the experiment while the browser is available for other tasks. When the experiment has been running for a predetermined number of evaluations (which were set, for this experiment, to 750000), all clients get a message to stop running, and change their user interface to a message offering them to reload the (new) experiment and start all over again. Besides, there is a watching daemon running on the server which checks the database for the number of individuals evaluated, and resets the experiment by incrementing the experiment ID by one and eliminating the population. Thus, experiments can run unchecked on a server while this watchdog daemons is running. Several additional utilities are also provided via several webpages, that inform on the state of the experiment, or allow to set the GA parameters. Experimental subjects were gathered by several methods: sending it via email to department and project coworkers, using the URL for the experiment as a Google Talk status line, as a Twitter (http://twitter.com) message, as a blog post, and, eventually, it was picked up by a wildly popular Spanish blog $^{5}$ , which managed to gather the highest number of machines.",
322
+ "bbox": [
323
+ 515,
324
+ 97,
325
+ 936,
326
+ 437
327
+ ],
328
+ "page_idx": 2
329
+ },
330
+ {
331
+ "type": "text",
332
+ "text": "The experiment consisted in optimizing the 256-bits Royal Road function, and each instance consisted in a maximum of 750000 evaluations (which were barely enough to find the solution). The algorithm was steady state (with incorporation of the immigrant every 20 generations), with rank-based selection and substitution; every generation, $50\\%$ of the population was generated, substituting the worst $50\\%$ individuals. Crossover priority was set to $80\\%$ , and mutation to $20\\%$ , changing $1\\%$ of the bits. However, these settings will have no influence on performance, other than the fact that, if the solution is found before the end of the experiment, the users will get bored and change to a new page<sup>6</sup>.",
333
+ "bbox": [
334
+ 515,
335
+ 438,
336
+ 937,
337
+ 609
338
+ ],
339
+ "page_idx": 2
340
+ },
341
+ {
342
+ "type": "text",
343
+ "text": "Data was gathered from two different sources: the watchdog-daemon logs, which mainly gave data about the number of individuals evaluated and the time needed for each experiment, and the Apache daemon log; the relevant lines were extracted just by using grep. It should be noted that the server was not running exclusively the experiment, but doing it along with the usual tasks. The server was a 700-MHz, 1 Gigabyte-RAM machine, with the database in another dual processor, 450-MHz machine. Both machines were running obsolete RedHat 7.x and 9.x Linux operating systems<sup>7</sup>.",
344
+ "bbox": [
345
+ 515,
346
+ 610,
347
+ 937,
348
+ 751
349
+ ],
350
+ "page_idx": 2
351
+ },
352
+ {
353
+ "type": "text",
354
+ "text": "Results of the set experiments will be commented in the",
355
+ "bbox": [
356
+ 534,
357
+ 752,
358
+ 937,
359
+ 767
360
+ ],
361
+ "page_idx": 2
362
+ },
363
+ {
364
+ "type": "page_footnote",
365
+ "text": "<sup>5</sup>Who posted it at http://www.microsiervos.com/archivo/ordenadores/experimento-computation-distribuida.html.",
366
+ "bbox": [
367
+ 517,
368
+ 782,
369
+ 934,
370
+ 815
371
+ ],
372
+ "page_idx": 2
373
+ },
374
+ {
375
+ "type": "page_footnote",
376
+ "text": "<sup>6</sup>And this is just an example of how social factors in this kind of experiments affect performance.",
377
+ "bbox": [
378
+ 517,
379
+ 815,
380
+ 936,
381
+ 838
382
+ ],
383
+ "page_idx": 2
384
+ },
385
+ {
386
+ "type": "page_footnote",
387
+ "text": "Both machines host our group web server and home pages; we thought it was better to run the experiment in our standard setup instead of a dedicated one.",
388
+ "bbox": [
389
+ 517,
390
+ 838,
391
+ 936,
392
+ 871
393
+ ],
394
+ "page_idx": 2
395
+ },
396
+ {
397
+ "type": "text",
398
+ "text": "Prueba Algoritmo Genetico",
399
+ "text_level": 1,
400
+ "bbox": [
401
+ 179,
402
+ 66,
403
+ 426,
404
+ 83
405
+ ],
406
+ "page_idx": 3
407
+ },
408
+ {
409
+ "type": "image",
410
+ "img_path": "images/81c70255e9252b9ab386acd7691f3d97e7be148188c4cc04e53c446416913614.jpg",
411
+ "image_caption": [
412
+ "Fig. 1. Experiment running on two different browsers (Firefox and Epiphany) in the same machine. User interface is written in Spanish, since in this initial experiment was addressed to audience speaking that language. The colored (or gray-scale) horizontal bar is a graphical representation of the chromosome. The inset windows (Epiphany) started in second place, and thus, the state of evolution is less advanced."
413
+ ],
414
+ "image_footnote": [],
415
+ "bbox": [
416
+ 178,
417
+ 96,
418
+ 840,
419
+ 286
420
+ ],
421
+ "page_idx": 3
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "next section.",
426
+ "bbox": [
427
+ 82,
428
+ 366,
429
+ 174,
430
+ 378
431
+ ],
432
+ "page_idx": 3
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "IV. EXPERIMENTAL RESULTS",
437
+ "text_level": 1,
438
+ "bbox": [
439
+ 183,
440
+ 387,
441
+ 400,
442
+ 399
443
+ ],
444
+ "page_idx": 3
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "Eventually, the experiment was running for several days, with different degrees of intensity. Several hundred machines participated in different instances, coming from all over the world, although mainly from Spain. The first question we wanted to answer was, how many machines should we expect in this kind of experiment? BOINC and SETI@home have gathered thousands of machines in its $10+$ year run, but our experiments were limited in time (several hours, at most, if no machine was available, a few minutes if it was), so a limited number of machines should also be expected. The distribution of the number of machines is shown in figure 2.",
449
+ "bbox": [
450
+ 80,
451
+ 404,
452
+ 500,
453
+ 561
454
+ ],
455
+ "page_idx": 3
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "The exact figures will vary in every experiment, but it seems clear that the median number of machines will hover around 1/4 of the maximum number. Besides, it is quite easy to obtain 5 clients for a certain number of evaluations; most experiments have less than 10 clients.",
460
+ "bbox": [
461
+ 80,
462
+ 574,
463
+ 500,
464
+ 644
465
+ ],
466
+ "page_idx": 3
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "On the other hand, the number of evaluations each client contributes are quite different, as is shown in figure 3.",
471
+ "bbox": [
472
+ 80,
473
+ 644,
474
+ 500,
475
+ 673
476
+ ],
477
+ "page_idx": 3
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "Most clients contribute just a few generations, be it because the browser stops running the program if it takes more than a predetermined number of seconds (which can happen in IE or in Firefox if clients are too slow; usually the predetermined number of generations will be less than this number), the user gets bored and moves on, or because statistically most people join when the experiment has been running for some time and show up only when there are a few evaluations left. Each one of these problems would have to be tackled separately, but the baseline is that, even when a good number of machines joins an experiment, they will do so only for a limited amount of time on average. Besides, these limitations in number of generations translate also to limitations in time, so that experiments will have to be",
482
+ "bbox": [
483
+ 80,
484
+ 674,
485
+ 500,
486
+ 872
487
+ ],
488
+ "page_idx": 3
489
+ },
490
+ {
491
+ "type": "image",
492
+ "img_path": "images/593231fc15da14522b9d75f07d10744c799de4593679725b63ae963c6bd73719.jpg",
493
+ "image_caption": [
494
+ "Fig. 2. Histogram of the number of machines used in each experiment. The median is 8.5 machines, and the $75\\%$ quartile is placed at 15 machines, with a peak of 44 machines. A single client using several browsers counts as a single machine. It should be noted that not all clients are simultaneously connected."
495
+ ],
496
+ "image_footnote": [],
497
+ "bbox": [
498
+ 521,
499
+ 368,
500
+ 919,
501
+ 661
502
+ ],
503
+ "page_idx": 3
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "designed in a way that sufficient information is transmitted from clients during the expected time they are going to spend in the experiment.",
508
+ "bbox": [
509
+ 515,
510
+ 770,
511
+ 937,
512
+ 813
513
+ ],
514
+ "page_idx": 3
515
+ },
516
+ {
517
+ "type": "text",
518
+ "text": "But different clients have different performance, and it is also interesting to measure what is the average time (and thus average performance) it takes the clients between two communications to the server (that is, $20 * 50$ evaluations +",
519
+ "bbox": [
520
+ 515,
521
+ 815,
522
+ 937,
523
+ 872
524
+ ],
525
+ "page_idx": 3
526
+ },
527
+ {
528
+ "type": "image",
529
+ "img_path": "images/250b76a397fd705ce81c3148aaa82ee966773e0af5fbb82b7af1dbc82dc68358.jpg",
530
+ "image_caption": [
531
+ "Fig. 3. Histogram of the number of generations all machines participating in the experiment have contributed, cut off at 400 generations. Each generation corresponds to 50 new individuals. Most clients contribute 100 generations or less, with a few contributing more than 200. The median is 16 generations, with the 3rd quartile placed at 55 generations."
532
+ ],
533
+ "image_footnote": [],
534
+ "bbox": [
535
+ 178,
536
+ 70,
537
+ 811,
538
+ 396
539
+ ],
540
+ "page_idx": 4
541
+ },
542
+ {
543
+ "type": "text",
544
+ "text": "waiting time). This is interesting for two main reasons: server performance will have to be tuned to be able to answer to this level of requests, and second, the generation gap will also have to be fine-tuned so that waiting time and the possibility that the script is blocked due to overtime is minimized. The results obtained in the experiment are shown in figure 4.",
545
+ "bbox": [
546
+ 80,
547
+ 480,
548
+ 499,
549
+ 566
550
+ ],
551
+ "page_idx": 4
552
+ },
553
+ {
554
+ "type": "text",
555
+ "text": "This figure shows that, for this kind of problem, the vast majority of clients will have a gap smaller than two seconds. This quantity will vary for different problems, but the conclusion is that most clients will have high or median performance, with few clients having lower performance. This measure also gives us an estimate of the average performance (2.906 seconds/20 generations).",
556
+ "bbox": [
557
+ 80,
558
+ 568,
559
+ 499,
560
+ 668
561
+ ],
562
+ "page_idx": 4
563
+ },
564
+ {
565
+ "type": "text",
566
+ "text": "However, at the end of the day the setup is intended to achieve high performance when running an evolutionary computation experiment. This data is presented in figure 5.",
567
+ "bbox": [
568
+ 80,
569
+ 669,
570
+ 499,
571
+ 714
572
+ ],
573
+ "page_idx": 4
574
+ },
575
+ {
576
+ "type": "text",
577
+ "text": "This figure is quite similar to fig 4. Median is at 1000 seconds, with a minimum at 292 and 3rd quartile at 2323; $75\\%$ of runs will last less than 2323 seconds. Taking into account that the average 20-generation step is 2.906 seconds, and a single experiment needs 375 such steps, the average single machine run would take 1089.75 seconds; this means that the maximum speedup achieved is $1089.75 / 292 = 3.73$ clients running simultaneously, and the median is approximately a single average machine. This will probably vary for experiments of different duration, but, on average, we could say that significant (albeit small) speedups can be",
578
+ "bbox": [
579
+ 80,
580
+ 716,
581
+ 500,
582
+ 874
583
+ ],
584
+ "page_idx": 4
585
+ },
586
+ {
587
+ "type": "text",
588
+ "text": "achieved using spontaneous volunteer computing. In general, however, several machines will sequentially provide CPU cycles to an experiment, adding up to a single machine doing all the work. In general also, the fact that there are up to 44 machines working in a single experiment, or that the range of running times can vary in a factor of up to one hundred, indicates that, for this experiment, no bottleneck has been found. Of course, more simultaneous machines will have to be tested to find the limit. Finally, the fact that all contributions are volunteer means that the evaluation rate is not constant, yielding figures like fig 6, where the steepness of each line is roughly equivalent to the evaluation speed, since the $x$ axis corresponds to time, and the $y$ axis number of individuals evaluated.",
589
+ "bbox": [
590
+ 515,
591
+ 480,
592
+ 937,
593
+ 679
594
+ ],
595
+ "page_idx": 4
596
+ },
597
+ {
598
+ "type": "text",
599
+ "text": "V. CONCLUSIONS, DISCUSSION AND FUTURE WORK",
600
+ "text_level": 1,
601
+ "bbox": [
602
+ 537,
603
+ 685,
604
+ 915,
605
+ 698
606
+ ],
607
+ "page_idx": 4
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "While in previous papers [11] we proved that this kind of AJAX based, volunteer, and potentially sneaky, computation could be used profitably for performing genetic algorithm experiments, in this paper we have proved that, without an expensive or far-fetched setup, it can achieve high performance, equivalent, at most, to several computers of average performance. The code used to perform the experiment is publicly available and is modular so that creating different experiments is just a matter of writing a new JavaScript fitness function and tuning the GA parameters accordingly.",
612
+ "bbox": [
613
+ 515,
614
+ 701,
615
+ 937,
616
+ 844
617
+ ],
618
+ "page_idx": 4
619
+ },
620
+ {
621
+ "type": "text",
622
+ "text": "The experiments have proved that there is a good amount of computational power that can be easily tapped and used for",
623
+ "bbox": [
624
+ 517,
625
+ 844,
626
+ 937,
627
+ 872
628
+ ],
629
+ "page_idx": 4
630
+ },
631
+ {
632
+ "type": "image",
633
+ "img_path": "images/54f3353d943ec9b402dab15ee7b2b1aa34daa7c006d23805f2cdce892606223c.jpg",
634
+ "image_caption": [
635
+ "Fig. 6. Plot showing the number of individuals evaluated vs. time for a few dozens experiments; time is plotted up to 600 seconds only. As is seen, some experiments have a more or less constant evaluation rate (constant inclination), while other are more step-like with clients leaving and joining the experiment all the time."
636
+ ],
637
+ "image_footnote": [],
638
+ "bbox": [
639
+ 176,
640
+ 74,
641
+ 811,
642
+ 526
643
+ ],
644
+ "page_idx": 5
645
+ },
646
+ {
647
+ "type": "text",
648
+ "text": "evolutionary computation experiments, however, the nature of AGAJAJ constrains also the way users donate computing power, as well as the number of clients available for an experiment. In this paper we have found some figures, which will undoubtedly vary for other experiments; however, the general shape of the curves will probably be the same, following a very steep decrease from the maximum values obtained.",
649
+ "bbox": [
650
+ 80,
651
+ 615,
652
+ 499,
653
+ 728
654
+ ],
655
+ "page_idx": 5
656
+ },
657
+ {
658
+ "type": "text",
659
+ "text": "The GA, being asynchronous, faces some problems that have not been tackled in this paper. What is the best approach to preserve diversity? To generate a new population in each client, and receive immigrants as soon as possible, which are incorporated into the population? Or is it better to create new client populations based on existing populations? What is really the algorithmic contribution of new clients? These issues will be explored as future work. We will also try to measure the limits of this technology, and test the impact of servers of varying performance and workload on overall",
660
+ "bbox": [
661
+ 80,
662
+ 730,
663
+ 502,
664
+ 874
665
+ ],
666
+ "page_idx": 5
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "performance. Eventually, we will also try to perform a sneaky experiment, to check what kind of performance can be expected in that kind of setups.",
671
+ "bbox": [
672
+ 515,
673
+ 615,
674
+ 936,
675
+ 657
676
+ ],
677
+ "page_idx": 5
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "Another venue of work will be to examine the algorithmic performance of AGAJAJ; even as new clients are added to an experiment, what's the improvement obtained from them? In order to check that, a controlled experiment using known computers will be used, adding them one at a time, so that the real impact on the genetic algorithm is evaluated. Once that is know, it would be interesting to experiment with adaptive client parameters, instead of the one-size-fits-all parameter settings used so far.",
682
+ "bbox": [
683
+ 515,
684
+ 657,
685
+ 937,
686
+ 787
687
+ ],
688
+ "page_idx": 5
689
+ },
690
+ {
691
+ "type": "text",
692
+ "text": "ACKNOWLEDGEMENTS",
693
+ "text_level": 1,
694
+ "bbox": [
695
+ 642,
696
+ 796,
697
+ 811,
698
+ 809
699
+ ],
700
+ "page_idx": 5
701
+ },
702
+ {
703
+ "type": "text",
704
+ "text": "This paper has been funded in part by the Spanish MI-CYT project NoHNES (Spanish Ministerio de Educación y Ciencia - TIN2007-68083) and the Junta de Andalucía P06-TIC-02025. We are also grateful to the editors of the",
705
+ "bbox": [
706
+ 515,
707
+ 815,
708
+ 937,
709
+ 874
710
+ ],
711
+ "page_idx": 5
712
+ },
713
+ {
714
+ "type": "image",
715
+ "img_path": "images/5af9cbf06b01bb0e4b876c9fa5a129e00ee3225338981178df07f5bceb785e88.jpg",
716
+ "image_caption": [
717
+ "Fig. 4. Distribution of gap time, that is, time among two request from a particular client; it has been rounded to the closest number of seconds (since the webserver logs it has been extracted from have that resolution). Most clients take 3 second or less to process 20 generations, with a majority taking 2 or less. Gaps bigger than 10-15 seconds are probably non-significant, in fact, negative gaps and those bigger than 100 have been eliminated from the data set. For this set of experiments, the median is at 2. $x$ axis is logarithmic, to emphasize the fact that the distribution of client performance falls very fast, although a different center should be expected for a different problem."
718
+ ],
719
+ "image_footnote": [],
720
+ "bbox": [
721
+ 85,
722
+ 70,
723
+ 497,
724
+ 364
725
+ ],
726
+ "page_idx": 6
727
+ },
728
+ {
729
+ "type": "text",
730
+ "text": "Microsiervos.com blog for talking about our experiment, and to the readers of that article for participating in it. We are also grateful to the anonymous people who have known about the experiment via several possible ways<sup>8</sup> and participating in it.",
731
+ "bbox": [
732
+ 80,
733
+ 522,
734
+ 500,
735
+ 581
736
+ ],
737
+ "page_idx": 6
738
+ },
739
+ {
740
+ "type": "text",
741
+ "text": "REFERENCES",
742
+ "text_level": 1,
743
+ "bbox": [
744
+ 243,
745
+ 598,
746
+ 339,
747
+ 610
748
+ ],
749
+ "page_idx": 6
750
+ },
751
+ {
752
+ "type": "list",
753
+ "sub_type": "ref_text",
754
+ "list_items": [
755
+ "[1] D. P. Anderson, J. Cobb, E. Korpela, M. Lebofsky, and D. Werthimer, “SETI@home: an experiment in public-resource computing,” Commun. ACM, vol. 45, no. 11, pp. 56–61, 2002.",
756
+ "[2] R. Steinmetz and K. Wehrle, Eds., Peer-to-Peer Systems and Applications, ser. Lecture Notes in Computer Science, vol. 3485. Springer, 2005.",
757
+ "[3] Gilorien, DHTML and JavaScript. Upper Saddle River, NJ 07458, USA: Prentice-Hall PTR, 2000.",
758
+ "[4] R. Shah, \"A beginner's guide to JavaScript,\" JavaWorld: IDG's magazine for the Java community, vol. 1, no. 1, pp. ??-??, Mar. 1996. [Online]. Available: http://www.javaworld.com/javaworld/jw-03-1996/jw-03-javascriptintro.htm",
759
+ "[5] D. Flanagan, JavaScript Pocket Reference (2nd Edition). O'Reilly, October 2002. [Online]. Available: http://www.amazon.fr/exec/obidos/ASIN/0596004117/citeulike04-21",
760
+ "[6] ECMA, ECMA-262: ECMAScript Language Specification, 3rd ed. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Dec. 1999. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ECMA-262.HTM"
761
+ ],
762
+ "bbox": [
763
+ 92,
764
+ 624,
765
+ 500,
766
+ 831
767
+ ],
768
+ "page_idx": 6
769
+ },
770
+ {
771
+ "type": "text",
772
+ "text": "<sup>8</sup>For instance, this article in my own blog http://atalaya.blogalia.com/historias/53480",
773
+ "bbox": [
774
+ 80,
775
+ 847,
776
+ 497,
777
+ 872
778
+ ],
779
+ "page_idx": 6
780
+ },
781
+ {
782
+ "type": "image",
783
+ "img_path": "images/16fbbc6baab692bce6ff11ca3dc7de066f9bd4ab6f5d56d39617c4f2f9d0db51.jpg",
784
+ "image_caption": [
785
+ "Fig. 5. Distribution of running times for a fixed amount of evaluations. Some outliers have been cut off; approximately $10\\%$ of runs took more than 10000 seconds. The mode is between 500 and 1000 seconds, and most runs end before 2000 seconds. Time starts to count from the moment the first client connects to the server."
786
+ ],
787
+ "image_footnote": [],
788
+ "bbox": [
789
+ 522,
790
+ 70,
791
+ 934,
792
+ 364
793
+ ],
794
+ "page_idx": 6
795
+ },
796
+ {
797
+ "type": "list",
798
+ "sub_type": "ref_text",
799
+ "list_items": [
800
+ "[7] ——, ECMA-290: ECMAScript Components Specification. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 1999. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ECMA-290.HTM",
801
+ "[8] ——, ECMA-327: ECMAScript 3: Compact Profile, 3rd ed. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 2001. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ecma-327.htm",
802
+ "[9] ——, ECMA-357: ECMAScript for XML (E4X) Specification. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 2004. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ecma-357.htm",
803
+ "[10] Wikipedia, “Ajax — wikipedia, la enciclopedia libre,” 2006, [Internet; descargado 14-mayo-2006]. [Online]. Available: $\\backslash$ url{http://es.wikipedia.org/w/index.php?title=AJAX&oldid=3168254}",
804
+ "[11] \"Towards Open Grid Services Architecture,\" http://www.globus.org/ogsa/.",
805
+ "[12] J. J. Merelo, A. M. García, J. L. J. Laredo, J. Lupión, and F. Tricas, \"Browser-based distributed evolutionary computation: performance and scaling behavior,\" in GECCO '07: Proceedings of the 2007 GECCO conference companion on Genetic and evolutionary computation. New York, NY, USA: ACM Press, 2007, pp. 2851-2858.",
806
+ "[13] M. García-Arenas, J. G. Castellano, P. A. Castillo, J. Carpio, M. Cillero, J.-J. Merelo-Guervós, A. Prieto, V. Rivas, and G. Romero, \"Speedup measurements for a distributed evolutionary algorithm that uses Jini,\" in XI Jornadas de Paralelismo, U. d. G. Depto. ATC, Ed., 2000, pp. 241-246.",
807
+ "[14] C. Setzkorn and R. C. Paton, \"Javaspaces - an affordable technology for the simple implementation of reusable parallel evolutionary algorithms,\" in Proceedings of the International Symposium on Knowledge Exploration in Life Science Informatics, KELSI 2004, ser. Lecture Notes in Artificial Intelligence, J. A. López, E. Benfenati, and W. Dubitzky, Eds., vol. 3303. Milan, Italy: Springer, 25-26 Nov. 2004, pp. 151-160. [Online]. Available: $\\backslash$ url{http://springerlink.metapress.com/openurl.asp?genre=article;issn=0302-9743;volume=3303;spage=151}",
808
+ "[15] F. S. Chong and W. B. Langdon, \"Java based distributed genetic"
809
+ ],
810
+ "bbox": [
811
+ 521,
812
+ 470,
813
+ 937,
814
+ 872
815
+ ],
816
+ "page_idx": 6
817
+ },
818
+ {
819
+ "type": "list",
820
+ "sub_type": "ref_text",
821
+ "list_items": [
822
+ "programming on the internet,\" in Proceedings of the Genetic and Evolutionary Computation Conference, W. Banzhaf, J. Daida, A. E. Eiben, M. H. Garzon, V. Honavar, M. Jakiela, and R. E. Smith, Eds., vol. 2. Orlando, Florida, USA: Morgan Kaufmann, 13-17 Jul. 1999, p. 1229, full text in technical report CSRP-99-7. [Online]. Available: http://www.cs.ucl.ac.uk/staff/W.Langdon/ftp/papers/p.chong/DGPposter.pdf",
823
+ "[16] D. S. Myers and M. P. Cummings, \"Necessity is the mother of invention: a simple grid computing system using commodity tools,\" J. Parallel Distrib. Comput., vol. 63, no. 5, pp. 578-589, 2003.",
824
+ "[17] J. L. J. Laredo, P. A. Castillo, B. Paechter, A. M. Mora, E. Alfaro-Cid, A. I. Esparcia-Alcazar, and J. J. Merelo, \"Empirical validation of a gossiping communication mechanism for parallel eas,\" ser. Lecture Notes in Computer Science, M. G. et al., Ed., vol. 4448. Springer, 2007, pp. 129-136.",
825
+ "[18] E. A. Eiben, M. Schoenauer, J. L. J. Laredo, P. A. Castillo, A. M. Mora, and J. J. Merelo, \"Exploring selection mechanisms for an agent-based distributed evolutionary algorithm,\" in GECCO '07: Proceedings of the 2007 GECCO conference companion on Genetic and evolutionary computation. New York, NY, USA: ACM Press, 2007, pp. 2801-2808.",
826
+ "[19] E. Cantú-Paz, “Migration policies, selection pressure, and parallel evolutionary algorithms,” Journal of Heuristics, vol. 7, no. 4, pp. 311–334, 2001.",
827
+ "[20] L. F. G. Sarmenta and S. Hirano, \"Bayanihan: building and studying Web-based volunteer computing systems using Java,\" Future Generation Computer Systems, vol. 15, no. 5-6, pp. 675-686, 1999. [Online]. Available: citeseer.nj.nec.com/sarmenta99bayanihan.html",
828
+ "[21] D. P. Anderson, E. Korpela, and R. Walton, \"High-performance task distribution for volunteer computing,\" in E-SCIENCE '05: Proceedings of the First International Conference on e-Science and Grid Computing. Washington, DC, USA: IEEE Computer Society, 2005, pp. 196-203.",
829
+ "[22] P. Cappello and D. Mourloukos, “A scalable, robust network for parallel computing,” in JGI '01: Proceedings of the 2001 joint ACM-ISCOPE conference on Java Grande. New York, NY, USA: ACM Press, 2001, pp. 78–86.",
830
+ "[23] D. Anderson, \"Boinc: A system for public-resource computing and storage,\" in Proceedings of the 5th IEEE/ACM International Workshop on Grid Computing, Pittsburgh, USA, 2004.",
831
+ "[24] M. Arenas, P. Collet, A. Eiben, M. Jelasy, J. J. Merelo, B. Paechter, M. Preuß, and M. Schoenauer, \"A framework for distributed evolutionary algorithms,\" ser. Lecture Notes in Computer Science, LNCS, J.-J. M. Guervós, P. Adamidis, H.-G. Beyer, J.-L. Fernández-Villacanas, and H.-P. Schwefel, Eds., no. 2439. Springer-Verlag, September 2002, keywords: Implementation::Parallel EAs, Related::Simulated Adaptive Behavior, Technique::Advanced techniques - miscellaneous, Technique::Evolutionary computing - general, Technique::Evolutionary computing - miscellaneous, CiteSeer context: http://citeseer.nj.nec.com/context/2189070/0, available from http://link.springer.de/link/service/series/0558/papers/2439/243900665.pdf, Metapress URL: http://www.springerlink.com/link.asp?id=h4n29kbl69jvab4c.",
832
+ "[25] Wikipedia, \"Electric sheep — wikipedia, the free encyclopedia,\" 2007, http://en.wikipedia.org/w/index.php?title=Electric_Sheep&oldid=159677937.",
833
+ "[26] R. Mason and W. Kelly, “G2-P2P: a fully decentralised fault-tolerant cycle-stealing framework,” in ACSW Frontiers '05: Proceedings of the 2005 Australasian workshop on Grid computing and e-research. Darlinghurst, Australia, Australia: Australian Computer Society, Inc., 2005, pp. 33-39.",
834
+ "[27] F. F. D. Vega, \"A fault tolerant optimization algorithm based on evolutionary computation,\" in DEPCOS-RELCOMEX '06: Proceedings of the International Conference on Dependability of Computer Systems (DEPCOS-RELCOMEX'06). Washington, DC, USA: IEEE Computer Society, 2006, pp. 335-342.",
835
+ "[28] L. F. G. Sarmenta, \"Sabotage-tolerance mechanisms for volunteer computing systems,\" Future Generation Computer Systems, vol. 18, no. 4, pp. 561-572, 2002. [Online]. Available: citeseer.nj.nec.com/449803.html",
836
+ "[29] J. Culbert, \"Ajax and distributed computation thoughts,\" Published at http://culbert.net/?p=6, March 2006, Ultimo acceso Mayo 2006.",
837
+ "[30] W. B. Langdon, “Pfeiffer – A distributed open-ended evolutionary system,” in AISB'05: Proceedings of the Joint Symposium on Socially Inspired Computing (METAS 2005), B. Edmonds, N. Gilbert, S. Gustafson, D. Hales, and N. Krasnogor, Eds., University of"
838
+ ],
839
+ "bbox": [
840
+ 84,
841
+ 68,
842
+ 500,
843
+ 872
844
+ ],
845
+ "page_idx": 7
846
+ },
847
+ {
848
+ "type": "list",
849
+ "sub_type": "ref_text",
850
+ "list_items": [
851
+ "Hertfordshire, Hatfield, UK, 12-15 Apr. 2005, pp. 7-13, sSAISB 2005 Convention. [Online]. Available: http://www.cs.ucl.ac.uk/staff/W.Langdon/ftp/papers/wbl_metas2005.pdf",
852
+ "[31] J. Klein and L. Spector, \"Unwitting distributed genetic programming via asynchronous javascript and xml,\" in GECCO '07: Proceedings of the 9th annual conference on Genetic and evolutionary computation. New York, NY, USA: ACM, 2007, pp. 1628-1635.",
853
+ "[32] A.-L. Barabasi, V. W. Freeh, H. Jeong, and J. B. Brockman, \"Parasitic computing,\" Nature, vol. 412, no. 6850, pp. 894-897, August 2001. [Online]. Available: http://www.nature.com/cgi-taf/DynaPage.taf?file=/nature/journal/v412/n6850/abs/412894a0\\_fs.html",
854
+ "[33] G. A. Kohring, \"Implicit simulations using messaging protocols,\" COMPUTERS AND PHYSICS, vol. 14, p. 203, 2003. [Online]. Available: http://www.citebase.org/abstract?id=oai:arXiv.org:cs/0208021",
855
+ "[34] V. T. Lam, S. Antonatos, P. Akritidis, and K. G. Anagnostakis, \"Puppetnets: misusing web browsers as a distributed attack infrastructure,\" in CCS '06: Proceedings of the 13th ACM conference on Computer and communications security. New York, NY, USA: ACM, 2006, pp. 221-234."
856
+ ],
857
+ "bbox": [
858
+ 521,
859
+ 68,
860
+ 936,
861
+ 282
862
+ ],
863
+ "page_idx": 7
864
+ }
865
+ ]
2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_model.json ADDED
@@ -0,0 +1,1305 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.093,
7
+ 0.079,
8
+ 0.931,
9
+ 0.127
10
+ ],
11
+ "angle": 0,
12
+ "content": "Asynchronous Distributed Genetic Algorithms with Javascript and JSON"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.177,
18
+ 0.147,
19
+ 0.844,
20
+ 0.162
21
+ ],
22
+ "angle": 0,
23
+ "content": "Juan Julián Merelo-Guervós, Pedro A. Castillo, JLJ Laredo, A. Mora García, A. Prieto"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.082,
29
+ 0.192,
30
+ 0.504,
31
+ 0.385
32
+ ],
33
+ "angle": 0,
34
+ "content": "Abstract—In a connected world, spare CPU cycles are up for grabs, if you only make its obtention easy enough. In this paper we present a distributed evolutionary computation system that uses the computational capabilities of the ubiquitous web browser. Using Asynchronous Javascript and JSON (Javascript Object Notation, a serialization protocol) allows anybody with a web browser (that is, mostly everybody connected to the Internet) to participate in a genetic algorithm experiment with little effort, or none at all. Since, in this case, computing becomes a social activity and is inherently unpredictable, in this paper we will explore the performance of this kind of virtual computer by solving simple problems such as the Royal Road function and analyzing how many machines and evaluations it yields. We will also examine possible performance bottlenecks and how to solve them, and, finally, issue some advice on how to set up this kind of experiments to maximize turnout and, thus, performance."
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.223,
40
+ 0.392,
41
+ 0.362,
42
+ 0.405
43
+ ],
44
+ "angle": 0,
45
+ "content": "I. INTRODUCTION"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.082,
51
+ 0.411,
52
+ 0.503,
53
+ 0.538
54
+ ],
55
+ "angle": 0,
56
+ "content": "Application-level networks (ALNs), are configured as a set of clients/server (servers) that can provide their spare CPU cycles by means of a downloadable application, establishing a distributed computation network which can provide ad hoc computational power. Some ALN like SETI@Home have been quite successful [1], creating a virtual computer that has processed a good amount of teraflops, while other experiments such as Popular Power (and most others, in fact) have not [?]."
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.082,
62
+ 0.539,
63
+ 0.503,
64
+ 0.795
65
+ ],
66
+ "angle": 0,
67
+ "content": "The key feature of these application-level networks is the simplicity of use: we believe that the best way to obtain the participation of as many users as possible is to make it as very simple. In particular, it will be easier if they do not need to download a special application (such as a screen-saver) to participate, as is needed in BOINC, the successor to SETI@Home. For this reason, we are exploring the use of applications that are commonly installed in the user's computer, such as the web browser, which is available even in PDAs and some cellular phones<sup>1</sup>. Moreover, most browsers natively include a JavaScript interpreter [2], [3], [4] or virtual machine. JavaScript is an interpreted language<sup>2</sup>, initially proposed by Netscape, and later adopted as an ECMA standard [5], [6], [7], [8]. In this way, most browsers are compatible, at least at a language level (not always at the level of browser objects, where there exists a reasonable compatibility, anyway). Most browser also include elements such as a Java virtual machine and a Flash plugin, which,"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.082,
73
+ 0.805,
74
+ 0.502,
75
+ 0.839
76
+ ],
77
+ "angle": 0,
78
+ "content": "The authors are with the Departamento de Arquitectura y Tecnología de Computadores, University of Granada (Spain), corresponding email jj@merelo.net"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.099,
84
+ 0.838,
85
+ 0.495,
86
+ 0.85
87
+ ],
88
+ "angle": 0,
89
+ "content": "<sup>1</sup>Whose computing power is similar to four-year-old desktop machines"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.082,
95
+ 0.85,
96
+ 0.501,
97
+ 0.873
98
+ ],
99
+ "angle": 0,
100
+ "content": "2which has nothing to do with Java, other than the name and its syntactic similarity"
101
+ },
102
+ {
103
+ "type": "list",
104
+ "bbox": [
105
+ 0.082,
106
+ 0.805,
107
+ 0.502,
108
+ 0.873
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.516,
117
+ 0.192,
118
+ 0.938,
119
+ 0.333
120
+ ],
121
+ "angle": 0,
122
+ "content": "with ActionScript, has more or less the same capabilities. However, there are several disadvantages to these: they might or might not be present (they are not native), they are noisy in the sense that, since they act as plugins, their execution is always noted by the user, their programs are more heavyweight than simple text code, and, finally, its integration with the browser is more awkward than the seamless integration that JavaScript offers. In any case, most things said here for JavaScript also apply to these and other plugins."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.516,
128
+ 0.334,
129
+ 0.938,
130
+ 0.589
131
+ ],
132
+ "angle": 0,
133
+ "content": "By itself, an interpreted language is not enough for creating a metacomputer if there is no way to convey information back from the client to the server in a seamless way. The ability to use the virtual machine included in browsers for distributed computing appeared with the XMLHttpRequest object, which allows asynchronous petitions to the server, in what has been called AJAX, Asynchronous JavaScript and XML [9]. AJAX is just one of the possible ways to perform asynchronous client-server communication, the others being AJAX (Asynchronous Javascript and JSON), and remoting using applets or embedded objects. However, it is quite popular, and a wide user base and documentation is available for it, using any of these asynchronous client/server communication protocols. The traditional client/server model becomes then more egalitarian, or closer to a peer to peer model, since a bidirectional communication line appears: the browser can make calls to the server, do some computation and later send the results to the server."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.516,
139
+ 0.589,
140
+ 0.938,
141
+ 0.873
142
+ ],
143
+ "angle": 0,
144
+ "content": "AJAX (and AJAJ, which differ only in the way data is serialized) works as follows: the XMLHttpRequest is provided with a request to the server and a pointer to a callback function. The request generates an event, which is asynchronously activated when a reply is received making use of the callback function. Following this approach the browser is not locked, providing a way to program applications that are similar to the ones used at the desktop, in the sense that they do not have to wait for the application response to be loaded and rendered on the screen every time a request is made. It also means that a user clicking on the Submit button is no longer needed to initiate communication with the server; any JavaScript thread can do so, with the constraint that the only server they can communicate with is the one that hosts the page the script is included in. On the other side, this provides a way to use the browser for application level networks that create distributed computing systems, since the request-response loop does not need the user participation in a fashion very similar to any other distributed computing application; these ALN can be"
145
+ }
146
+ ],
147
+ [
148
+ {
149
+ "type": "text",
150
+ "bbox": [
151
+ 0.082,
152
+ 0.069,
153
+ 0.5,
154
+ 0.125
155
+ ],
156
+ "angle": 0,
157
+ "content": "controlled from the server with any programming language. Of course, it can also be combined with other distributed programming frameworks based on OpenGrid [10] or other distributed computing paradigms."
158
+ },
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.082,
163
+ 0.126,
164
+ 0.5,
165
+ 0.266
166
+ ],
167
+ "angle": 0,
168
+ "content": "We previously used Ruby on Rails for making this kind of distributed AJAX application [11]; however, performance and scaling behavior were not too satisfactory mainly for two reasons: the nature of the Ruby on Rails server, which required the setup of a load-balancing server, and the (probably determinant) fact that part of the genetic algorithm was done on the server, with a low degree of parallelism and thus a high impact on performance. Latest experiments after publication yielded a maximum of 40 chromosome evaluation per second<sup>3</sup>."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.082,
174
+ 0.267,
175
+ 0.5,
176
+ 0.565
177
+ ],
178
+ "angle": 0,
179
+ "content": "In this paper, following the same concept of distributed evolutionary computation on the browser via AJAX, we have redesigned the application using Perl and PostgreSQL (on the server) and Javascript, as before, on the client, and renamed it AGAJAJ (pronounce it A-gah-yai), which stands for Asynchronous Genetic Algorithm with Javascript and JSON; in this case, the genetic algorithm (solving the well known Royal Road problem) is carried out only on the clients, with the server used just for interchange of information among them. We will perform several experimentes in which clients donate computing power by just loading a web page to find out what kind of performance we can expect from this kind of setup, from the number of machines that will be made available by their users to the number of evaluations each one of them can perform; in these experiments, we have improved two orders of magnitude the performance achieved in the previous experiments which used Ruby on Rails, and also the number of concurrent machines available to perform them, showing that this kind of setup is ready to take more computing-intensive experiments without the need of an expensive server setup."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.082,
185
+ 0.566,
186
+ 0.5,
187
+ 0.777
188
+ ],
189
+ "angle": 0,
190
+ "content": "This paper follows our group's line of work on distributing evolutionary computation applications, which has already been adapted to several parallel and distributed computing paradigms (for example, Jini [12], JavaSpaces [13], Java with applets [14], service oriented architectures [15] and P2P systems [16], [17]). Evolutionary computation is quite adequate for this kind of distributed environment for several reasons: it is a population based method, so computation can be distributed among nodes (via distribution of population) in many different ways; besides, some works suggest that there are synergies among evolutionary algorithms and parallelization: isolated populations that are connected only eventually avoid the loss of diversity and produce better solutions in fewer time obtaining, in some cases, superlinear accelerations [18]."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.082,
196
+ 0.779,
197
+ 0.5,
198
+ 0.849
199
+ ],
200
+ "angle": 0,
201
+ "content": "Of course, with a suitable work division method, many other algorithms could be adapted to browser-based distributed computation; however, in this paper will solve only genetic algorithms, and concentrate on raw performance, rather than algorithmic behavior."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.518,
207
+ 0.069,
208
+ 0.938,
209
+ 0.139
210
+ ],
211
+ "angle": 0,
212
+ "content": "The rest of the paper is organized as follows: next section concentrates on the application of volunteer/involuntary computing to evolutionary computation; the setup is described in section III. Experiments and results are shown in section IV and discussed in V, along with future lines of work."
213
+ },
214
+ {
215
+ "type": "title",
216
+ "bbox": [
217
+ 0.645,
218
+ 0.156,
219
+ 0.812,
220
+ 0.168
221
+ ],
222
+ "angle": 0,
223
+ "content": "II. STATE OF THE ART"
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.517,
229
+ 0.179,
230
+ 0.937,
231
+ 0.462
232
+ ],
233
+ "angle": 0,
234
+ "content": "So called volunteer computing [19], [20] systems are application-level networks set up so that different people can donate CPU cycles for a joint computing effort. The best known project is SETI@home\\(^4\\), which, from the user's point of view, is a screen-saver which has to be downloaded and installed; when the user's CPU is not busy it performs several signal analysis operations. Some companies related to volunteer computing, such as Popular Power (and others; they are referenced, for example, in [21]) did some experimentation with Java based clients, but none has had commercial success; on the other hand, the SETI@Home program has been open-sourced and extended as the BOINC (Berkeley Open Infrastructure for Network Computing) framework [22]. This kind of volunteer computing has been adapted to evolutionary computation in several occasions, using frameworks such as DREAM [23], which includes a Java-based virtual machine, GOLEM@Home, Electric Sheep [24] and G2-P2P [25]. Both approaches acknowledge that to achieve massive scalability, a peer to peer (P2P) approach is advisable, since it eliminates bottlenecks and single points of failure."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.517,
240
+ 0.464,
241
+ 0.937,
242
+ 0.647
243
+ ],
244
+ "angle": 0,
245
+ "content": "There are mainly two problems in this kind of volunteer networks: first of all, it is important not to abuse the CPU resources of volunteers; secondly, a sufficient number of users is needed in order to be able to do the required computation, which can be a problem on its own if there are too many of them, bringing the network, or at least the solution-collecting node, to its knees. A third problem is that performance prediction is difficult when neither the number of participants nor their individual node performances are known in advance. Finally, fault-tolerance [26] and cheating [27] are also important issues; if the environment is competitive, or any single computation is important, they will have to be taken into account."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.517,
251
+ 0.649,
252
+ 0.937,
253
+ 0.834
254
+ ],
255
+ "angle": 0,
256
+ "content": "In any case, we believe that the best way to obtain a good amount of users is to make it easy for them to participate, using technologies available in their computers, as the browser is. In fact, some suggestions were published (for example, the one of Jim Culbert in his blog [28], and in some mailing lists), and, besides our own [11], there have been some recent papers and reports on similar setups. For instance, W. Langdon has been running for some time an interactive evolution experiment using Javascript in the browser [29], which was mainly intended for achieving high diversity in a fractal snowflake design than high performance. Even more recently, Klein and Spector [30] present a system based on the Push3 language, which is compiled to JavaScript"
257
+ },
258
+ {
259
+ "type": "page_footnote",
260
+ "bbox": [
261
+ 0.097,
262
+ 0.86,
263
+ 0.313,
264
+ 0.872
265
+ ],
266
+ "angle": 0,
267
+ "content": "The published figure was even lower."
268
+ },
269
+ {
270
+ "type": "page_footnote",
271
+ "bbox": [
272
+ 0.518,
273
+ 0.849,
274
+ 0.938,
275
+ 0.873
276
+ ],
277
+ "angle": 0,
278
+ "content": "4See http://setiathome.berkeley.edu/ for downloading the software and some reports."
279
+ }
280
+ ],
281
+ [
282
+ {
283
+ "type": "text",
284
+ "bbox": [
285
+ 0.084,
286
+ 0.069,
287
+ 0.502,
288
+ 0.097
289
+ ],
290
+ "angle": 0,
291
+ "content": "in the browser. This system would be the closest to what we are presenting in this paper."
292
+ },
293
+ {
294
+ "type": "text",
295
+ "bbox": [
296
+ 0.082,
297
+ 0.097,
298
+ 0.501,
299
+ 0.309
300
+ ],
301
+ "angle": 0,
302
+ "content": "The proposed approach could also be considered as parasitic computing since, as stated in Section I, the only participation from the user will be to load a web page and click on a button; in fact, any AJAX-based could use these resources without his acquiescence (and, in any case, it would be desirable to run without causing much trouble). The concept was introduced by Barabási in [31], and followed by others (for instance, Kohring in [32]). In that work they proposed to use the Internet routers to compute a checksum by means of a set of specially crafted packets, whose aggregated result would be used to solve the SAT problem. Anyway, although the concept is interesting, there seems not to be a continuation for this work (at least openly), probably due to its inherent dangers (as analyzed in a paper by Lam et al. [33])."
303
+ },
304
+ {
305
+ "type": "text",
306
+ "bbox": [
307
+ 0.082,
308
+ 0.31,
309
+ 0.502,
310
+ 0.438
311
+ ],
312
+ "angle": 0,
313
+ "content": "The virtual machine embedded into the browser provides a way to easily do that kind of sneaky/parasitic computing, but JavaScript faces the handicap of being an interpreted language, which means that the efficiency of different implementations varies wildly. Moreover, it is not optimized for numerical computation but for object tree management (the so called DOM, document object model) and strings. Nevertheless its wide availability makes us think about considering it, at least as a possibility."
314
+ },
315
+ {
316
+ "type": "title",
317
+ "bbox": [
318
+ 0.113,
319
+ 0.444,
320
+ 0.473,
321
+ 0.457
322
+ ],
323
+ "angle": 0,
324
+ "content": "III. METHODOLOGY AND EXPERIMENTAL SETUP"
325
+ },
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.082,
330
+ 0.462,
331
+ 0.503,
332
+ 0.873
333
+ ],
334
+ "angle": 0,
335
+ "content": "For this experiments we have designed and implemented a client-server program written in Perl (server-side) and Javascript (client-side), communicating using JSON via the XMLHttpRequest object. This object requires that the website and the AJAX requests are served from the same host, which is a constraint. Code for both is available, under the GPL, from http://rubyforge.org/projects/dconrails/. The algorithm runs on the client for a fixed number of generations, as shown in figure 1; running parameters are set from the server and are downloaded from it along with the webpage from which the experiment is run. A preset number of generations is run on the client, after which a request is made to the server with the best individual in the last generation. The algorithm stops and waits for the answer from the server. The server receives the request, stores it in a database, and sends back the best individual stored in the server. This individual is incorporated in the client population, which starts again to run. Several clients acting at the same time make requests asynchronously, using the facilities of the standard Apache web server. The server is thus used as a clearinghouse for interchange of information among the different clients; however, there's no explicit communication or topology among the different nodes running the genetic algorithm. Besides, the fact that the server always contains the best individuals generated so far guarantees that the best solution (with a fixed number of evaluations resolution) available so far is always kept. The server also sends back the number of generations the client should run; which is usually the same number as before,"
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.518,
341
+ 0.069,
342
+ 0.938,
343
+ 0.096
344
+ ],
345
+ "angle": 0,
346
+ "content": "but turns to 0, thus stopping the client, when the stopping condition is met."
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.517,
352
+ 0.098,
353
+ 0.937,
354
+ 0.438
355
+ ],
356
+ "angle": 0,
357
+ "content": "Clients leave the experiment by the expeditive method of surfing away to another page or closing the web browser; in tabbed browsers (most browsers nowadays), a tab (or several) can run the experiment while the browser is available for other tasks. When the experiment has been running for a predetermined number of evaluations (which were set, for this experiment, to 750000), all clients get a message to stop running, and change their user interface to a message offering them to reload the (new) experiment and start all over again. Besides, there is a watching daemon running on the server which checks the database for the number of individuals evaluated, and resets the experiment by incrementing the experiment ID by one and eliminating the population. Thus, experiments can run unchecked on a server while this watchdog daemons is running. Several additional utilities are also provided via several webpages, that inform on the state of the experiment, or allow to set the GA parameters. Experimental subjects were gathered by several methods: sending it via email to department and project coworkers, using the URL for the experiment as a Google Talk status line, as a Twitter (http://twitter.com) message, as a blog post, and, eventually, it was picked up by a wildly popular Spanish blog \\(^{5}\\), which managed to gather the highest number of machines."
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.517,
363
+ 0.439,
364
+ 0.939,
365
+ 0.61
366
+ ],
367
+ "angle": 0,
368
+ "content": "The experiment consisted in optimizing the 256-bits Royal Road function, and each instance consisted in a maximum of 750000 evaluations (which were barely enough to find the solution). The algorithm was steady state (with incorporation of the immigrant every 20 generations), with rank-based selection and substitution; every generation, \\(50\\%\\) of the population was generated, substituting the worst \\(50\\%\\) individuals. Crossover priority was set to \\(80\\%\\), and mutation to \\(20\\%\\), changing \\(1\\%\\) of the bits. However, these settings will have no influence on performance, other than the fact that, if the solution is found before the end of the experiment, the users will get bored and change to a new page<sup>6</sup>."
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.517,
374
+ 0.611,
375
+ 0.939,
376
+ 0.752
377
+ ],
378
+ "angle": 0,
379
+ "content": "Data was gathered from two different sources: the watchdog-daemon logs, which mainly gave data about the number of individuals evaluated and the time needed for each experiment, and the Apache daemon log; the relevant lines were extracted just by using grep. It should be noted that the server was not running exclusively the experiment, but doing it along with the usual tasks. The server was a 700-MHz, 1 Gigabyte-RAM machine, with the database in another dual processor, 450-MHz machine. Both machines were running obsolete RedHat 7.x and 9.x Linux operating systems<sup>7</sup>."
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.536,
385
+ 0.753,
386
+ 0.938,
387
+ 0.768
388
+ ],
389
+ "angle": 0,
390
+ "content": "Results of the set experiments will be commented in the"
391
+ },
392
+ {
393
+ "type": "page_footnote",
394
+ "bbox": [
395
+ 0.518,
396
+ 0.783,
397
+ 0.936,
398
+ 0.816
399
+ ],
400
+ "angle": 0,
401
+ "content": "<sup>5</sup>Who posted it at http://www.microsiervos.com/archivo/ordenadores/experimento-computation-distribuida.html."
402
+ },
403
+ {
404
+ "type": "page_footnote",
405
+ "bbox": [
406
+ 0.518,
407
+ 0.817,
408
+ 0.937,
409
+ 0.839
410
+ ],
411
+ "angle": 0,
412
+ "content": "<sup>6</sup>And this is just an example of how social factors in this kind of experiments affect performance."
413
+ },
414
+ {
415
+ "type": "page_footnote",
416
+ "bbox": [
417
+ 0.518,
418
+ 0.839,
419
+ 0.937,
420
+ 0.872
421
+ ],
422
+ "angle": 0,
423
+ "content": "Both machines host our group web server and home pages; we thought it was better to run the experiment in our standard setup instead of a dedicated one."
424
+ },
425
+ {
426
+ "type": "list",
427
+ "bbox": [
428
+ 0.518,
429
+ 0.783,
430
+ 0.937,
431
+ 0.872
432
+ ],
433
+ "angle": 0,
434
+ "content": null
435
+ }
436
+ ],
437
+ [
438
+ {
439
+ "type": "title",
440
+ "bbox": [
441
+ 0.18,
442
+ 0.067,
443
+ 0.427,
444
+ 0.084
445
+ ],
446
+ "angle": 0,
447
+ "content": "Prueba Algoritmo Genetico"
448
+ },
449
+ {
450
+ "type": "image",
451
+ "bbox": [
452
+ 0.179,
453
+ 0.097,
454
+ 0.842,
455
+ 0.287
456
+ ],
457
+ "angle": 0,
458
+ "content": null
459
+ },
460
+ {
461
+ "type": "image_caption",
462
+ "bbox": [
463
+ 0.081,
464
+ 0.299,
465
+ 0.938,
466
+ 0.334
467
+ ],
468
+ "angle": 0,
469
+ "content": "Fig. 1. Experiment running on two different browsers (Firefox and Epiphany) in the same machine. User interface is written in Spanish, since in this initial experiment was addressed to audience speaking that language. The colored (or gray-scale) horizontal bar is a graphical representation of the chromosome. The inset windows (Epiphany) started in second place, and thus, the state of evolution is less advanced."
470
+ },
471
+ {
472
+ "type": "text",
473
+ "bbox": [
474
+ 0.083,
475
+ 0.367,
476
+ 0.175,
477
+ 0.379
478
+ ],
479
+ "angle": 0,
480
+ "content": "next section."
481
+ },
482
+ {
483
+ "type": "title",
484
+ "bbox": [
485
+ 0.184,
486
+ 0.388,
487
+ 0.4,
488
+ 0.4
489
+ ],
490
+ "angle": 0,
491
+ "content": "IV. EXPERIMENTAL RESULTS"
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.082,
497
+ 0.405,
498
+ 0.502,
499
+ 0.562
500
+ ],
501
+ "angle": 0,
502
+ "content": "Eventually, the experiment was running for several days, with different degrees of intensity. Several hundred machines participated in different instances, coming from all over the world, although mainly from Spain. The first question we wanted to answer was, how many machines should we expect in this kind of experiment? BOINC and SETI@home have gathered thousands of machines in its \\(10+\\) year run, but our experiments were limited in time (several hours, at most, if no machine was available, a few minutes if it was), so a limited number of machines should also be expected. The distribution of the number of machines is shown in figure 2."
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.082,
508
+ 0.575,
509
+ 0.501,
510
+ 0.646
511
+ ],
512
+ "angle": 0,
513
+ "content": "The exact figures will vary in every experiment, but it seems clear that the median number of machines will hover around 1/4 of the maximum number. Besides, it is quite easy to obtain 5 clients for a certain number of evaluations; most experiments have less than 10 clients."
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.082,
519
+ 0.646,
520
+ 0.501,
521
+ 0.674
522
+ ],
523
+ "angle": 0,
524
+ "content": "On the other hand, the number of evaluations each client contributes are quite different, as is shown in figure 3."
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.082,
530
+ 0.675,
531
+ 0.501,
532
+ 0.873
533
+ ],
534
+ "angle": 0,
535
+ "content": "Most clients contribute just a few generations, be it because the browser stops running the program if it takes more than a predetermined number of seconds (which can happen in IE or in Firefox if clients are too slow; usually the predetermined number of generations will be less than this number), the user gets bored and moves on, or because statistically most people join when the experiment has been running for some time and show up only when there are a few evaluations left. Each one of these problems would have to be tackled separately, but the baseline is that, even when a good number of machines joins an experiment, they will do so only for a limited amount of time on average. Besides, these limitations in number of generations translate also to limitations in time, so that experiments will have to be"
536
+ },
537
+ {
538
+ "type": "image",
539
+ "bbox": [
540
+ 0.522,
541
+ 0.369,
542
+ 0.92,
543
+ 0.662
544
+ ],
545
+ "angle": 0,
546
+ "content": null
547
+ },
548
+ {
549
+ "type": "image_caption",
550
+ "bbox": [
551
+ 0.517,
552
+ 0.678,
553
+ 0.938,
554
+ 0.732
555
+ ],
556
+ "angle": 0,
557
+ "content": "Fig. 2. Histogram of the number of machines used in each experiment. The median is 8.5 machines, and the \\(75\\%\\) quartile is placed at 15 machines, with a peak of 44 machines. A single client using several browsers counts as a single machine. It should be noted that not all clients are simultaneously connected."
558
+ },
559
+ {
560
+ "type": "text",
561
+ "bbox": [
562
+ 0.517,
563
+ 0.771,
564
+ 0.938,
565
+ 0.814
566
+ ],
567
+ "angle": 0,
568
+ "content": "designed in a way that sufficient information is transmitted from clients during the expected time they are going to spend in the experiment."
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.517,
574
+ 0.816,
575
+ 0.938,
576
+ 0.873
577
+ ],
578
+ "angle": 0,
579
+ "content": "But different clients have different performance, and it is also interesting to measure what is the average time (and thus average performance) it takes the clients between two communications to the server (that is, \\(20 * 50\\) evaluations +"
580
+ }
581
+ ],
582
+ [
583
+ {
584
+ "type": "image",
585
+ "bbox": [
586
+ 0.179,
587
+ 0.071,
588
+ 0.813,
589
+ 0.397
590
+ ],
591
+ "angle": 0,
592
+ "content": null
593
+ },
594
+ {
595
+ "type": "image_caption",
596
+ "bbox": [
597
+ 0.081,
598
+ 0.413,
599
+ 0.938,
600
+ 0.448
601
+ ],
602
+ "angle": 0,
603
+ "content": "Fig. 3. Histogram of the number of generations all machines participating in the experiment have contributed, cut off at 400 generations. Each generation corresponds to 50 new individuals. Most clients contribute 100 generations or less, with a few contributing more than 200. The median is 16 generations, with the 3rd quartile placed at 55 generations."
604
+ },
605
+ {
606
+ "type": "text",
607
+ "bbox": [
608
+ 0.081,
609
+ 0.481,
610
+ 0.5,
611
+ 0.567
612
+ ],
613
+ "angle": 0,
614
+ "content": "waiting time). This is interesting for two main reasons: server performance will have to be tuned to be able to answer to this level of requests, and second, the generation gap will also have to be fine-tuned so that waiting time and the possibility that the script is blocked due to overtime is minimized. The results obtained in the experiment are shown in figure 4."
615
+ },
616
+ {
617
+ "type": "text",
618
+ "bbox": [
619
+ 0.082,
620
+ 0.569,
621
+ 0.5,
622
+ 0.669
623
+ ],
624
+ "angle": 0,
625
+ "content": "This figure shows that, for this kind of problem, the vast majority of clients will have a gap smaller than two seconds. This quantity will vary for different problems, but the conclusion is that most clients will have high or median performance, with few clients having lower performance. This measure also gives us an estimate of the average performance (2.906 seconds/20 generations)."
626
+ },
627
+ {
628
+ "type": "text",
629
+ "bbox": [
630
+ 0.082,
631
+ 0.671,
632
+ 0.5,
633
+ 0.715
634
+ ],
635
+ "angle": 0,
636
+ "content": "However, at the end of the day the setup is intended to achieve high performance when running an evolutionary computation experiment. This data is presented in figure 5."
637
+ },
638
+ {
639
+ "type": "text",
640
+ "bbox": [
641
+ 0.082,
642
+ 0.717,
643
+ 0.501,
644
+ 0.875
645
+ ],
646
+ "angle": 0,
647
+ "content": "This figure is quite similar to fig 4. Median is at 1000 seconds, with a minimum at 292 and 3rd quartile at 2323; \\(75\\%\\) of runs will last less than 2323 seconds. Taking into account that the average 20-generation step is 2.906 seconds, and a single experiment needs 375 such steps, the average single machine run would take 1089.75 seconds; this means that the maximum speedup achieved is \\(1089.75 / 292 = 3.73\\) clients running simultaneously, and the median is approximately a single average machine. This will probably vary for experiments of different duration, but, on average, we could say that significant (albeit small) speedups can be"
648
+ },
649
+ {
650
+ "type": "text",
651
+ "bbox": [
652
+ 0.517,
653
+ 0.481,
654
+ 0.938,
655
+ 0.68
656
+ ],
657
+ "angle": 0,
658
+ "content": "achieved using spontaneous volunteer computing. In general, however, several machines will sequentially provide CPU cycles to an experiment, adding up to a single machine doing all the work. In general also, the fact that there are up to 44 machines working in a single experiment, or that the range of running times can vary in a factor of up to one hundred, indicates that, for this experiment, no bottleneck has been found. Of course, more simultaneous machines will have to be tested to find the limit. Finally, the fact that all contributions are volunteer means that the evaluation rate is not constant, yielding figures like fig 6, where the steepness of each line is roughly equivalent to the evaluation speed, since the \\( x \\) axis corresponds to time, and the \\( y \\) axis number of individuals evaluated."
659
+ },
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.539,
664
+ 0.686,
665
+ 0.916,
666
+ 0.699
667
+ ],
668
+ "angle": 0,
669
+ "content": "V. CONCLUSIONS, DISCUSSION AND FUTURE WORK"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.517,
675
+ 0.703,
676
+ 0.938,
677
+ 0.845
678
+ ],
679
+ "angle": 0,
680
+ "content": "While in previous papers [11] we proved that this kind of AJAX based, volunteer, and potentially sneaky, computation could be used profitably for performing genetic algorithm experiments, in this paper we have proved that, without an expensive or far-fetched setup, it can achieve high performance, equivalent, at most, to several computers of average performance. The code used to perform the experiment is publicly available and is modular so that creating different experiments is just a matter of writing a new JavaScript fitness function and tuning the GA parameters accordingly."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.518,
686
+ 0.845,
687
+ 0.938,
688
+ 0.874
689
+ ],
690
+ "angle": 0,
691
+ "content": "The experiments have proved that there is a good amount of computational power that can be easily tapped and used for"
692
+ }
693
+ ],
694
+ [
695
+ {
696
+ "type": "image",
697
+ "bbox": [
698
+ 0.178,
699
+ 0.075,
700
+ 0.813,
701
+ 0.527
702
+ ],
703
+ "angle": 0,
704
+ "content": null
705
+ },
706
+ {
707
+ "type": "image_caption",
708
+ "bbox": [
709
+ 0.081,
710
+ 0.548,
711
+ 0.938,
712
+ 0.584
713
+ ],
714
+ "angle": 0,
715
+ "content": "Fig. 6. Plot showing the number of individuals evaluated vs. time for a few dozens experiments; time is plotted up to 600 seconds only. As is seen, some experiments have a more or less constant evaluation rate (constant inclination), while other are more step-like with clients leaving and joining the experiment all the time."
716
+ },
717
+ {
718
+ "type": "text",
719
+ "bbox": [
720
+ 0.081,
721
+ 0.616,
722
+ 0.5,
723
+ 0.729
724
+ ],
725
+ "angle": 0,
726
+ "content": "evolutionary computation experiments, however, the nature of AGAJAJ constrains also the way users donate computing power, as well as the number of clients available for an experiment. In this paper we have found some figures, which will undoubtedly vary for other experiments; however, the general shape of the curves will probably be the same, following a very steep decrease from the maximum values obtained."
727
+ },
728
+ {
729
+ "type": "text",
730
+ "bbox": [
731
+ 0.081,
732
+ 0.731,
733
+ 0.504,
734
+ 0.875
735
+ ],
736
+ "angle": 0,
737
+ "content": "The GA, being asynchronous, faces some problems that have not been tackled in this paper. What is the best approach to preserve diversity? To generate a new population in each client, and receive immigrants as soon as possible, which are incorporated into the population? Or is it better to create new client populations based on existing populations? What is really the algorithmic contribution of new clients? These issues will be explored as future work. We will also try to measure the limits of this technology, and test the impact of servers of varying performance and workload on overall"
738
+ },
739
+ {
740
+ "type": "text",
741
+ "bbox": [
742
+ 0.517,
743
+ 0.616,
744
+ 0.937,
745
+ 0.658
746
+ ],
747
+ "angle": 0,
748
+ "content": "performance. Eventually, we will also try to perform a sneaky experiment, to check what kind of performance can be expected in that kind of setups."
749
+ },
750
+ {
751
+ "type": "text",
752
+ "bbox": [
753
+ 0.517,
754
+ 0.658,
755
+ 0.939,
756
+ 0.788
757
+ ],
758
+ "angle": 0,
759
+ "content": "Another venue of work will be to examine the algorithmic performance of AGAJAJ; even as new clients are added to an experiment, what's the improvement obtained from them? In order to check that, a controlled experiment using known computers will be used, adding them one at a time, so that the real impact on the genetic algorithm is evaluated. Once that is know, it would be interesting to experiment with adaptive client parameters, instead of the one-size-fits-all parameter settings used so far."
760
+ },
761
+ {
762
+ "type": "title",
763
+ "bbox": [
764
+ 0.643,
765
+ 0.798,
766
+ 0.813,
767
+ 0.81
768
+ ],
769
+ "angle": 0,
770
+ "content": "ACKNOWLEDGEMENTS"
771
+ },
772
+ {
773
+ "type": "text",
774
+ "bbox": [
775
+ 0.517,
776
+ 0.816,
777
+ 0.939,
778
+ 0.875
779
+ ],
780
+ "angle": 0,
781
+ "content": "This paper has been funded in part by the Spanish MI-CYT project NoHNES (Spanish Ministerio de Educación y Ciencia - TIN2007-68083) and the Junta de Andalucía P06-TIC-02025. We are also grateful to the editors of the"
782
+ }
783
+ ],
784
+ [
785
+ {
786
+ "type": "image",
787
+ "bbox": [
788
+ 0.086,
789
+ 0.071,
790
+ 0.498,
791
+ 0.365
792
+ ],
793
+ "angle": 0,
794
+ "content": null
795
+ },
796
+ {
797
+ "type": "image_caption",
798
+ "bbox": [
799
+ 0.082,
800
+ 0.38,
801
+ 0.502,
802
+ 0.478
803
+ ],
804
+ "angle": 0,
805
+ "content": "Fig. 4. Distribution of gap time, that is, time among two request from a particular client; it has been rounded to the closest number of seconds (since the webserver logs it has been extracted from have that resolution). Most clients take 3 second or less to process 20 generations, with a majority taking 2 or less. Gaps bigger than 10-15 seconds are probably non-significant, in fact, negative gaps and those bigger than 100 have been eliminated from the data set. For this set of experiments, the median is at 2. \\( x \\) axis is logarithmic, to emphasize the fact that the distribution of client performance falls very fast, although a different center should be expected for a different problem."
806
+ },
807
+ {
808
+ "type": "text",
809
+ "bbox": [
810
+ 0.082,
811
+ 0.523,
812
+ 0.502,
813
+ 0.582
814
+ ],
815
+ "angle": 0,
816
+ "content": "Microsiervos.com blog for talking about our experiment, and to the readers of that article for participating in it. We are also grateful to the anonymous people who have known about the experiment via several possible ways<sup>8</sup> and participating in it."
817
+ },
818
+ {
819
+ "type": "title",
820
+ "bbox": [
821
+ 0.244,
822
+ 0.599,
823
+ 0.341,
824
+ 0.611
825
+ ],
826
+ "angle": 0,
827
+ "content": "REFERENCES"
828
+ },
829
+ {
830
+ "type": "ref_text",
831
+ "bbox": [
832
+ 0.093,
833
+ 0.625,
834
+ 0.501,
835
+ 0.657
836
+ ],
837
+ "angle": 0,
838
+ "content": "[1] D. P. Anderson, J. Cobb, E. Korpela, M. Lebofsky, and D. Werthimer, “SETI@home: an experiment in public-resource computing,” Commun. ACM, vol. 45, no. 11, pp. 56–61, 2002."
839
+ },
840
+ {
841
+ "type": "ref_text",
842
+ "bbox": [
843
+ 0.093,
844
+ 0.659,
845
+ 0.501,
846
+ 0.689
847
+ ],
848
+ "angle": 0,
849
+ "content": "[2] R. Steinmetz and K. Wehrle, Eds., Peer-to-Peer Systems and Applications, ser. Lecture Notes in Computer Science, vol. 3485. Springer, 2005."
850
+ },
851
+ {
852
+ "type": "ref_text",
853
+ "bbox": [
854
+ 0.093,
855
+ 0.691,
856
+ 0.501,
857
+ 0.712
858
+ ],
859
+ "angle": 0,
860
+ "content": "[3] Gilorien, DHTML and JavaScript. Upper Saddle River, NJ 07458, USA: Prentice-Hall PTR, 2000."
861
+ },
862
+ {
863
+ "type": "ref_text",
864
+ "bbox": [
865
+ 0.093,
866
+ 0.713,
867
+ 0.501,
868
+ 0.756
869
+ ],
870
+ "angle": 0,
871
+ "content": "[4] R. Shah, \"A beginner's guide to JavaScript,\" JavaWorld: IDG's magazine for the Java community, vol. 1, no. 1, pp. ??-??, Mar. 1996. [Online]. Available: http://www.javaworld.com/javaworld/jw-03-1996/jw-03-javascriptintro.htm"
872
+ },
873
+ {
874
+ "type": "ref_text",
875
+ "bbox": [
876
+ 0.093,
877
+ 0.757,
878
+ 0.501,
879
+ 0.788
880
+ ],
881
+ "angle": 0,
882
+ "content": "[5] D. Flanagan, JavaScript Pocket Reference (2nd Edition). O'Reilly, October 2002. [Online]. Available: http://www.amazon.fr/exec/obidos/ASIN/0596004117/citeulike04-21"
883
+ },
884
+ {
885
+ "type": "ref_text",
886
+ "bbox": [
887
+ 0.093,
888
+ 0.789,
889
+ 0.501,
890
+ 0.832
891
+ ],
892
+ "angle": 0,
893
+ "content": "[6] ECMA, ECMA-262: ECMAScript Language Specification, 3rd ed. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Dec. 1999. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ECMA-262.HTM"
894
+ },
895
+ {
896
+ "type": "list",
897
+ "bbox": [
898
+ 0.093,
899
+ 0.625,
900
+ 0.501,
901
+ 0.832
902
+ ],
903
+ "angle": 0,
904
+ "content": null
905
+ },
906
+ {
907
+ "type": "text",
908
+ "bbox": [
909
+ 0.082,
910
+ 0.849,
911
+ 0.499,
912
+ 0.873
913
+ ],
914
+ "angle": 0,
915
+ "content": "<sup>8</sup>For instance, this article in my own blog http://atalaya.blogalia.com/historias/53480"
916
+ },
917
+ {
918
+ "type": "image",
919
+ "bbox": [
920
+ 0.523,
921
+ 0.071,
922
+ 0.936,
923
+ 0.365
924
+ ],
925
+ "angle": 0,
926
+ "content": null
927
+ },
928
+ {
929
+ "type": "image_caption",
930
+ "bbox": [
931
+ 0.518,
932
+ 0.38,
933
+ 0.938,
934
+ 0.435
935
+ ],
936
+ "angle": 0,
937
+ "content": "Fig. 5. Distribution of running times for a fixed amount of evaluations. Some outliers have been cut off; approximately \\(10\\%\\) of runs took more than 10000 seconds. The mode is between 500 and 1000 seconds, and most runs end before 2000 seconds. Time starts to count from the moment the first client connects to the server."
938
+ },
939
+ {
940
+ "type": "ref_text",
941
+ "bbox": [
942
+ 0.527,
943
+ 0.471,
944
+ 0.938,
945
+ 0.514
946
+ ],
947
+ "angle": 0,
948
+ "content": "[7] ——, ECMA-290: ECMAScript Components Specification. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 1999. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ECMA-290.HTM"
949
+ },
950
+ {
951
+ "type": "ref_text",
952
+ "bbox": [
953
+ 0.527,
954
+ 0.515,
955
+ 0.938,
956
+ 0.559
957
+ ],
958
+ "angle": 0,
959
+ "content": "[8] ——, ECMA-327: ECMAScript 3: Compact Profile, 3rd ed. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 2001. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ecma-327.htm"
960
+ },
961
+ {
962
+ "type": "ref_text",
963
+ "bbox": [
964
+ 0.528,
965
+ 0.559,
966
+ 0.938,
967
+ 0.602
968
+ ],
969
+ "angle": 0,
970
+ "content": "[9] ——, ECMA-357: ECMAScript for XML (E4X) Specification. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 2004. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ecma-357.htm"
971
+ },
972
+ {
973
+ "type": "ref_text",
974
+ "bbox": [
975
+ 0.522,
976
+ 0.602,
977
+ 0.938,
978
+ 0.635
979
+ ],
980
+ "angle": 0,
981
+ "content": "[10] Wikipedia, “Ajax — wikipedia, la enciclopedia libre,” 2006, [Internet; descargado 14-mayo-2006]. [Online]. Available: \\(\\backslash\\)url{http://es.wikipedia.org/w/index.php?title=AJAX&oldid=3168254}"
982
+ },
983
+ {
984
+ "type": "ref_text",
985
+ "bbox": [
986
+ 0.522,
987
+ 0.634,
988
+ 0.938,
989
+ 0.656
990
+ ],
991
+ "angle": 0,
992
+ "content": "[11] \"Towards Open Grid Services Architecture,\" http://www.globus.org/ogsa/."
993
+ },
994
+ {
995
+ "type": "ref_text",
996
+ "bbox": [
997
+ 0.522,
998
+ 0.657,
999
+ 0.938,
1000
+ 0.711
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "[12] J. J. Merelo, A. M. García, J. L. J. Laredo, J. Lupión, and F. Tricas, \"Browser-based distributed evolutionary computation: performance and scaling behavior,\" in GECCO '07: Proceedings of the 2007 GECCO conference companion on Genetic and evolutionary computation. New York, NY, USA: ACM Press, 2007, pp. 2851-2858."
1004
+ },
1005
+ {
1006
+ "type": "ref_text",
1007
+ "bbox": [
1008
+ 0.522,
1009
+ 0.711,
1010
+ 0.938,
1011
+ 0.764
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "[13] M. García-Arenas, J. G. Castellano, P. A. Castillo, J. Carpio, M. Cillero, J.-J. Merelo-Guervós, A. Prieto, V. Rivas, and G. Romero, \"Speedup measurements for a distributed evolutionary algorithm that uses Jini,\" in XI Jornadas de Paralelismo, U. d. G. Depto. ATC, Ed., 2000, pp. 241-246."
1015
+ },
1016
+ {
1017
+ "type": "ref_text",
1018
+ "bbox": [
1019
+ 0.522,
1020
+ 0.765,
1021
+ 0.938,
1022
+ 0.861
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "[14] C. Setzkorn and R. C. Paton, \"Javaspaces - an affordable technology for the simple implementation of reusable parallel evolutionary algorithms,\" in Proceedings of the International Symposium on Knowledge Exploration in Life Science Informatics, KELSI 2004, ser. Lecture Notes in Artificial Intelligence, J. A. López, E. Benfenati, and W. Dubitzky, Eds., vol. 3303. Milan, Italy: Springer, 25-26 Nov. 2004, pp. 151-160. [Online]. Available: \\(\\backslash\\)url{http://springerlink.metapress.com/openurl.asp?genre=article;issn=0302-9743;volume=3303;spage=151}"
1026
+ },
1027
+ {
1028
+ "type": "ref_text",
1029
+ "bbox": [
1030
+ 0.522,
1031
+ 0.862,
1032
+ 0.938,
1033
+ 0.873
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "[15] F. S. Chong and W. B. Langdon, \"Java based distributed genetic"
1037
+ },
1038
+ {
1039
+ "type": "list",
1040
+ "bbox": [
1041
+ 0.522,
1042
+ 0.471,
1043
+ 0.938,
1044
+ 0.873
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": null
1048
+ }
1049
+ ],
1050
+ [
1051
+ {
1052
+ "type": "ref_text",
1053
+ "bbox": [
1054
+ 0.114,
1055
+ 0.07,
1056
+ 0.502,
1057
+ 0.146
1058
+ ],
1059
+ "angle": 0,
1060
+ "content": "programming on the internet,\" in Proceedings of the Genetic and Evolutionary Computation Conference, W. Banzhaf, J. Daida, A. E. Eiben, M. H. Garzon, V. Honavar, M. Jakiela, and R. E. Smith, Eds., vol. 2. Orlando, Florida, USA: Morgan Kaufmann, 13-17 Jul. 1999, p. 1229, full text in technical report CSRP-99-7. [Online]. Available: http://www.cs.ucl.ac.uk/staff/W.Langdon/ftp/papers/p.chong/DGPposter.pdf"
1061
+ },
1062
+ {
1063
+ "type": "ref_text",
1064
+ "bbox": [
1065
+ 0.086,
1066
+ 0.147,
1067
+ 0.502,
1068
+ 0.178
1069
+ ],
1070
+ "angle": 0,
1071
+ "content": "[16] D. S. Myers and M. P. Cummings, \"Necessity is the mother of invention: a simple grid computing system using commodity tools,\" J. Parallel Distrib. Comput., vol. 63, no. 5, pp. 578-589, 2003."
1072
+ },
1073
+ {
1074
+ "type": "ref_text",
1075
+ "bbox": [
1076
+ 0.085,
1077
+ 0.179,
1078
+ 0.501,
1079
+ 0.231
1080
+ ],
1081
+ "angle": 0,
1082
+ "content": "[17] J. L. J. Laredo, P. A. Castillo, B. Paechter, A. M. Mora, E. Alfaro-Cid, A. I. Esparcia-Alcazar, and J. J. Merelo, \"Empirical validation of a gossiping communication mechanism for parallel eas,\" ser. Lecture Notes in Computer Science, M. G. et al., Ed., vol. 4448. Springer, 2007, pp. 129-136."
1083
+ },
1084
+ {
1085
+ "type": "ref_text",
1086
+ "bbox": [
1087
+ 0.086,
1088
+ 0.232,
1089
+ 0.502,
1090
+ 0.294
1091
+ ],
1092
+ "angle": 0,
1093
+ "content": "[18] E. A. Eiben, M. Schoenauer, J. L. J. Laredo, P. A. Castillo, A. M. Mora, and J. J. Merelo, \"Exploring selection mechanisms for an agent-based distributed evolutionary algorithm,\" in GECCO '07: Proceedings of the 2007 GECCO conference companion on Genetic and evolutionary computation. New York, NY, USA: ACM Press, 2007, pp. 2801-2808."
1094
+ },
1095
+ {
1096
+ "type": "ref_text",
1097
+ "bbox": [
1098
+ 0.086,
1099
+ 0.295,
1100
+ 0.501,
1101
+ 0.327
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "[19] E. Cantú-Paz, “Migration policies, selection pressure, and parallel evolutionary algorithms,” Journal of Heuristics, vol. 7, no. 4, pp. 311–334, 2001."
1105
+ },
1106
+ {
1107
+ "type": "ref_text",
1108
+ "bbox": [
1109
+ 0.086,
1110
+ 0.327,
1111
+ 0.501,
1112
+ 0.37
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": "[20] L. F. G. Sarmenta and S. Hirano, \"Bayanihan: building and studying Web-based volunteer computing systems using Java,\" Future Generation Computer Systems, vol. 15, no. 5-6, pp. 675-686, 1999. [Online]. Available: citeseer.nj.nec.com/sarmenta99bayanihan.html"
1116
+ },
1117
+ {
1118
+ "type": "ref_text",
1119
+ "bbox": [
1120
+ 0.086,
1121
+ 0.371,
1122
+ 0.501,
1123
+ 0.422
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "[21] D. P. Anderson, E. Korpela, and R. Walton, \"High-performance task distribution for volunteer computing,\" in E-SCIENCE '05: Proceedings of the First International Conference on e-Science and Grid Computing. Washington, DC, USA: IEEE Computer Society, 2005, pp. 196-203."
1127
+ },
1128
+ {
1129
+ "type": "ref_text",
1130
+ "bbox": [
1131
+ 0.086,
1132
+ 0.423,
1133
+ 0.501,
1134
+ 0.467
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "[22] P. Cappello and D. Mourloukos, “A scalable, robust network for parallel computing,” in JGI '01: Proceedings of the 2001 joint ACM-ISCOPE conference on Java Grande. New York, NY, USA: ACM Press, 2001, pp. 78–86."
1138
+ },
1139
+ {
1140
+ "type": "ref_text",
1141
+ "bbox": [
1142
+ 0.086,
1143
+ 0.467,
1144
+ 0.501,
1145
+ 0.499
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": "[23] D. Anderson, \"Boinc: A system for public-resource computing and storage,\" in Proceedings of the 5th IEEE/ACM International Workshop on Grid Computing, Pittsburgh, USA, 2004."
1149
+ },
1150
+ {
1151
+ "type": "ref_text",
1152
+ "bbox": [
1153
+ 0.086,
1154
+ 0.499,
1155
+ 0.501,
1156
+ 0.626
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "[24] M. Arenas, P. Collet, A. Eiben, M. Jelasy, J. J. Merelo, B. Paechter, M. Preuß, and M. Schoenauer, \"A framework for distributed evolutionary algorithms,\" ser. Lecture Notes in Computer Science, LNCS, J.-J. M. Guervós, P. Adamidis, H.-G. Beyer, J.-L. Fernández-Villacanas, and H.-P. Schwefel, Eds., no. 2439. Springer-Verlag, September 2002, keywords: Implementation::Parallel EAs, Related::Simulated Adaptive Behavior, Technique::Advanced techniques - miscellaneous, Technique::Evolutionary computing - general, Technique::Evolutionary computing - miscellaneous, CiteSeer context: http://citeseer.nj.nec.com/context/2189070/0, available from http://link.springer.de/link/service/series/0558/papers/2439/243900665.pdf, Metapress URL: http://www.springerlink.com/link.asp?id=h4n29kbl69jvab4c."
1160
+ },
1161
+ {
1162
+ "type": "ref_text",
1163
+ "bbox": [
1164
+ 0.086,
1165
+ 0.627,
1166
+ 0.501,
1167
+ 0.657
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "[25] Wikipedia, \"Electric sheep — wikipedia, the free encyclopedia,\" 2007, http://en.wikipedia.org/w/index.php?title=Electric_Sheep&oldid=159677937."
1171
+ },
1172
+ {
1173
+ "type": "ref_text",
1174
+ "bbox": [
1175
+ 0.086,
1176
+ 0.659,
1177
+ 0.502,
1178
+ 0.712
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "[26] R. Mason and W. Kelly, “G2-P2P: a fully decentralised fault-tolerant cycle-stealing framework,” in ACSW Frontiers '05: Proceedings of the 2005 Australasian workshop on Grid computing and e-research. Darlinghurst, Australia, Australia: Australian Computer Society, Inc., 2005, pp. 33-39."
1182
+ },
1183
+ {
1184
+ "type": "ref_text",
1185
+ "bbox": [
1186
+ 0.086,
1187
+ 0.712,
1188
+ 0.502,
1189
+ 0.766
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "[27] F. F. D. Vega, \"A fault tolerant optimization algorithm based on evolutionary computation,\" in DEPCOS-RELCOMEX '06: Proceedings of the International Conference on Dependability of Computer Systems (DEPCOS-RELCOMEX'06). Washington, DC, USA: IEEE Computer Society, 2006, pp. 335-342."
1193
+ },
1194
+ {
1195
+ "type": "ref_text",
1196
+ "bbox": [
1197
+ 0.086,
1198
+ 0.766,
1199
+ 0.501,
1200
+ 0.807
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "[28] L. F. G. Sarmenta, \"Sabotage-tolerance mechanisms for volunteer computing systems,\" Future Generation Computer Systems, vol. 18, no. 4, pp. 561-572, 2002. [Online]. Available: citeseer.nj.nec.com/449803.html"
1204
+ },
1205
+ {
1206
+ "type": "ref_text",
1207
+ "bbox": [
1208
+ 0.086,
1209
+ 0.808,
1210
+ 0.501,
1211
+ 0.83
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "[29] J. Culbert, \"Ajax and distributed computation thoughts,\" Published at http://culbert.net/?p=6, March 2006, Ultimo acceso Mayo 2006."
1215
+ },
1216
+ {
1217
+ "type": "ref_text",
1218
+ "bbox": [
1219
+ 0.086,
1220
+ 0.831,
1221
+ 0.501,
1222
+ 0.873
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "[30] W. B. Langdon, “Pfeiffer – A distributed open-ended evolutionary system,” in AISB'05: Proceedings of the Joint Symposium on Socially Inspired Computing (METAS 2005), B. Edmonds, N. Gilbert, S. Gustafson, D. Hales, and N. Krasnogor, Eds., University of"
1226
+ },
1227
+ {
1228
+ "type": "list",
1229
+ "bbox": [
1230
+ 0.085,
1231
+ 0.07,
1232
+ 0.502,
1233
+ 0.873
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": null
1237
+ },
1238
+ {
1239
+ "type": "ref_text",
1240
+ "bbox": [
1241
+ 0.552,
1242
+ 0.07,
1243
+ 0.937,
1244
+ 0.103
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "Hertfordshire, Hatfield, UK, 12-15 Apr. 2005, pp. 7-13, sSAISB 2005 Convention. [Online]. Available: http://www.cs.ucl.ac.uk/staff/W.Langdon/ftp/papers/wbl_metas2005.pdf"
1248
+ },
1249
+ {
1250
+ "type": "ref_text",
1251
+ "bbox": [
1252
+ 0.522,
1253
+ 0.104,
1254
+ 0.937,
1255
+ 0.145
1256
+ ],
1257
+ "angle": 0,
1258
+ "content": "[31] J. Klein and L. Spector, \"Unwitting distributed genetic programming via asynchronous javascript and xml,\" in GECCO '07: Proceedings of the 9th annual conference on Genetic and evolutionary computation. New York, NY, USA: ACM, 2007, pp. 1628-1635."
1259
+ },
1260
+ {
1261
+ "type": "ref_text",
1262
+ "bbox": [
1263
+ 0.522,
1264
+ 0.146,
1265
+ 0.937,
1266
+ 0.188
1267
+ ],
1268
+ "angle": 0,
1269
+ "content": "[32] A.-L. Barabasi, V. W. Freeh, H. Jeong, and J. B. Brockman, \"Parasitic computing,\" Nature, vol. 412, no. 6850, pp. 894-897, August 2001. [Online]. Available: http://www.nature.com/cgi-taf/DynaPage.taf?file=/nature/journal/v412/n6850/abs/412894a0\\_fs.html"
1270
+ },
1271
+ {
1272
+ "type": "ref_text",
1273
+ "bbox": [
1274
+ 0.522,
1275
+ 0.189,
1276
+ 0.937,
1277
+ 0.23
1278
+ ],
1279
+ "angle": 0,
1280
+ "content": "[33] G. A. Kohring, \"Implicit simulations using messaging protocols,\" COMPUTERS AND PHYSICS, vol. 14, p. 203, 2003. [Online]. Available: http://www.citebase.org/abstract?id=oai:arXiv.org:cs/0208021"
1281
+ },
1282
+ {
1283
+ "type": "ref_text",
1284
+ "bbox": [
1285
+ 0.522,
1286
+ 0.231,
1287
+ 0.937,
1288
+ 0.283
1289
+ ],
1290
+ "angle": 0,
1291
+ "content": "[34] V. T. Lam, S. Antonatos, P. Akritidis, and K. G. Anagnostakis, \"Puppetnets: misusing web browsers as a distributed attack infrastructure,\" in CCS '06: Proceedings of the 13th ACM conference on Computer and communications security. New York, NY, USA: ACM, 2006, pp. 221-234."
1292
+ },
1293
+ {
1294
+ "type": "list",
1295
+ "bbox": [
1296
+ 0.522,
1297
+ 0.07,
1298
+ 0.937,
1299
+ 0.283
1300
+ ],
1301
+ "angle": 0,
1302
+ "content": null
1303
+ }
1304
+ ]
1305
+ ]
2401.17xxx/2401.17234/89dd36c6-55c5-4327-8671-6a0a7eb86d8c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5db779d67aae776a7879a930b18b1a8bef0161e29471898db89cdf4157932b68
3
+ size 176721
2401.17xxx/2401.17234/full.md ADDED
@@ -0,0 +1,171 @@
1
+ # Asynchronous Distributed Genetic Algorithms with Javascript and JSON
2
+
3
+ Juan Julián Merelo-Guervós, Pedro A. Castillo, JLJ Laredo, A. Mora García, A. Prieto
4
+
5
+ Abstract—In a connected world, spare CPU cycles are up for grabs, if only obtaining them is made easy enough. In this paper we present a distributed evolutionary computation system that uses the computational capabilities of the ubiquitous web browser. Using Asynchronous Javascript and JSON (Javascript Object Notation, a serialization protocol) allows anybody with a web browser (that is, almost everybody connected to the Internet) to participate in a genetic algorithm experiment with little effort, or none at all. Since, in this case, computing becomes a social activity and is inherently unpredictable, in this paper we explore the performance of this kind of virtual computer by solving simple problems such as the Royal Road function and analyzing how many machines and evaluations it yields. We also examine possible performance bottlenecks and how to solve them, and, finally, offer some advice on how to set up this kind of experiment to maximize turnout and, thus, performance.
6
+
7
+ # I. INTRODUCTION
8
+
9
+ Application-level networks (ALNs) are configured as a set of clients and servers, in which clients provide their spare CPU cycles by means of a downloadable application, establishing a distributed computation network that can provide ad hoc computational power. Some ALNs, like SETI@Home, have been quite successful [1], creating a virtual computer that has processed a good number of teraflops, while other experiments such as Popular Power (and most others, in fact) have not [?].
10
+
11
+ The key feature of these application-level networks is their simplicity of use: we believe that the best way to obtain the participation of as many users as possible is to make participation as simple as possible. In particular, it will be easier if they do not need to download a special application (such as a screen-saver) to participate, as is needed in BOINC, the successor to SETI@Home. For this reason, we are exploring the use of applications that are commonly installed in the user's computer, such as the web browser, which is available even in PDAs and some cellular phones<sup>1</sup>. Moreover, most browsers natively include a JavaScript interpreter [2], [3], [4] or virtual machine. JavaScript is an interpreted language<sup>2</sup>, initially proposed by Netscape, and later adopted as an ECMA standard [5], [6], [7], [8]. In this way, most browsers are compatible, at least at the language level (not always at the level of browser objects, where a reasonable compatibility nevertheless exists). Most browsers also include elements such as a Java virtual machine and a Flash plugin, which,
12
+
13
+ The authors are with the Departamento de Arquitectura y Tecnología de Computadores, University of Granada (Spain), corresponding email jj@merelo.net
14
+ <sup>1</sup>Whose computing power is similar to four-year-old desktop machines
15
+ <sup>2</sup>which has nothing to do with Java, other than the name and its syntactic similarity
16
+
17
+ with ActionScript, has more or less the same capabilities. However, these have several disadvantages: they might or might not be present (they are not native), they are noisy in the sense that, since they act as plugins, their execution is always noticed by the user, their programs are more heavyweight than simple text code, and, finally, their integration with the browser is more awkward than the seamless integration that JavaScript offers. In any case, most things said here for JavaScript also apply to these and other plugins.
18
+
19
+ By itself, an interpreted language is not enough for creating a metacomputer if there is no way to convey information back from the client to the server in a seamless way. The ability to use the virtual machine included in browsers for distributed computing appeared with the XMLHttpRequest object, which allows asynchronous requests to the server, in what has been called AJAX, Asynchronous JavaScript and XML [9]. AJAX is just one of the possible ways to perform asynchronous client-server communication, the others being AJAJ (Asynchronous JavaScript and JSON) and remoting using applets or embedded objects. However, it is quite popular, and a wide user base and documentation are available for it, whichever of these asynchronous client/server communication protocols is used. The traditional client/server model then becomes more egalitarian, or closer to a peer-to-peer model, since a bidirectional communication line appears: the browser can make calls to the server, do some computation and later send the results to the server.
20
+
21
+ AJAX (and AJAJ, which differ only in the way data is serialized) works as follows: the XMLHttpRequest is provided with a request to the server and a pointer to a callback function. The request generates an event, which is asynchronously activated when a reply is received, making use of the callback function. Following this approach, the browser is not locked, providing a way to program applications that are similar to the ones used at the desktop, in the sense that they do not have to wait for the application response to be loaded and rendered on the screen every time a request is made. It also means that a user clicking on the Submit button is no longer needed to initiate communication with the server; any JavaScript thread can do so, with the constraint that the only server it can communicate with is the one that hosts the page the script is included in. On the other hand, this provides a way to use the browser for application-level networks that create distributed computing systems, since the request-response loop does not need user participation, in a fashion very similar to any other distributed computing application; these ALNs can be
22
+
23
+ controlled from the server with any programming language. Of course, it can also be combined with other distributed programming frameworks based on OpenGrid [10] or other distributed computing paradigms.
24
+
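+ As a minimal illustration of the asynchronous request/callback mechanism just described (the function and field names below are ours, not taken from the actual AGAJAJ code), a client-side helper could look roughly as follows:
+
+ ```javascript
+ // Illustrative sketch: POST a JSON payload to the server hosting the page and
+ // invoke a callback asynchronously when the reply arrives.
+ function sendToServer(payload, onReply) {
+   var xhr = new XMLHttpRequest();
+   xhr.open("POST", "/experiment", true);            // same-origin URL, as the browser requires
+   xhr.setRequestHeader("Content-Type", "application/json");
+   xhr.onreadystatechange = function () {
+     if (xhr.readyState === 4 && xhr.status === 200) {
+       onReply(JSON.parse(xhr.responseText));        // callback fired when the reply is received
+     }
+   };
+   xhr.send(JSON.stringify(payload));                // JSON instead of XML, i.e. AJAJ
+ }
+ ```
+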
25
+ We previously used Ruby on Rails for making this kind of distributed AJAX application [11]; however, performance and scaling behavior were not too satisfactory, mainly for two reasons: the nature of the Ruby on Rails server, which required the setup of a load-balancing server, and the (probably decisive) fact that part of the genetic algorithm was done on the server, with a low degree of parallelism and thus a high impact on performance. The latest experiments after publication yielded a maximum of 40 chromosome evaluations per second<sup>3</sup>.
26
+
27
+ In this paper, following the same concept of distributed evolutionary computation on the browser via AJAX, we have redesigned the application using Perl and PostgreSQL (on the server) and Javascript, as before, on the client, and renamed it AGAJAJ (pronounce it A-gah-yai), which stands for Asynchronous Genetic Algorithm with Javascript and JSON; in this case, the genetic algorithm (solving the well-known Royal Road problem) is carried out only on the clients, with the server used just for the interchange of information among them. We will perform several experiments in which clients donate computing power by just loading a web page, to find out what kind of performance we can expect from this kind of setup, from the number of machines that will be made available by their users to the number of evaluations each one of them can perform; in these experiments, we have improved by two orders of magnitude the performance achieved in the previous experiments that used Ruby on Rails, as well as the number of concurrent machines available to perform them, showing that this kind of setup is ready to take on more computing-intensive experiments without the need for an expensive server setup.
28
+
29
+ This paper follows our group's line of work on distributing evolutionary computation applications, which has already been adapted to several parallel and distributed computing paradigms (for example, Jini [12], JavaSpaces [13], Java with applets [14], service oriented architectures [15] and P2P systems [16], [17]). Evolutionary computation is quite well suited to this kind of distributed environment for several reasons: it is a population-based method, so computation can be distributed among nodes (via distribution of the population) in many different ways; besides, some works suggest that there are synergies between evolutionary algorithms and parallelization: isolated populations that are only occasionally connected avoid the loss of diversity and produce better solutions in less time, obtaining, in some cases, superlinear accelerations [18].
30
+
31
+ Of course, with a suitable work division method, many other algorithms could be adapted to browser-based distributed computation; however, in this paper we will deal only with genetic algorithms, and concentrate on raw performance rather than algorithmic behavior.
32
+
33
+ The rest of the paper is organized as follows: the next section concentrates on the application of volunteer/involuntary computing to evolutionary computation; the setup is described in section III. Experiments and results are shown in section IV and discussed in section V, along with future lines of work.
34
+
35
+ # II. STATE OF THE ART
36
+
37
+ So-called volunteer computing [19], [20] systems are application-level networks set up so that different people can donate CPU cycles for a joint computing effort. The best known project is SETI@home<sup>4</sup>, which, from the user's point of view, is a screen-saver which has to be downloaded and installed; when the user's CPU is not busy it performs several signal analysis operations. Some companies related to volunteer computing, such as Popular Power (and others; they are referenced, for example, in [21]), did some experimentation with Java-based clients, but none has had commercial success; on the other hand, the SETI@Home program has been open-sourced and extended as the BOINC (Berkeley Open Infrastructure for Network Computing) framework [22]. This kind of volunteer computing has been adapted to evolutionary computation on several occasions, using frameworks such as DREAM [23], which includes a Java-based virtual machine, GOLEM@Home, Electric Sheep [24] and G2-P2P [25]. These approaches acknowledge that to achieve massive scalability, a peer-to-peer (P2P) approach is advisable, since it eliminates bottlenecks and single points of failure.
38
+
39
+ There are mainly two problems with this kind of volunteer network: first of all, it is important not to abuse the CPU resources of volunteers; secondly, a sufficient number of users is needed in order to be able to do the required computation, which can be a problem on its own if there are too many of them, bringing the network, or at least the solution-collecting node, to its knees. A third problem is that performance prediction is difficult when neither the number of participants nor their individual node performances are known in advance. Finally, fault-tolerance [26] and cheating [27] are also important issues; if the environment is competitive, or any single computation is important, they will have to be taken into account.
40
+
41
+ In any case, we believe that the best way to obtain a good number of users is to make it easy for them to participate, using technologies already available on their computers, such as the browser. In fact, some suggestions were published (for example, that of Jim Culbert in his blog [28], and in some mailing lists), and, besides our own [11], there have been some recent papers and reports on similar setups. For instance, W. Langdon has been running for some time an interactive evolution experiment using Javascript in the browser [29], which was intended more for achieving high diversity in a fractal snowflake design than for high performance. Even more recently, Klein and Spector [30] present a system based on the Push3 language, which is compiled to JavaScript
42
+
43
+ in the browser. This system would be the closest to what we are presenting in this paper.
44
+
45
+ The proposed approach could also be considered as parasitic computing since, as stated in Section I, the only participation from the user will be to load a web page and click on a button; in fact, any AJAX-based application could use these resources without the user's acquiescence (and, in any case, it would be desirable for it to run without causing much trouble). The concept was introduced by Barabási in [31], and followed by others (for instance, Kohring in [32]). In that work the authors proposed using Internet routers to compute a checksum by means of a set of specially crafted packets, whose aggregated result would be used to solve the SAT problem. Anyway, although the concept is interesting, there seems to be no continuation of this work (at least openly), probably due to its inherent dangers (as analyzed in a paper by Lam et al. [33]).
46
+
47
+ The virtual machine embedded in the browser provides a way to easily do that kind of sneaky/parasitic computing, but JavaScript faces the handicap of being an interpreted language, which means that the efficiency of different implementations varies wildly. Moreover, it is not optimized for numerical computation but for object tree management (the so-called DOM, document object model) and strings. Nevertheless, its wide availability makes it worth considering, at least as a possibility.
48
+
49
+ # III. METHODOLOGY AND EXPERIMENTAL SETUP
50
+
51
+ For these experiments we have designed and implemented a client-server program written in Perl (server-side) and Javascript (client-side), communicating using JSON via the XMLHttpRequest object. This object requires that the website and the AJAX requests are served from the same host, which is a constraint. Code for both is available, under the GPL, from http://rubyforge.org/projects/dconrails/. The algorithm runs on the client for a fixed number of generations, as shown in figure 1; running parameters are set on the server and are downloaded from it along with the webpage from which the experiment is run. A preset number of generations is run on the client, after which a request is made to the server with the best individual of the last generation. The algorithm stops and waits for the answer from the server. The server receives the request, stores it in a database, and sends back the best individual stored on the server. This individual is incorporated into the client population, which starts running again. Several clients acting at the same time make requests asynchronously, using the facilities of the standard Apache web server. The server is thus used as a clearinghouse for the interchange of information among the different clients; however, there is no explicit communication or topology among the different nodes running the genetic algorithm. Besides, the fact that the server always contains the best individuals generated so far guarantees that the best solution found so far (at a resolution of a fixed number of evaluations) is always kept. The server also sends back the number of generations the client should run, which is usually the same number as before,
52
+
53
+ but turns to 0, thus stopping the client, when the stopping condition is met.
54
+
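+ A rough sketch of this client-side loop (with hypothetical helper names; the actual GPL code is the one linked above) is:
+
+ ```javascript
+ // Sketch of the client loop: evolve locally for a chunk of generations, then
+ // exchange the best individual with the server and incorporate the immigrant.
+ // doOneGeneration and bestOf are hypothetical helpers standing in for the GA itself;
+ // sendToServer is the kind of AJAJ helper sketched in the previous section.
+ function runChunk(population, generationsToRun) {
+   if (generationsToRun === 0) return;          // the server signals the stopping condition with 0
+   for (var g = 0; g < generationsToRun; g++) {
+     population = doOneGeneration(population);  // selection, crossover, mutation
+   }
+   sendToServer({ best: bestOf(population) }, function (reply) {
+     population.push(reply.best);               // immigrant: best individual stored on the server
+     runChunk(population, reply.generations);   // the server also says how many generations to run next
+   });
+ }
+ ```
+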
55
+ Clients leave the experiment by the expeditious method of surfing away to another page or closing the web browser; in tabbed browsers (most browsers nowadays), a tab (or several) can run the experiment while the browser is available for other tasks. When the experiment has been running for a predetermined number of evaluations (set, for this experiment, to 750000), all clients get a message to stop running, and their user interface changes to a message inviting them to reload the (new) experiment and start all over again. Besides, there is a watchdog daemon running on the server which checks the database for the number of individuals evaluated, and resets the experiment by incrementing the experiment ID by one and eliminating the population. Thus, experiments can run unattended on a server while this watchdog daemon is running. Several additional utilities are also provided via webpages that report the state of the experiment or allow the GA parameters to be set. Experimental subjects were gathered by several methods: sending the URL via email to department and project coworkers, using the URL for the experiment as a Google Talk status line, as a Twitter (http://twitter.com) message, as a blog post, and, eventually, it was picked up by a wildly popular Spanish blog<sup>5</sup>, which managed to gather the highest number of machines.
56
+
57
+ The experiment consisted of optimizing the 256-bit Royal Road function, and each instance consisted of a maximum of 750000 evaluations (which were barely enough to find the solution). The algorithm was steady state (with incorporation of the immigrant every 20 generations), with rank-based selection and substitution; every generation, $50\%$ of the population was generated, replacing the worst $50\%$ of individuals. Crossover priority was set to $80\%$, and mutation to $20\%$, changing $1\%$ of the bits. However, these settings have no influence on performance, other than the fact that, if the solution is found before the end of the experiment, the users will get bored and change to a new page<sup>6</sup>.
58
+
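+ As an illustration only (the block size is an assumption, since the paper just states a 256-bit chromosome), the fitness function and the settings above could be written as:
+
+ ```javascript
+ // Royal Road fitness: sum of the sizes of the blocks that are completely set to 1.
+ // bits is an array of 0s and 1s; a block size of 8 is assumed here.
+ function royalRoad(bits, blockSize) {
+   var fitness = 0;
+   for (var i = 0; i < bits.length; i += blockSize) {
+     if (bits.slice(i, i + blockSize).indexOf(0) === -1) {
+       fitness += blockSize;
+     }
+   }
+   return fitness;
+ }
+
+ var gaParams = {           // settings reported in the text
+   chromosomeLength: 256,
+   replacementRate: 0.5,    // 50% of the population generated each generation
+   crossoverPriority: 0.8,
+   mutationPriority: 0.2,
+   bitFlipRate: 0.01,       // mutation changes 1% of the bits
+   generationsPerChunk: 20, // immigrant incorporated every 20 generations
+   maxEvaluations: 750000
+ };
+ ```
+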
59
+ Data was gathered from two different sources: the watchdog-daemon logs, which mainly gave data about the number of individuals evaluated and the time needed for each experiment, and the Apache daemon log; the relevant lines were extracted simply by using grep. It should be noted that the server was not running the experiment exclusively, but alongside its usual tasks. The server was a 700-MHz machine with 1 Gigabyte of RAM, with the database on another dual-processor, 450-MHz machine. Both machines were running obsolete RedHat 7.x and 9.x Linux operating systems<sup>7</sup>.
60
+
61
+ Results of these experiments will be discussed in the
62
+
63
64
+
65
+ ![](images/81c70255e9252b9ab386acd7691f3d97e7be148188c4cc04e53c446416913614.jpg)
66
+ Fig. 1. Experiment running on two different browsers (Firefox and Epiphany) on the same machine. The user interface is written in Spanish, since this initial experiment was addressed to an audience speaking that language. The colored (or gray-scale) horizontal bar is a graphical representation of the chromosome. The inset window (Epiphany) started second, and thus its state of evolution is less advanced.
67
+
68
+ next section.
69
+
70
+ # IV. EXPERIMENTAL RESULTS
71
+
72
+ Eventually, the experiment ran for several days, with different degrees of intensity. Several hundred machines participated in different instances, coming from all over the world, although mainly from Spain. The first question we wanted to answer was: how many machines should we expect in this kind of experiment? BOINC and SETI@home have gathered thousands of machines in their $10+$ year runs, but our experiments were limited in time (several hours at most if no machine was available, a few minutes if one was), so a limited number of machines should also be expected. The distribution of the number of machines is shown in figure 2.
73
+
74
+ The exact figures will vary in every experiment, but it seems clear that the median number of machines will hover around 1/4 of the maximum number. Besides, it is quite easy to obtain 5 clients for a certain number of evaluations; most experiments have less than 10 clients.
75
+
76
+ On the other hand, the number of evaluations each client contributes varies widely, as is shown in figure 3.
77
+
78
+ Most clients contribute just a few generations, be it because the browser stops running the program if it takes more than a predetermined number of seconds (which can happen in IE or in Firefox if clients are too slow; usually the predetermined number of generations will be less than this number), the user gets bored and moves on, or because statistically most people join when the experiment has been running for some time and show up only when there are a few evaluations left. Each one of these problems would have to be tackled separately, but the baseline is that, even when a good number of machines joins an experiment, they will do so only for a limited amount of time on average. Besides, these limitations in number of generations translate also to limitations in time, so that experiments will have to be
79
+
80
+ ![](images/593231fc15da14522b9d75f07d10744c799de4593679725b63ae963c6bd73719.jpg)
81
+ Fig. 2. Histogram of the number of machines used in each experiment. The median is 8.5 machines, and the $75\%$ quartile is placed at 15 machines, with a peak of 44 machines. A single client using several browsers counts as a single machine. It should be noted that not all clients are simultaneously connected.
82
+
83
+ designed in a way that sufficient information is transmitted from clients during the expected time they are going to spend in the experiment.
84
+
85
+ But different clients have different performance, and it is also interesting to measure the average time (and thus average performance) it takes a client between two communications with the server (that is, $20 * 50$ evaluations +
86
+
87
+ ![](images/250b76a397fd705ce81c3148aaa82ee966773e0af5fbb82b7af1dbc82dc68358.jpg)
88
+ Fig. 3. Histogram of the number of generations all machines participating in the experiment have contributed, cut off at 400 generations. Each generation corresponds to 50 new individuals. Most clients contribute 100 generations or less, with a few contributing more than 200. The median is 16 generations, with the 3rd quartile placed at 55 generations.
89
+
90
+ waiting time). This is interesting for two main reasons: first, server performance will have to be tuned to be able to answer this level of requests; and second, the generation gap will also have to be fine-tuned so that the waiting time and the possibility that the script is blocked for running too long are minimized. The results obtained in the experiment are shown in figure 4.
91
+
92
+ This figure shows that, for this kind of problem, the vast majority of clients will have a gap smaller than two seconds. This quantity will vary for different problems, but the conclusion is that most clients will have high or median performance, with few clients having lower performance. This measure also gives us an estimate of the average performance (2.906 seconds/20 generations).
93
+
94
+ However, at the end of the day the setup is intended to achieve high performance when running an evolutionary computation experiment. This data is presented in figure 5.
95
+
96
+ This figure is quite similar to fig 4. Median is at 1000 seconds, with a minimum at 292 and 3rd quartile at 2323; $75\%$ of runs will last less than 2323 seconds. Taking into account that the average 20-generation step is 2.906 seconds, and a single experiment needs 375 such steps, the average single machine run would take 1089.75 seconds; this means that the maximum speedup achieved is $1089.75 / 292 = 3.73$ clients running simultaneously, and the median is approximately a single average machine. This will probably vary for experiments of different duration, but, on average, we could say that significant (albeit small) speedups can be
97
+
98
+ achieved using spontaneous volunteer computing. In general, however, several machines will sequentially provide CPU cycles to an experiment, adding up to the equivalent of a single machine doing all the work. Also, the fact that there are up to 44 machines working in a single experiment, or that the range of running times can vary by a factor of up to one hundred, indicates that, for this experiment, no bottleneck has been found. Of course, more simultaneous machines will have to be tested to find the limit. Finally, the fact that all contributions are voluntary means that the evaluation rate is not constant, yielding figures like figure 6, where the steepness of each line is roughly equivalent to the evaluation speed, since the $x$ axis corresponds to time, and the $y$ axis to the number of individuals evaluated.
99
+
100
+ # V. CONCLUSIONS, DISCUSSION AND FUTURE WORK
101
+
102
+ While in previous papers [11] we proved that this kind of AJAX based, volunteer, and potentially sneaky, computation could be used profitably for performing genetic algorithm experiments, in this paper we have proved that, without an expensive or far-fetched setup, it can achieve high performance, equivalent, at most, to several computers of average performance. The code used to perform the experiment is publicly available and is modular so that creating different experiments is just a matter of writing a new JavaScript fitness function and tuning the GA parameters accordingly.
103
+
104
+ The experiments have proved that there is a good amount of computational power that can be easily tapped and used for
105
+
106
+ ![](images/54f3353d943ec9b402dab15ee7b2b1aa34daa7c006d23805f2cdce892606223c.jpg)
107
+ Fig. 6. Plot showing the number of individuals evaluated vs. time for a few dozen experiments; time is plotted up to 600 seconds only. As can be seen, some experiments have a more or less constant evaluation rate (constant slope), while others are more step-like, with clients leaving and joining the experiment all the time.
108
+
109
+ evolutionary computation experiments; however, the nature of AGAJAJ also constrains the way users donate computing power, as well as the number of clients available for an experiment. In this paper we have obtained some figures, which will undoubtedly vary for other experiments; however, the general shape of the curves will probably be the same, following a very steep decrease from the maximum values obtained.
110
+
111
+ The GA, being asynchronous, faces some problems that have not been tackled in this paper. What is the best approach to preserve diversity? To generate a new population in each client, and receive immigrants as soon as possible, which are incorporated into the population? Or is it better to create new client populations based on existing populations? What is really the algorithmic contribution of new clients? These issues will be explored as future work. We will also try to measure the limits of this technology, and test the impact of servers of varying performance and workload on overall
112
+
113
+ performance. Eventually, we will also try to perform a sneaky experiment, to check what kind of performance can be expected in that kind of setup.
114
+
115
+ Another avenue of work will be to examine the algorithmic performance of AGAJAJ: even as new clients are added to an experiment, what is the improvement obtained from them? In order to check that, a controlled experiment using known computers will be performed, adding them one at a time, so that the real impact on the genetic algorithm is evaluated. Once that is known, it would be interesting to experiment with adaptive client parameters, instead of the one-size-fits-all parameter settings used so far.
116
+
117
+ # ACKNOWLEDGEMENTS
118
+
119
+ This paper has been funded in part by the Spanish MICYT project NoHNES (Spanish Ministerio de Educación y Ciencia - TIN2007-68083) and the Junta de Andalucía P06-TIC-02025. We are also grateful to the editors of the
120
+
121
+ ![](images/5af9cbf06b01bb0e4b876c9fa5a129e00ee3225338981178df07f5bceb785e88.jpg)
122
+ Fig. 4. Distribution of gap time, that is, time between two requests from a particular client; it has been rounded to the closest number of seconds (since the webserver logs from which it has been extracted have that resolution). Most clients take 3 seconds or less to process 20 generations, with a majority taking 2 or less. Gaps bigger than 10-15 seconds are probably not significant; in fact, negative gaps and those bigger than 100 seconds have been eliminated from the data set. For this set of experiments, the median is at 2 seconds. The $x$ axis is logarithmic, to emphasize the fact that the distribution of client performance falls off very fast, although a different center should be expected for a different problem.
123
+
124
+ Microsiervos.com blog for talking about our experiment, and to the readers of that article for participating in it. We are also grateful to the anonymous people who learned about the experiment through several possible channels<sup>8</sup> and participated in it.
125
+
126
+ # REFERENCES
127
+
128
+ [1] D. P. Anderson, J. Cobb, E. Korpela, M. Lebofsky, and D. Werthimer, “SETI@home: an experiment in public-resource computing,” Commun. ACM, vol. 45, no. 11, pp. 56–61, 2002.
129
+ [2] R. Steinmetz and K. Wehrle, Eds., Peer-to-Peer Systems and Applications, ser. Lecture Notes in Computer Science, vol. 3485. Springer, 2005.
130
+ [3] Gilorien, DHTML and JavaScript. Upper Saddle River, NJ 07458, USA: Prentice-Hall PTR, 2000.
131
+ [4] R. Shah, "A beginner's guide to JavaScript," JavaWorld: IDG's magazine for the Java community, vol. 1, no. 1, pp. ??-??, Mar. 1996. [Online]. Available: http://www.javaworld.com/javaworld/jw-03-1996/jw-03-javascriptintro.htm
132
+ [5] D. Flanagan, JavaScript Pocket Reference (2nd Edition). O'Reilly, October 2002. [Online]. Available: http://www.amazon.fr/exec/obidos/ASIN/0596004117/citeulike04-21
133
+ [6] ECMA, ECMA-262: ECMAScript Language Specification, 3rd ed. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Dec. 1999. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ECMA-262.HTM
134
+
135
+ <sup>8</sup>For instance, this article in my own blog http://atalaya.blogalia.com/historias/53480
136
+
137
+ ![](images/16fbbc6baab692bce6ff11ca3dc7de066f9bd4ab6f5d56d39617c4f2f9d0db51.jpg)
138
+ Fig. 5. Distribution of running times for a fixed amount of evaluations. Some outliers have been cut off; approximately $10\%$ of runs took more than 10000 seconds. The mode is between 500 and 1000 seconds, and most runs end before 2000 seconds. Time starts to count from the moment the first client connects to the server.
139
+
140
+ [7] ——, ECMA-290: ECMAScript Components Specification. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 1999. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ECMA-290.HTM
141
+ [8] ——, ECMA-327: ECMAScript 3: Compact Profile, 3rd ed. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 2001. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ecma-327.htm
142
+ [9] ——, ECMA-357: ECMAScript for XML (E4X) Specification. Geneva, Switzerland: ECMA (European Association for Standardizing Information and Communication Systems), Jun. 2004. [Online]. Available: http://www.ecma.ch/ecma1/STAND/ecma-357.htm
143
+ [10] Wikipedia, “Ajax — wikipedia, la enciclopedia libre,” 2006, [Internet; retrieved 14 May 2006]. [Online]. Available: http://es.wikipedia.org/w/index.php?title=AJAX&oldid=3168254
144
+ [11] "Towards Open Grid Services Architecture," http://www.globus.org/ogsa/.
145
+ [12] J. J. Merelo, A. M. García, J. L. J. Laredo, J. Lupión, and F. Tricas, "Browser-based distributed evolutionary computation: performance and scaling behavior," in GECCO '07: Proceedings of the 2007 GECCO conference companion on Genetic and evolutionary computation. New York, NY, USA: ACM Press, 2007, pp. 2851-2858.
146
+ [13] M. García-Arenas, J. G. Castellano, P. A. Castillo, J. Carpio, M. Cillero, J.-J. Merelo-Guervós, A. Prieto, V. Rivas, and G. Romero, "Speedup measurements for a distributed evolutionary algorithm that uses Jini," in XI Jornadas de Paralelismo, U. d. G. Depto. ATC, Ed., 2000, pp. 241-246.
147
+ [14] C. Setzkorn and R. C. Paton, "Javaspaces - an affordable technology for the simple implementation of reusable parallel evolutionary algorithms," in Proceedings of the International Symposium on Knowledge Exploration in Life Science Informatics, KELSI 2004, ser. Lecture Notes in Artificial Intelligence, J. A. López, E. Benfenati, and W. Dubitzky, Eds., vol. 3303. Milan, Italy: Springer, 25-26 Nov. 2004, pp. 151-160. [Online]. Available: http://springerlink.metapress.com/openurl.asp?genre=article;issn=0302-9743;volume=3303;spage=151
148
+ [15] F. S. Chong and W. B. Langdon, "Java based distributed genetic
149
+
150
+ programming on the internet," in Proceedings of the Genetic and Evolutionary Computation Conference, W. Banzhaf, J. Daida, A. E. Eiben, M. H. Garzon, V. Honavar, M. Jakiela, and R. E. Smith, Eds., vol. 2. Orlando, Florida, USA: Morgan Kaufmann, 13-17 Jul. 1999, p. 1229, full text in technical report CSRP-99-7. [Online]. Available: http://www.cs.ucl.ac.uk/staff/W.Langdon/ftp/papers/p.chong/DGPposter.pdf
151
+ [16] D. S. Myers and M. P. Cummings, "Necessity is the mother of invention: a simple grid computing system using commodity tools," J. Parallel Distrib. Comput., vol. 63, no. 5, pp. 578-589, 2003.
152
+ [17] J. L. J. Laredo, P. A. Castillo, B. Paechter, A. M. Mora, E. Alfaro-Cid, A. I. Esparcia-Alcazar, and J. J. Merelo, "Empirical validation of a gossiping communication mechanism for parallel eas," ser. Lecture Notes in Computer Science, M. G. et al., Ed., vol. 4448. Springer, 2007, pp. 129-136.
153
+ [18] E. A. Eiben, M. Schoenauer, J. L. J. Laredo, P. A. Castillo, A. M. Mora, and J. J. Merelo, "Exploring selection mechanisms for an agent-based distributed evolutionary algorithm," in GECCO '07: Proceedings of the 2007 GECCO conference companion on Genetic and evolutionary computation. New York, NY, USA: ACM Press, 2007, pp. 2801-2808.
154
+ [19] E. Cantú-Paz, “Migration policies, selection pressure, and parallel evolutionary algorithms,” Journal of Heuristics, vol. 7, no. 4, pp. 311–334, 2001.
155
+ [20] L. F. G. Sarmenta and S. Hirano, "Bayanihan: building and studying Web-based volunteer computing systems using Java," Future Generation Computer Systems, vol. 15, no. 5-6, pp. 675-686, 1999. [Online]. Available: citeseer.nj.nec.com/sarmenta99bayanihan.html
156
+ [21] D. P. Anderson, E. Korpela, and R. Walton, "High-performance task distribution for volunteer computing," in E-SCIENCE '05: Proceedings of the First International Conference on e-Science and Grid Computing. Washington, DC, USA: IEEE Computer Society, 2005, pp. 196-203.
157
+ [22] P. Cappello and D. Mourloukos, “A scalable, robust network for parallel computing,” in JGI '01: Proceedings of the 2001 joint ACM-ISCOPE conference on Java Grande. New York, NY, USA: ACM Press, 2001, pp. 78–86.
158
+ [23] D. Anderson, "Boinc: A system for public-resource computing and storage," in Proceedings of the 5th IEEE/ACM International Workshop on Grid Computing, Pittsburgh, USA, 2004.
159
+ [24] M. Arenas, P. Collet, A. Eiben, M. Jelasy, J. J. Merelo, B. Paechter, M. Preuß, and M. Schoenauer, "A framework for distributed evolutionary algorithms," ser. Lecture Notes in Computer Science, LNCS, J.-J. M. Guervós, P. Adamidis, H.-G. Beyer, J.-L. Fernández-Villacanas, and H.-P. Schwefel, Eds., no. 2439. Springer-Verlag, September 2002, keywords: Implementation::Parallel EAs, Related::Simulated Adaptive Behavior, Technique::Advanced techniques - miscellaneous, Technique::Evolutionary computing - general, Technique::Evolutionary computing - miscellaneous, CiteSeer context: http://citeseer.nj.nec.com/context/2189070/0, available from http://link.springer.de/link/service/series/0558/papers/2439/243900665.pdf, Metapress URL: http://www.springerlink.com/link.asp?id=h4n29kbl69jvab4c.
160
+ [25] Wikipedia, "Electric sheep — wikipedia, the free encyclopedia," 2007, http://en.wikipedia.org/w/index.php?title=Electric_Sheep&oldid=159677937.
161
+ [26] R. Mason and W. Kelly, “G2-P2P: a fully decentralised fault-tolerant cycle-stealing framework,” in ACSW Frontiers '05: Proceedings of the 2005 Australasian workshop on Grid computing and e-research. Darlinghurst, Australia, Australia: Australian Computer Society, Inc., 2005, pp. 33-39.
162
+ [27] F. F. D. Vega, "A fault tolerant optimization algorithm based on evolutionary computation," in DEPCOS-RELCOMEX '06: Proceedings of the International Conference on Dependability of Computer Systems (DEPCOS-RELCOMEX'06). Washington, DC, USA: IEEE Computer Society, 2006, pp. 335-342.
163
+ [28] L. F. G. Sarmenta, "Sabotage-tolerance mechanisms for volunteer computing systems," Future Generation Computer Systems, vol. 18, no. 4, pp. 561-572, 2002. [Online]. Available: citeseer.nj.nec.com/449803.html
164
+ [29] J. Culbert, "Ajax and distributed computation thoughts," Published at http://culbert.net/?p=6, March 2006, last accessed May 2006.
165
+ [30] W. B. Langdon, “Pfeiffer – A distributed open-ended evolutionary system,” in AISB'05: Proceedings of the Joint Symposium on Socially Inspired Computing (METAS 2005), B. Edmonds, N. Gilbert, S. Gustafson, D. Hales, and N. Krasnogor, Eds., University of
166
+
167
+ Hertfordshire, Hatfield, UK, 12-15 Apr. 2005, pp. 7-13, SSAISB 2005 Convention. [Online]. Available: http://www.cs.ucl.ac.uk/staff/W.Langdon/ftp/papers/wbl_metas2005.pdf
168
+ [31] J. Klein and L. Spector, "Unwitting distributed genetic programming via asynchronous javascript and xml," in GECCO '07: Proceedings of the 9th annual conference on Genetic and evolutionary computation. New York, NY, USA: ACM, 2007, pp. 1628-1635.
169
+ [32] A.-L. Barabasi, V. W. Freeh, H. Jeong, and J. B. Brockman, "Parasitic computing," Nature, vol. 412, no. 6850, pp. 894-897, August 2001. [Online]. Available: http://www.nature.com/cgi-taf/DynaPage.taf?file=/nature/journal/v412/n6850/abs/412894a0\_fs.html
170
+ [33] G. A. Kohring, "Implicit simulations using messaging protocols," COMPUTERS AND PHYSICS, vol. 14, p. 203, 2003. [Online]. Available: http://www.citebase.org/abstract?id=oai:arXiv.org:cs/0208021
171
+ [34] V. T. Lam, S. Antonatos, P. Akritidis, and K. G. Anagnostakis, "Puppetnets: misusing web browsers as a distributed attack infrastructure," in CCS '06: Proceedings of the 13th ACM conference on Computer and communications security. New York, NY, USA: ACM, 2006, pp. 221-234.
2401.17xxx/2401.17234/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3441630e89ff3a11cc26c0fd5984b891dc3dc6f91be498b5b461665ee445f6d0
3
+ size 344703
2401.17xxx/2401.17234/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17256/d0240179-e0e4-4bf5-bee5-1fa8adc4334e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55eec5f84ea6280dc64d71c7cc5fb78516b085227753b742d028a425a5ea1701
3
+ size 774438
2401.17xxx/2401.17256/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17256/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d357cebc9a0fc11ca1b0eb57d1adfa1a2f4485a72f348474bcfc46f5d1bb39a
3
+ size 1473214
2401.17xxx/2401.17256/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17263/df23b01b-2de6-4c67-99dd-33be07502b9e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbef4350a8250b9e33cdbf6863aa87d11d86b14a6aaa75dc0fa9bbc7b2869751
3
+ size 1217272
2401.17xxx/2401.17263/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17263/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e7b72cb2807f23c76fff816c463aec23b3a41348e11161ab824ad07ddd09ffa
3
+ size 448452
2401.17xxx/2401.17263/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17264/9132c0a1-48d0-4e53-b664-220a2ed742eb_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd8e9b759b8199a232bbf8333d5661d175965668ee980c16a241cdac82ff2f49
3
+ size 859983
2401.17xxx/2401.17264/full.md ADDED
@@ -0,0 +1,471 @@
1
+ # Proactive Detection of Voice Cloning with Localized Watermarking
2
+
3
+ Robin San Roman*<sup>1,2</sup> Pierre Fernandez*<sup>1,2</sup> Hady Elsahar*<sup>1</sup>
4
+
5
+ Alexandre Défossez<sup>3</sup> Teddy Furon<sup>2</sup> Tuan Tran<sup>1</sup>
6
+
7
+ # Abstract
8
+
9
+ In the rapidly evolving field of speech generative models, there is a pressing need to ensure audio authenticity against the risks of voice cloning. We present AudioSeal, the first audio watermarking technique designed specifically for localized detection of AI-generated speech. AudioSeal employs a generator / detector architecture trained jointly with a localization loss to enable localized watermark detection up to the sample level, and a novel perceptual loss inspired by auditory masking, that enables AudioSeal to achieve better imperceptibility. AudioSeal achieves state-of-the-art performance in terms of robustness to real life audio manipulations and imperceptibility based on automatic and human evaluation metrics. Additionally, AudioSeal is designed with a fast, single-pass detector, that significantly surpasses existing models in speed, achieving detection up to two orders of magnitude faster, making it ideal for large-scale and real-time applications. Code is available at github.com/facebookresearch/audioseal.
10
+
11
+ # 1. Introduction
12
+
13
+ Generative speech models are now capable of synthesizing voices that are indistinguishable from real ones (Arik et al., 2018; Kim et al., 2021; Casanova et al., 2022; Wang et al., 2023). Though speech generation and voice cloning are not novel concepts, their recent advancements in quality and accessibility have raised new security concerns. A notable incident occurred where a deepfake audio misleadingly urged US voters to abstain, showcasing the potential for misusing these technologies to spread false information (Murphy et al., 2024). Regulators and governments are implementing measures for AI content transparency and
14
+
15
+ *Equal contribution ${}^{1}$ FAIR,Meta ${}^{2}$ Inria ${}^{3}$ Kyutai. Correspondence to: <robinsr, hadyelsahar, pfz@meta.com>.
16
+
17
+ Proceedings of the $41^{st}$ International Conference on Machine Learning, Vienna, Austria. PMLR 235, 2024. Copyright 2024 by the author(s).
18
+
19
+ ![](images/334f04688b7a04fe3c15ba417c8ae4aa4dcbf4a4fdbb795507f6c1f106745556.jpg)
20
+ Figure 1. Proactive detection of AI-generated speech. We embed an imperceptible watermark in the audio, which can be used to detect if a speech is AI-generated and identify the model that generated it. It can also precisely pinpoint AI-generated segments in a longer audio with a sample level resolution (1/16k seconds).
21
+
22
+ traceability, including forensics and watermarking - see Chi (2023); Eur (2023); USA (2023).
23
+
24
+ The main forensics approach to detect synthesized audio is to train binary classifiers to discriminate between natural and synthesized audios, a technique highlighted in studies by Borsos et al. (2022); Kharitonov et al. (2023); Le et al. (2023). We refer to this technique as passive detection since it does not alter the audio source. Albeit being a straightforward mitigation, it is prone to fail as generative models advance and the difference between synthesized and authentic content diminishes.
25
+
26
+ Watermarking emerges as a strong alternative. It embeds a signal in the generated audio, imperceptible to the ear but robustly detectable by specific algorithms. There are two watermarking types: multi-bit and zero-bit. Zero-bit watermarking detects the presence or absence of a watermarking signal, which is valuable for AI content detection. Multi-bit watermarking embeds a binary message in the content, allowing to link content to a specific user or generative model. Most deep-learning based audio watermarking methods (Pavlovic et al., 2022; Liu et al., 2023a; Chen et al., 2023) are multi-bit. They train a generator to output the watermarked audio from a sample and a message, and an extractor retrieving the hidden message.
27
+
28
+ Current watermarking methods have limitations. First, they are not adapted for detection. The initial applications assumed any sound sample under scrutiny was watermarked (e.g. IP protection). As a result, the decoders were never
29
+
30
+ trained on non-watermarked samples. This discrepancy between the training of the models and their practical use leads to poor or overestimated detection rates, depending on the embedded message (see App. B). Our method aligns more closely with the concurrent work by Juvela & Wang (2023), which trains a detector, rather than a decoder.
31
+
32
+ Second, they are not localized and consider the entire audio, making it difficult to identify small segments of AI-generated speech within longer audio clips. The approach of the concurrent WavMark (Chen et al., 2023) addresses this by repeating at 1-second intervals a synchronization pattern followed by the actual binary payload. This has several drawbacks. It cannot be used on spans shorter than 1 second and is susceptible to temporal edits. The synchronization bits also reduce the capacity for the encoded message, accounting for $31\%$ of the total capacity. Most importantly, the brute-force detection algorithm for decoding the synchronization bits is prohibitively slow, especially on non-watermarked content, as we show in Sec. 5.5. This makes it unsuitable for real-time and large-scale traceability of AI-generated content on social media platforms, where most content is not watermarked.
33
+
34
+ To address these limitations, we introduce AudioSeal, a method for localized speech watermarking. It jointly trains two networks: a generator that predicts an additive watermark waveform from an audio input, and a detector that outputs the probability of the presence of a watermark at each sample of the input audio. The detector is trained to precisely and robustly detect synthesized speech embedded in longer audio clips by masking the watermark in random sections of the signal. The training objective is to maximize the detector's accuracy while minimizing the perceptual difference between the original and watermarked audio. We also extend AudioSeal to multi-bit watermarking, so that an audio can be attributed to a specific model or version without affecting the detection signal.
35
+
36
+ We evaluate the performance of AudioSeal to detect and localize AI-generated speech. AudioSeal achieves state-of-the-art results on robustness of the detection, far surpassing passive detection with near perfect detection rates over a wide range of audio edits. It also performs sample-level detection (at resolution of 1/16k second), outperforming WavMark in both speed and performance. In terms of efficiency, our detector is run once and yields detection logits at every time-step, allowing for real-time detection of watermarks in audio streams. This represents a major improvement compared to earlier watermarking methods, which require synchronizing the watermark within the detector, thereby substantially increasing computation time. Finally, in conjunction with binary messages, AudioSeal almost perfectly attributes an audio to one model among 1,000, even in the presence of audio edits.
37
+
38
+ Our overall contributions are:
39
+
40
+ - We introduce AudioSeal, the first audio watermarking technique designed for localized detection of AI-generated speech up to the sample-level;
41
+ - A novel perceptual loss inspired by auditory masking, that enables AudioSeal to achieve better imperceptibility of the watermark signal;
42
+ - AudioSeal achieves state-of-the-art robustness to a wide range of real-life audio manipulations (section 5);
43
+ - AudioSeal significantly outperforms the state-of-the-art models in computation speed, achieving up to two orders of magnitude faster detection (section 5.5);
44
+ - Insights on the security and integrity of audio watermarking techniques when open-sourcing (section 6).
45
+
46
+ # 2. Related Work
47
+
48
+ In this section we give an overview of the detection and watermarking methods for audio data. A complementary description of prior works can be found in the Appendix A.
49
+
50
+ Synthetic speech detection. Detection of synthetic speech is traditionally done in the forensics community by building features and exploiting statistical differences between fake and real speech. These features can be handcrafted (Sahidullah et al., 2015; Janicki, 2015; AlBadawy et al., 2019; Borrelli et al., 2021) and/or learned (Müller et al., 2022; Barrington et al., 2023). The approach of most audio generation papers (Borsos et al., 2022; Kharitonov et al., 2023; Borsos et al., 2023; Le et al., 2023) is to train end-to-end deep-learning classifiers on what their models generate, similarly to Zhang et al. (2017). Accuracy when comparing synthetic to real is usually good, although these classifiers do not perform well on out-of-distribution audio (compressed, noised, slowed, etc.).
51
+
52
+ Imperceptible watermarking. Unlike forensics, watermarking actively marks the content to identify it once in the wild. It is enjoying renewed interest in the context of generative models, as it provides a means to track AI-generated content, be it for text (Kirchenbauer et al., 2023; Aaronson & Kirchner, 2023; Fernandez et al., 2023a), images (Yu et al., 2021b; Fernandez et al., 2023b; Wen et al., 2023), or audio/speech (Chen et al., 2023; Juvela & Wang, 2023).
53
+
54
+ Traditional methods for audio watermarking relied on embedding watermarks either in the time or frequency domains (Lie & Chang, 2006; Kalantari et al., 2009; Natgunanathan et al., 2012; Xiang et al., 2018; Su et al., 2018; Liu et al., 2019), usually including domain-specific features to design the watermark and its corresponding decoding function. Deep-learning audio watermarking methods focus on multi-bit watermarking and follow a generator/decoder framework (Tai & Mansour, 2019; Qu et al., 2023; Pavlovic et al., 2022; Liu et al., 2023a; Ren et al., 2023). Few works have explored zero-bit watermarking (Wu et al., 2023; Juvela & Wang, 2023), which is better adapted for detection of AI-generated content. Our rationale is that robustness increases as the message payload is reduced to the bare minimum (Furon, 2007).
57
+
58
+ In this study, we compare our work with the state-of-the-art watermarking method, WavMark (Chen et al., 2023), which outperforms previous ones. It uses invertible networks to hide 32 bits in 1-second audio segments. Detection is done by sliding along the audio in 0.05s steps and decoding the message for each window. If the first 10 decoded bits match a synchronization pattern, the rest of the payload is saved (22 bits), and the window can directly slide by 1s (instead of 0.05s). This brute-force detection algorithm is prohibitively slow, especially when the watermark is absent, since the algorithm has to attempt and fail to decode a watermark for each sliding window in the input audio.
59
+
60
+ # 3. Method
61
+
62
+ The method jointly trains two models. The generator creates a watermark signal that is added to the input audio. The detector outputs local detection logits. The training optimizes two concurrent classes of objectives: minimizing the perceptual distortion between original and watermarked audios, and maximizing watermark detection. To improve robustness to signal modifications and localization, we include a collection of train-time augmentations. At inference time, the logits precisely localize watermarked segments, allowing for detection of AI-generated content. Optionally, short binary identifiers may be added on top of the detection to attribute a watermarked audio to a version of the model while keeping a single detector.
63
+
64
+ # 3.1. Training pipeline
65
+
66
+ Figure 2. Generator-detector training pipeline.
67
+ ![](images/0272929f2db32630c572ca4828515473d789173620ad116de32b807d4566c570.jpg)
68
+ (i) The watermark generator takes as input a waveform $s \in \mathbb{R}^T$ and outputs a watermark waveform $\delta \in \mathbb{R}^T$ of the same dimensionality, where $T$ is the number of samples in the signal. The watermarked audio is then $s_w = s + \delta$.
71
+
72
+ (ii) To enable sample-level localization, we adopt an augmentation strategy focused on masking the watermark with silences and other original audios (a minimal sketch is given after this list). This is achieved by randomly selecting $k$ starting points and altering the next $T / 2k$ samples of $s_w$ in one of four ways: reverting to the original audio (i.e. $s_w(t) = s(t)$) with probability 0.4; replacing with zeros (i.e. $s_w(t) = 0$) with probability 0.2; substituting with a different audio signal from the same batch (i.e. $s_w(t) = s'(t)$) with probability 0.2; or leaving the samples unmodified with probability 0.2.
73
+ (iii) The second class of augmentation ensures the robustness against audio editing. One of the following signal alterations is applied: bandpass filter, boost audio, duck audio, echo, highpass filter, lowpass filter, pink noise, gaussian noise, slower, smooth, resample (full details in App. D.2). The parameters of those augmentations are fixed to aggressive values to enforce maximal robustness and the probability of sampling a given augmentation is proportional to the inverse of its evaluation detection accuracy. We implemented these augmentations in a differentiable way when possible, and otherwise (e.g. MP3 compression) with the straight-through estimator (Yin et al., 2019) that allows the gradients to back-propagate to the generator.
74
+ (iv) Detector $D$ processes the original and the watermarked signals, outputting for each a soft decision at every time step, meaning $D(s)\in [0,1]^T$ . Figure 3 illustrates that the detector's outputs are at one only when the watermark is present.
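+
+ A minimal PyTorch sketch of the masking augmentation of stage (ii) (our illustration, not the released implementation; tensor shapes, names, and the window scheme are assumptions):
+
+ ```python
+ import torch
+
+ def mask_watermark(s, s_w, s_other, k: int = 5):
+     """Randomly alter k windows of the watermarked signal s_w.
+
+     s:       original audio, shape (T,)
+     s_w:     watermarked audio s + delta, shape (T,)
+     s_other: another audio from the same batch, shape (T,)
+     Returns the augmented signal and per-sample labels (1 = watermarked).
+     """
+     T = s.shape[-1]
+     win = T // (2 * k)
+     out, labels = s_w.clone(), torch.ones(T)
+     for _ in range(k):
+         start = int(torch.randint(0, T - win, (1,)))
+         p = float(torch.rand(1))
+         if p < 0.4:      # revert to the original audio
+             out[start:start + win] = s[start:start + win]
+             labels[start:start + win] = 0
+         elif p < 0.6:    # replace with silence
+             out[start:start + win] = 0.0
+             labels[start:start + win] = 0
+         elif p < 0.8:    # substitute with a different audio from the batch
+             out[start:start + win] = s_other[start:start + win]
+             labels[start:start + win] = 0
+         # else (p >= 0.8): keep the watermarked samples unchanged
+     return out, labels
+ ```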
75
+
76
+ The architectures of the models are based on EnCodec (Défossez et al., 2022). They are presented in Figure 4 and detailed in Appendix D.3.
77
+
78
+ ![](images/8f318b7b6f5540b63bce8e44fd69afaf4eb233a3c295c3bb1774c3e65f3a47e3.jpg)
79
+ Figure 2 illustrates the joint training of the generator and the detector, which comprises the four stages (i)-(iv) described above.
80
+
81
+ ![](images/2605e687babf60be454b5a7e19566c95c7051fa52b4d73f1fbbeddba765d945c.jpg)
82
+ Figure 3. (Top) A speech signal (gray) where the watermark is present between 5 and 7.5 seconds (orange, magnified by 5). (Bottom) The output of the detector for every time step. An orange background color indicates the presence of the watermark.
83
+
84
+ ![](images/38116f063c1df624a760c70a9a2e4bc3166a0047aec7bf9cec552ce790131768.jpg)
85
+ Figure 4. Architectures. The generator is made of an encoder and a decoder both derived from EnCodec's design, with optional message embeddings. The encoder includes convolutional blocks and an LSTM, while the decoder mirrors this structure with transposed convolutions. The detector is made of an encoder and a transpose convolution, followed by a linear layer that calculates sample-wise logits. Optionally, multiple linear layers can be used for calculating k-bit messages. More details in App. D.3.
86
+
87
+ ![](images/166453c2bfbbef3d7fde135b31d578edec2778d6131e18d2c84232718b0a653e.jpg)
88
+
89
+ # 3.2. Losses
90
+
91
+ Our setup includes multiple perceptual losses and a localization loss. We balance them during training by scaling their gradients as done by Defossez et al. (2022). The complete list of used losses is detailed below.
92
+
93
+ Perceptual losses enforce the watermark imperceptibility to the human ear. These include an $\ell_1$ loss on the watermark signal to decrease its intensity, the multi-scale Mel spectrogram loss of (Gritsenko et al., 2020), and discriminative losses based on adversarial networks that operate on multi-scale short-term-Fourier-transform spectrograms. Defossez et al. (2022) use this combination of losses for training the EnCodec model for audio compression.
94
+
95
+ In addition, we introduce a novel time-frequency loudness loss, TF-Loudness, which operates entirely in the waveform domain. This approach is based on "auditory masking", a psycho-acoustic property of the human auditory system already exploited in the early days of watermarking (Kirovski & Attias, 2003): the human auditory system fails to perceive sounds occurring at the same time and in the same frequency range (Schnupp et al., 2011). TF-Loudness is calculated as follows: first, the input signal $s$ is divided into $B$ signals $s_0, \ldots, s_{B-1}$ based on non-overlapping frequency bands. Subsequently, every signal is segmented using a window of size $W$, with an overlap amount denoted by $r$. This procedure is applied to both the original audio signal $s$ and the embedded watermark $\delta$. As a result, we obtain segments of the signal and watermark in time-frequency dimensions, denoted as $s_b^w$ and $\delta_b^w$ respectively. For every time-frequency window we compute the loudness difference, where loudness is estimated following the ITU-R BS.1770-4 recommendation (International Telecommunication Union, 2011) (see App. D.1 for details):
96
+
97
+ $$
98
+ l_b^w = \operatorname{Loudness}\left(\delta_b^w\right) - \operatorname{Loudness}\left(s_b^w\right). \tag{1}
99
+ $$
100
+
101
+ This measure quantifies the discrepancy in loudness between the watermark and the original signal within a specific time window $w$ and a particular frequency band $b$. The final loss is a weighted sum of the loudness differences, using a softmax function:
104
+
105
+ $$
106
+ \mathcal{L}_{\text{loud}} = \sum_{b, w} \left(\operatorname{softmax}(l)_{b}^{w} \cdot l_{b}^{w}\right). \tag{2}
107
+ $$
108
+
109
+ The softmax prevents the model from targeting excessively low loudness where the watermark is already inaudible.
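+
+ The sketch below illustrates the structure of TF-Loudness (band split, windowing, loudness difference, softmax weighting). It is only an approximation of the loss described above: loudness is estimated as log mean power instead of the ITU-R BS.1770 measure, the band split uses naive FFT masking, and names and default values are ours.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def tf_loudness_loss(s, delta, n_bands: int = 8, win: int = 2048, hop: int = 1024):
+     """Approximate TF-Loudness loss. s, delta: (batch, T) waveforms."""
+     _, T = s.shape
+
+     def band_split(x):
+         # Split into n_bands non-overlapping frequency bands via FFT masking.
+         X = torch.fft.rfft(x, dim=-1)
+         chunks = torch.chunk(X, n_bands, dim=-1)
+         size = chunks[0].shape[-1]
+         bands = []
+         for i, c in enumerate(chunks):
+             Xi = torch.zeros_like(X)
+             Xi[..., i * size: i * size + c.shape[-1]] = c
+             bands.append(torch.fft.irfft(Xi, n=T, dim=-1))
+         return torch.stack(bands, dim=1)             # (batch, n_bands, T)
+
+     def loudness(x):
+         # Simplified loudness: log mean power (dB) per window.
+         return 10 * torch.log10(x.pow(2).mean(dim=-1) + 1e-8)
+
+     s_tf = band_split(s).unfold(2, win, hop)          # (batch, n_bands, n_win, win)
+     d_tf = band_split(delta).unfold(2, win, hop)
+     l = loudness(d_tf) - loudness(s_tf)               # loudness difference, Eq. (1)
+     w = F.softmax(l.flatten(1), dim=-1)               # softmax over all (band, window) cells
+     return (w * l.flatten(1)).sum(dim=-1).mean()      # weighted sum, Eq. (2)
+ ```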
110
+
111
+ Masked sample-level detection loss. A localization loss ensures that the detection of watermarked audio is done at the level of individual samples. For each time step $t$ , we compute the binary cross entropy (BCE) between the detector's output $D(s)_t$ and the ground truth label (0 for non-watermarked, 1 for watermarked). Overall, this reads:
112
+
113
+ $$
114
+ \mathcal{L}_{loc} = \frac{1}{T} \sum_{t=1}^{T} \operatorname{BCE}\left(D(s')_{t}, y_{t}\right), \tag{3}
115
+ $$
116
+
117
+ where $s'$ might be $s$ or $s_w$ , and where time step labels $y_t$ are set to 1 if they are watermarked, and 0 otherwise.
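+
+ A sketch of Eq. (3) in PyTorch (assuming the detector already outputs per-sample probabilities in $[0,1]$):
+
+ ```python
+ import torch.nn.functional as F
+
+ def localization_loss(detector_probs, labels):
+     """detector_probs: D(s')_t in [0, 1], shape (batch, T); labels: y_t in {0, 1}, same shape."""
+     return F.binary_cross_entropy(detector_probs, labels)
+ ```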
118
+
119
+ # 3.3. Multi-bit watermarking
120
+
121
+ We extend the method to support multi-bit watermarking, which allows for attribution of audio to a specific model version. At generation, we add a message processing layer in the middle of the generator. It takes the activation map in $\mathbb{R}^{h,t'}$ and a binary message $m\in \{0,1\}^b$ and outputs a new activation map to be added to the original one. We embed $m$ into $e = \sum_{i = 0..b - 1}E_{2i + m_i}\in \mathbb{R}^h$, where $E\in \mathbb{R}^{2b,h}$ is a learnable embedding layer. $e$ is then repeated $t'$ times along the temporal axis to match the activation map size $(h,t')$. At detection, we add $b$ linear layers at the very end of the detector. Each of them outputs a soft value for each bit of the message at the sample level. Therefore, the detector outputs a tensor of shape $\mathbb{R}^{t,1 + b}$ (1 for the detection, $b$ for the message). At training, we add a decoding loss $\mathcal{L}_{dec}$ to the localization loss $\mathcal{L}_{loc}$. This loss $\mathcal{L}_{dec}$ averages the BCE between the original message and the detector's outputs over all parts where the watermark is present.
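+
+ A sketch of the message-processing layer (class and argument names are ours; the shapes follow the description above):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MessageEmbedder(nn.Module):
+     """Adds a b-bit message embedding to the generator's hidden activations."""
+
+     def __init__(self, nbits: int = 16, hidden: int = 32):
+         super().__init__()
+         self.nbits = nbits
+         self.embedding = nn.Embedding(2 * nbits, hidden)   # E in R^{2b x h}
+
+     def forward(self, latents, message):
+         # latents: (batch, h, t'); message: (batch, nbits) with entries in {0, 1}.
+         positions = 2 * torch.arange(self.nbits, device=message.device)
+         e = self.embedding(positions + message.long()).sum(dim=1)   # e = sum_i E_{2i + m_i}, (batch, h)
+         return latents + e.unsqueeze(-1)                             # broadcast (repeat) along time
+ ```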
122
+
123
+ # 3.4. Training details
124
+
125
+ Our watermark generator and detector are trained on a 4.5k-hour subset of the VoxPopuli dataset (Wang et al., 2021). It is important to emphasize that the sole purpose of our generator is to generate imperceptible watermarks given an input audio, without the capability to produce or modify speech content. We use a sampling rate of $16\mathrm{kHz}$ and one-second samples, so $T = 16000$ in our training. A full training requires $600\mathrm{k}$ steps, with Adam, a learning rate of $10^{-4}$, and a batch size of 32. For the drop augmentation, we use $k = 5$ windows of 0.1 sec. $h$ is set to 32, and the number of additional bits $b$ to 16 (note that $h$ needs to be higher than $b$; for example, $h = 8$ is enough in the zero-bit case). The perceptual losses are balanced and weighted as follows: $\lambda_{\ell_1} = 0.1$, $\lambda_{msspec} = 2.0$, $\lambda_{adv} = 4.0$, $\lambda_{loud} = 10.0$. The localization and watermarking losses are weighted by $\lambda_{loc} = 10.0$ and $\lambda_{dec} = 1.0$ respectively.
126
+
127
+ # 3.5. Detection, localization and attribution
128
+
129
+ At inference, we may use the generator and detector for:
130
+
131
+ - Detection: To determine if the audio is watermarked or not. To achieve this, we use the average detector's output over the entire audio and flag it if the score exceeds a threshold (default: 0.5).
132
+ - Localization: To precisely identify where the watermark is present. We utilize the sample-wise detector's output and mark a time step as watermarked if the score surpasses a threshold (default: 0.5).
133
+ - Attribution: To identify the model version that produced the audio, enabling differentiation between users or APIs with a single detector. The detector's first output gives the detection score and the remaining $b$ outputs are used for attribution. This is done by computing the average message over detected samples and returning the identifier with the smallest Hamming distance (a sketch of these three modes follows below).
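+
+ A sketch of how the detector's outputs can be used for the three modes (our illustration; `scores` and `messages` are assumed to be the detector's per-sample outputs):
+
+ ```python
+ import torch
+
+ def detect_localize_attribute(scores, messages, model_ids, threshold: float = 0.5):
+     """scores: (T,) detection probs; messages: (T, b) soft bits; model_ids: (N, b) binary IDs."""
+     detected = scores.mean().item() > threshold          # detection: average score vs threshold
+     mask = scores > threshold                            # localization: per-sample decision
+     attributed = None
+     if detected and mask.any():
+         decoded = (messages[mask].mean(dim=0) > 0.5).int()    # average message over detected samples
+         hamming = (decoded != model_ids.int()).sum(dim=1)     # distance to each known identifier
+         attributed = int(hamming.argmin())                    # closest model version
+     return detected, mask.int(), attributed
+ ```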
134
+
135
+ # 4. Audio/Speech Quality
136
+
137
+ We first evaluate the quality of the watermarked audio using the Scale-Invariant Signal-to-Noise Ratio, $\text{SI-SNR}(s, s_w) = 10 \log_{10} \left( \|\alpha s\|_2^2 / \|\alpha s - s_w\|_2^2 \right)$ with $\alpha = \langle s, s_w \rangle / \|s\|_2^2$, as well as PESQ (Rix et al., 2001), ViSQOL (Hines et al., 2012) and STOI (Taal et al., 2010), which are objective perceptual metrics measuring the quality of speech signals.
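+
+ A minimal sketch of the SI-SNR computation as defined above (PESQ, STOI and ViSQOL require dedicated tools):
+
+ ```python
+ import torch
+
+ def si_snr(s, s_w):
+     """Scale-invariant SNR in dB between original s and watermarked s_w, both shape (T,)."""
+     alpha = torch.dot(s, s_w) / s.pow(2).sum()
+     return 10 * torch.log10((alpha * s).pow(2).sum() / (alpha * s - s_w).pow(2).sum())
+ ```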
138
+
139
+ Table 1 reports these metrics. AudioSeal behaves differently than watermarking methods like WavMark (Chen et al., 2023) that are optimized for high SI-SNR. In practice, a high SI-SNR is not necessarily correlated with good perceptual quality. AudioSeal is not optimized for SI-SNR but rather for the perceptual quality of speech.
140
+
141
+ Table 1. Audio quality metrics. Compared to traditional watermarking methods like WavMark that maximize the SNR, AudioSeal achieves the same or better perceptual quality.
142
+
143
+ <table><tr><td>Methods</td><td>SI-SNR</td><td>PESQ</td><td>STOI</td><td>ViSQOL</td><td>MUSHRA</td></tr><tr><td>WavMark</td><td>38.25</td><td>4.302</td><td>0.997</td><td>4.730</td><td>71.52 ± 7.18</td></tr><tr><td>AudioSeal</td><td>26.00</td><td>4.470</td><td>0.997</td><td>4.829</td><td>77.07 ± 6.35</td></tr></table>
144
+
145
+ This is better captured by the other metrics (PESQ, STOI, ViSQOL), on which AudioSeal consistently achieves better performance. Put differently, our goal is to hide as much watermark power as possible while keeping it perceptually indistinguishable from the original. Figure 3 also visualizes how the watermark signal follows the shape of the speech waveform.
146
+
147
+ The metric used for our subjective evaluations is the MUSHRA test (Series, 2014). The complete details of our protocol can be found in Appendix D.4. In this study, our samples received ratings very close to those of the ground-truth samples, which obtained an average score of 80.49.
148
+
149
+ # 5. Experiments and Evaluation
150
+
151
+ This section evaluates the detection performance of passive classifiers, watermarking methods, and AudioSeal, using True Positive Rate (TPR) and False Positive Rate (FPR) as key metrics for watermark detection. TPR measures correct identification of watermarked samples, while FPR indicates the rate of genuine audio clips falsely flagged. In practical scenarios, minimizing FPR is crucial. For example, on a platform processing 1 billion samples daily, an FPR of $10^{-3}$ and a TPR of 0.5 means that 1 million samples require manual review each day, yet only half of the watermarked samples are detected.
152
+
153
+ # 5.1. Comparison with passive classifier
154
+
155
+ We first compare detection results on samples generated with Voicebox (Le et al., 2023). We compare to the passive setup, where a classifier is trained to discriminate between Voicebox-generated and real audios. Following the approach of the Voicebox study, we evaluate 2,000 approximately 5-second samples from LibriSpeech. In these samples, 90%, 50%, or 30% of the phonemes are masked before Voicebox generation. We evaluate on the same tasks, i.e. distinguishing between original and generated, or between original and re-synthesized (created by extracting the Mel spectrogram from the original audio and then vocoding it with the HiFi-GAN vocoder).
156
+
157
+ Both active and passive setups achieve perfect classification when trained to distinguish between natural and Voicebox-generated audio. Conversely, the second part of Tab. 2 highlights a significant drop in performance when the classifier is trained to differentiate between Voicebox-generated and re-synthesized audio.
158
+
159
+ Table 2. Comparison with Voicebox binary classifier. Percentage refers to the fraction of masked input frames.
160
+
161
+ <table><tr><td rowspan="2">% Mask</td><td colspan="3">AudioSeal (Ours)</td><td colspan="3">Voicebox Classif.</td></tr><tr><td>Acc.</td><td>TPR</td><td>FPR</td><td>Acc.</td><td>TPR</td><td>FPR</td></tr><tr><td colspan="7">Original audio vs AI-generated audio</td></tr><tr><td>30%</td><td>1.0</td><td>1.0</td><td>0.0</td><td>1.0</td><td>1.0</td><td>0.0</td></tr><tr><td>50%</td><td>1.0</td><td>1.0</td><td>0.0</td><td>1.0</td><td>1.0</td><td>0.0</td></tr><tr><td>90%</td><td>1.0</td><td>1.0</td><td>0.0</td><td>1.0</td><td>1.0</td><td>0.0</td></tr><tr><td colspan="7">Re-synthesized audio vs AI-generated audio</td></tr><tr><td>30%</td><td>1.0</td><td>1.0</td><td>0.0</td><td>0.704</td><td>0.680</td><td>0.194</td></tr><tr><td>50%</td><td>1.0</td><td>1.0</td><td>0.0</td><td>0.809</td><td>0.831</td><td>0.170</td></tr><tr><td>90%</td><td>1.0</td><td>1.0</td><td>0.0</td><td>0.907</td><td>0.942</td><td>0.112</td></tr></table>
162
+
163
+ Table 3. Detection results for different edits applied before detection. Acc. (TPR/FPR) is the accuracy (and TPR/FPR) obtained for the threshold that gives best accuracy on a balanced set of augmented samples. AUC is the area under the ROC curve.
164
+
165
+ <table><tr><td rowspan="2">Edit</td><td colspan="2">AudioSeal (Ours)</td><td colspan="2">WavMark</td></tr><tr><td>Acc. TPR/FPR</td><td>AUC</td><td>Acc. TPR/FPR</td><td>AUC</td></tr><tr><td>None</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>Bandpass</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>Highpass</td><td>0.61 0.82/0.60</td><td>0.61</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>Lowpass</td><td>0.99 0.99/0.00</td><td>0.99</td><td>0.50 1.00/1.00</td><td>0.50</td></tr><tr><td>Boost</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>Duck</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>Echo</td><td>1.00 1.00/0.00</td><td>1.00</td><td>0.93 0.89/0.03</td><td>0.98</td></tr><tr><td>Pink</td><td>1.00 1.00/0.00</td><td>1.00</td><td>0.88 0.81/0.05</td><td>0.93</td></tr><tr><td>White</td><td>0.91 0.86/0.04</td><td>0.95</td><td>0.50 0.54/0.54</td><td>0.50</td></tr><tr><td>Fast (1.25x)</td><td>0.99 0.99/0.00</td><td>1.00</td><td>0.50 0.01/0.00</td><td>0.15</td></tr><tr><td>Smooth</td><td>0.99 0.99/0.00</td><td>1.00</td><td>0.94 0.93/0.04</td><td>0.98</td></tr><tr><td>Resample</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>AAC</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 1.00/0.00</td><td>1.00</td></tr><tr><td>MP3</td><td>1.00 1.00/0.00</td><td>1.00</td><td>1.00 0.99/0.00</td><td>0.99</td></tr><tr><td>EnCodec</td><td>0.98 0.98/0.01</td><td>1.00</td><td>0.51 0.52/0.50</td><td>0.50</td></tr><tr><td>Average</td><td>0.96 0.98/0.04</td><td>0.97</td><td>0.85 0.85/0.14</td><td>0.84</td></tr></table>
166
+
167
+ This suggests that the classifier is detecting vocoder artifacts, since the re-synthesized samples are sometimes wrongly flagged. The classification performance quickly decreases as the quality of the AI-generated sample increases (when the input is less masked). On the other hand, our proactive detection does not rely on model-specific artifacts but on the presence of the watermark. This allows for perfect detection over all the audio clips.
168
+
169
+ # 5.2. Comparison with watermarking
170
+
171
+ We evaluate the robustness of the detection on a wide range of audio editing operations: time modification (faster, resample), filtering (bandpass, highpass, lowpass), audio effects (echo, boost audio, duck audio), noise (pink noise, random noise), and compression (MP3, AAC, EnCodec).
172
+
173
+ These attacks cover a wide range of transformations that are commonly used in audio editing software. For all edits except EnCodec compression, evaluation with parameters in the training range yields perfect detection. In order to show generalization, we chose stronger parameters for the attacks than those used during training (details in App. D.2).
174
+
175
+ Detection is done on 10k ten-second audio clips from our VoxPopuli validation set. For each edit, we first build a balanced dataset made of 10k watermarked and 10k non-watermarked edited audio clips. We quantify the performance by adjusting the threshold of the detection score, selecting the value that maximizes accuracy (we provide the corresponding TPR and FPR at this threshold). The ROC AUC (Area Under the Curve of the Receiver Operating Characteristics) gives a global measure of performance over all threshold levels, and captures the TPR/FPR trade-off. To adapt data-hiding methods (e.g. WavMark) for proactive detection, we embed a binary message (chosen randomly beforehand) in the generated speech before release. The detection score is then computed as the Hamming distance between the original message and the one extracted from the scrutinized audio.
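+
+ A sketch of this evaluation protocol (assuming scikit-learn; on a balanced set, the accuracy at a threshold is (TPR + 1 - FPR) / 2):
+
+ ```python
+ import numpy as np
+ from sklearn.metrics import roc_auc_score, roc_curve
+
+ def detection_metrics(scores_wm, scores_clean):
+     """scores_wm / scores_clean: detection scores on watermarked / non-watermarked edited clips."""
+     y_true = np.concatenate([np.ones(len(scores_wm)), np.zeros(len(scores_clean))])
+     y_score = np.concatenate([scores_wm, scores_clean])
+     fpr, tpr, thresholds = roc_curve(y_true, y_score)
+     acc = (tpr + 1 - fpr) / 2                  # accuracy on a balanced set, per threshold
+     best = int(np.argmax(acc))
+     return {"acc": acc[best], "tpr": tpr[best], "fpr": fpr[best],
+             "threshold": thresholds[best], "auc": roc_auc_score(y_true, y_score)}
+ ```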
176
+
177
+ We observe in Tab. 3 that AudioSeal is overall more robust, with an average AUC of 0.97 vs. 0.84 for WavMark. The performance for lowpass and highpass filters indicates that AudioSeal embeds watermarks neither in the low nor in the high frequencies (WavMark focuses on high frequencies). We give results on more augmentations in App. C.5.
178
+
179
+ Generalization. We evaluate how AudioSeal generalizes on various domains and languages. Specifically, we use the datasets ASVspoof (Liu et al., 2023b) and FakeAVCeleb (Khalid et al., 2021). Additionally, we translate speech samples from a subset of the Expresso dataset (Nguyen et al., 2023) (studio-quality recordings) using the SeamlessExpressive translation model (Seamless Communication et al., 2023). We select four target languages: Mandarin Chinese (CMN), French (FR), Italian (IT), and Spanish (SP). We also evaluate on non-speech AI-generated audios: music from MusicGen (Copet et al., 2023) and environmental sounds from AudioGen (Kreuk et al., 2023). Results are very similar to our in-domain test set and can be found in App. C.4.
180
+
181
+ # 5.3. Localization
182
+
183
+ We evaluate localization with the sample-level detection accuracy, i.e. the proportion of correctly labeled samples, and the Intersection over Union (IoU). The latter is defined as the intersection between the predicted and the ground truth detection masks (1 when watermarked, 0 otherwise), divided by their union. IoU is a more relevant evaluation of the localization of short watermarks in a longer audio.
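+
+ Both metrics are straightforward to compute from binary masks; a short sketch:
+
+ ```python
+ import numpy as np
+
+ def localization_metrics(pred, gt):
+     """pred, gt: binary detection masks of shape (T,) (1 = watermarked)."""
+     acc = float((pred == gt).mean())                       # sample-level accuracy
+     union = np.logical_or(pred, gt).sum()
+     iou = float(np.logical_and(pred, gt).sum() / union) if union else 1.0
+     return acc, iou
+ ```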
184
+
185
+ This evaluation is carried out on the same audio clips as for detection. For each of them, we watermark a randomly placed segment of varying length. Localization with WavMark is done by brute-force detection: a window of 1s slides over the 10s of speech with the default shift value of 0.05s. The Hamming distance between the 16 pattern bits is used as the detection score. Whenever a window triggers a positive, we label its 16k samples as watermarked in the detection mask in $\{0,1\}^t$.
186
+
187
+ Figure 5 plots the sample-level accuracy and IoU for different proportions of watermarked speech in the audio clip. AudioSeal achieves an IoU of 0.99 when just one second of speech is AI-manipulated, compared to WavMark's 0.35. Moreover, AudioSeal allows for precise detection of minor audio alterations: it can pinpoint AI-generated segments in audio down to the sample level (usually 1/16k sec), while the concurrent WavMark only provides one-second resolution and therefore lags behind in terms of IoU. This is especially relevant for speech samples, where a simple word modification may greatly change meaning.
188
+
189
+ # 5.4. Attribution
190
+
191
+ Given an audio clip, the objective is now to find whether any of $N$ versions of our model generated it (detection), and if so, which one (identification). For evaluation, we create $N' = 100$ random 16-bit messages and use them to watermark 1k audio clips, each consisting of 5 seconds of speech (not 10s, to reduce compute needs). This results in a total of 100k audios. For WavMark, the first 16 bits (/32) are fixed and the detection score is the number of correctly decoded pattern bits, while the second half of the payload hides the model version. An audio clip is flagged if the average output of the detector exceeds a threshold, corresponding to $\mathrm{FPR} = 10^{-3}$. Next, we calculate the Hamming distance between the decoded watermark and all $N$ original messages.
192
+
193
+ ![](images/d9f428c60268b1a8dc6636be9cf2b06890f09ee11321e2c5846585370b8ada24.jpg)
194
+ Figure 5. Localization results across different durations of watermarked audio signals in terms of Sample-Level Accuracy and Intersection Over Union (IoU) metrics ( $\uparrow$ is better).
195
+
196
+ Table 4. Attribution results. We report the accuracy of the attribution (Acc.) and false attribution rate (FAR). Detection is done at $\mathrm{FPR} = 10^{-3}$ and attribution matches the decoded message to one of $N$ versions. We report averaged results over the edits of Tab. 3.
197
+
198
+ <table><tr><td></td><td>N</td><td>1</td><td>10</td><td>10²</td><td>10³</td><td>10⁴</td></tr><tr><td rowspan="2">FAR (%) ↓</td><td>WavMark</td><td>0.0</td><td>0.20</td><td>0.98</td><td>1.87</td><td>4.02</td></tr><tr><td>AudioSeal</td><td>0.0</td><td>2.52</td><td>6.83</td><td>8.96</td><td>11.84</td></tr><tr><td rowspan="2">Acc. (%) ↑</td><td>WavMark</td><td>58.4</td><td>58.2</td><td>57.4</td><td>56.6</td><td>54.4</td></tr><tr><td>AudioSeal</td><td>68.2</td><td>65.4</td><td>61.4</td><td>59.3</td><td>56.4</td></tr></table>
199
+
200
+ The message with the smallest Hamming distance is selected. It's worth noting that we can simulate $N > N'$ models by adding extra messages. This may represent versions that have not generated any sample.
201
+
202
+ The False Attribution Rate (FAR) is the fraction of wrong attributions among the detected audios, while the attribution accuracy is the proportion of detections followed by a correct attribution over all audios. AudioSeal has a higher FAR but overall gives better accuracy, which is what ultimately matters. In summary, decoupling detection and attribution achieves a better detection rate and makes the global accuracy better, at the cost of occasional false attributions.
203
+
204
+ # 5.5. Efficiency Analysis
205
+
206
+ To highlight the efficiency of AudioSeal, we conduct a performance analysis and compare it with WavMark. We apply the watermark generator and detector of both models on a dataset of 500 audio segments ranging in length from 1 to 10 seconds, using a single Nvidia Quadro GP100 GPU. The results are displayed in Fig. 6 and Tab. 5. In terms of generation, AudioSeal is $14\times$ faster than WavMark. For detection, AudioSeal outperforms WavMark by two orders of magnitude on average, notably being $485\times$ faster in scenarios where there is no watermark (Tab. 5).
207
+
208
+ ![](images/82898d37be4701eae0451adb165d2c7ce73825a6edf04610a991d6cee9c60697.jpg)
209
+ Figure 6. Mean runtime ( $\downarrow$ is better). AudioSeal is one order of magnitude faster for watermark generation and two orders of magnitude faster for watermark detection for the same audio input. See Appendix C.1 for full comparison.
210
+
211
+ This remarkable speed increase is due to our model's localized watermark design, which bypasses the need for watermark synchronization (recall that WavMark relies on 20 forward passes for a one-second snippet). AudioSeal's detector provides detection logits for each input sample directly, with only one pass of the detector, significantly enhancing the computational efficiency of detection. This makes our system highly suitable for real-time and large-scale applications.
212
+
213
+ # 6. Adversarial Watermark Removal
214
+
215
+ We now examine more damaging deliberate attacks, where attackers might either "forge" the watermark by adding it to authentic samples (to overwhelm detection systems) or "remove" it to avoid detection. Our findings suggest that in order to maintain the effectiveness of watermarking against such adversaries, the code for training watermarking models and the awareness that published audios are watermarked can be made public. However, the detector's weights should be kept confidential.
216
+
217
+ We focus on watermark-removal attacks and consider three types of attacks depending on the adversary's knowledge:
218
+
219
+ - White-box: the adversary has access to the detector (e.g. because of a leak), and performs a gradient-based adversarial attack against it. The optimization objective is to minimize the detector's output.
220
+ - Semi black-box: the adversary does not have access to any weights, but is able to re-train generator/detector pairs with the same architectures on the same dataset. They perform the same gradient-based attack as before, but using the new detector as proxy for the original one.
221
+ - Black-box: the adversary does not have any knowledge of the watermarking algorithm being used, but has access to an API that produces watermarked samples, and to negative speech samples from any public dataset.
222
+
223
+ ![](images/4acd5f64daa9912c937abdd56a1c345200122be55d4ab3c28f42a011bef9e96b.jpg)
224
+ Figure 7. Watermark-removal attacks. PESQ is measured between attacked audios and genuine ones (PESQ $< 4$ strongly degrades the audio quality). The more knowledge the attacker has over the watermarking algorithm, the better the attack is.
225
+
226
+ They first collect samples and train a classifier to discriminate between watermarked and non-watermarked audio. They then attack this classifier as if it were the true detector.
227
+
228
+ For every scenario, we watermark 1k samples of 5 seconds, then attack them. The gradient-based attack optimizes an adversarial noise added to the audio, with 100 steps of Adam. During the optimization, we control the norm of the noise to trade off attack strength and audio quality. When training the classifier for the black-box attack, we use $80\mathrm{k} / 80\mathrm{k}$ watermarked/genuine samples of 8 seconds and make sure the classifier has $100\%$ detection accuracy on the validation set. More details in App. D.5.
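+
+ A sketch of the white-box gradient-based removal attack (our illustration; the detector is assumed to map a waveform to per-sample probabilities, and the amplitude bound on the noise trades off attack strength against audio quality):
+
+ ```python
+ import torch
+
+ def whitebox_removal(s_w, detector, steps: int = 100, lr: float = 1e-3, max_amp: float = 1e-2):
+     """Optimize an additive noise that minimizes the detector's output on s_w (shape (1, T))."""
+     noise = torch.zeros_like(s_w, requires_grad=True)
+     opt = torch.optim.Adam([noise], lr=lr)
+     for _ in range(steps):
+         opt.zero_grad()
+         detector(s_w + noise).mean().backward()   # push detection scores toward 0
+         opt.step()
+         with torch.no_grad():
+             noise.clamp_(-max_amp, max_amp)        # bound the perturbation amplitude
+     return (s_w + noise).detach()
+ ```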
229
+
230
+ Figure 7 contrasts the various attacks at different intensities, using Gaussian noise as a reference. The white-box attack is by far the most effective one, increasing the detection error by around $80\%$ while maintaining high audio quality $(\mathrm{PESQ} > 4)$. The other attacks are less effective, requiring significant audio quality degradation to achieve a $50\%$ increase in the detection error, though they are still more effective than random noise addition. In summary, the more that is disclosed about the watermarking algorithm, the more vulnerable it is. The effectiveness of these attacks is limited as long as the detector remains confidential.
231
+
232
+ # 7. Conclusion
233
+
234
+ In this paper, we introduced AudioSeal, a proactive method for the detection, localization, and attribution of AI-generated speech. AudioSeal revamps the design of audio watermarking to be specific to localized detection rather than data hiding. It is based on a generator/detector architecture that can generate and extract watermarks at the audio sample level. This removes the dependency on slow brute-force algorithms, traditionally used to encode and decode audio watermarks. The networks are jointly trained through a novel loudness loss, differentiable augmentations, and masked sample-level detection losses. As a result, AudioSeal achieves state-of-the-art robustness to various audio editing techniques, very high precision in localization, and orders of magnitude faster runtime than methods relying on synchronization. Through an empirical analysis of possible adversarial attacks, we conclude that for watermarking to remain an effective mitigation, the detector's weights have to be kept private; otherwise, the watermark could easily be removed or forged through adversarial attacks. A key advantage of AudioSeal is its practical applicability. It stands as a ready-to-deploy solution for watermarking in voice synthesis APIs. This is pivotal for large-scale content provenance on social media and for detecting and addressing incidents, enabling swift action on instances like the US voters' deepfake case (Murphy et al., 2024) long before they spread.
235
+
236
+ # Impact Statement
237
+
238
+ This research aims to improve transparency and traceability in AI-generated content, but watermarking in general has a set of potential misuses, such as government surveillance of dissidents or corporate identification of whistleblowers. Additionally, the watermarking technology might be misused to enforce copyright on user-generated content, and its ability to detect AI-generated audio could increase skepticism about the authenticity of digital communication, potentially undermining trust in digital media and AI. However, despite these risks, ensuring the detectability of AI-generated content is important, along with advocating for robust security measures and legal frameworks to govern the technology's use.
239
+
240
+ # References
241
+
242
+ Chinese ai governance rules, 2023. URL http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm. Accessed on August 29, 2023.
243
+ European ai act, 2023. URL https://artificialintelligenceact.eu/. Accessed on August 29, 2023.
244
+ Aaronson, S. and Kirchner, H. Watermarking gpt outputs, 2023. URL https://www.scottaarson.com/talks/watermark.ppt.
245
+ AlBadawy, E. A., Lyu, S., and Farid, H. Detecting AI-synthesized speech using bispectral analysis. In CVPR workshops, pp. 104-109, 2019.
246
+ Arik, S., Chen, J., Peng, K., Ping, W., and Zhou, Y. Neural voice cloning with a few samples. Advances in neural information processing systems, 31, 2018.
247
+ Bai, H., Zheng, R., Chen, J., Ma, M., Li, X., and Huang, L. A³t: Alignment-aware acoustic and text pretraining for speech synthesis and editing. In Chaudhuri, K., Jegelka, S., Song, L., Szepesvári, C., Niu, G., and Sabato, S. (eds.), International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pp. 1399-1411. PMLR, 2022. URL https://proceedings.mlr.press/v162/bai22d.html.
248
+ Barrington, S., Barua, R., Koorma, G., and Farid, H. Single and multi-speaker cloned voice detection: From perceptual to learned features. arXiv preprint arXiv:2307.07683, 2023.
249
+ Borrelli, C., Bestagini, P., Antonacci, F., Sarti, A., and Tubaro, S. Synthetic speech detection through short
250
+
251
+ term and long-term prediction traces. EURASIP Journal on Information Security, 2021(1):1-14, 2021.
252
+ Borsos, Z., Marinier, R., Vincent, D., Kharitonov, E., Pietquin, O., Sharifi, M., Roblek, D., Teboul, O., Grangier, D., Tagliasacchi, M., and Zeghidour, N. AudioLM: A language modeling approach to audio generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 31:2523-2533, 2022.
253
+ Borsos, Z., Sharifi, M., Vincent, D., Kharitonov, E., Zeghidour, N., and Tagliasacchi, M. Soundstorm: Efficient parallel audio generation. arXiv preprint arXiv:2305.09636, 2023.
254
+ Casanova, E., Weber, J., Shulby, C. D., Junior, A. C., Gölge, E., and Ponti, M. A. Yourtts: Towards zero-shot multi-speaker tts and zero-shot voice conversion for everyone. In International Conference on Machine Learning, pp. 2709-2720. PMLR, 2022.
255
+ Chen, G., Wu, Y., Liu, S., Liu, T., Du, X., and Wei, F. Wavmark: Watermarking for audio generation. arXiv preprint arXiv:2308.12770, 2023.
256
+ Copet, J., Kreuk, F., Gat, I., Remez, T., Kant, D., Synnaeve, G., Adi, Y., and Defossez, A. Simple and controllable music generation. arXiv preprint arXiv:2306.05284, 2023.
257
+ Defossez, A., Synnaeve, G., and Adi, Y. Real time speech enhancement in the waveform domain, 2020.
258
+ Défossez, A., Copet, J., Synnaeve, G., and Adi, Y. High fidelity neural audio compression. arXiv preprint arXiv:2210.13438, 2022.
259
+ Fernandez, P., Chaffin, A., Tit, K., Chappelier, V., and Furon, T. Three bricks to consolidate watermarks for large language models. 2023 IEEE International Workshop on Information Forensics and Security (WIFS), 2023a.
260
+ Fernandez, P., Couairon, G., Jégou, H., Douze, M., and Furon, T. The stable signature: Rooting watermarks in latent diffusion models. ICCV, 2023b.
261
+ Furon, T. A constructive and unifying framework for zero-bit watermarking. IEEE Transactions on Information Forensics and Security, 2(2):149-163, 2007.
262
+ Gemmeke, J. F., Ellis, D. P., Freedman, D., Jansen, A., Lawrence, W., Moore, R. C., Plakal, M., and Ritter, M. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 776-780. IEEE, 2017.
263
+
264
+ Gritsenko, A., Salimans, T., van den Berg, R., Snoek, J., and Kalchbrenner, N. A spectral energy distance for parallel speech synthesis. Advances in Neural Information Processing Systems, 33:13062-13072, 2020.
265
+ Hines, A., Skoglund, J., Kokaram, A., and Harte, N. Visqol: The virtual speech quality objective listener. In IWAENC 2012; international workshop on acoustic signal enhancement, pp. 1-4. VDE, 2012.
266
+ Hsu, W.-N., Akinyemi, A., Rakotoarison, A., Tjandra, A., Vyas, A., Guo, B., Akula, B., Shi, B., Ellis, B., Cruz, I., Wang, J., Zhang, J., Williamson, M., Le, M., Moritz, R., Adkins, R., Ngan, W., Zhang, X., Yungster, Y., and Wu, Y.-C. Audiobox: Unified audio generation with natural language prompts. arXiv preprint arXiv:...., 2023.
267
+ Janicki, A. Spoofing countermeasure based on analysis of linear prediction error. In Sixteenth annual conference of the international speech communication association, 2015.
268
+ Juvela, L. and Wang, X. Collaborative watermarking for adversarial speech synthesis. arXiv preprint arXiv:2309.15224, 2023.
269
+ Kalantari, N. K., Akhaee, M. A., Ahadi, S. M., and Amindavar, H. Robust multiplicative patchwork method for audio watermarking. IEEE Trans. Speech Audio Process., 17(6):1133-1141, 2009. doi: 10.1109/TASL.2009.2019259. URL https://doi.org/10.1109/TASL.2009.2019259.
270
+ Khalid, H., Tariq, S., and Woo, S. S. Fakeavceleb: A novel audio-video multimodal deepfake dataset, 2021.
271
+ Kharitonov, E., Vincent, D., Borsos, Z., Marinier, R., Girgin, S., Pietquin, O., Sharifi, M., Tagliasacchi, M., and Zeghidour, N. Speak, read and prompt: High-fidelity text-to-speech with minimal supervision. ArXiv, abs/2302.03540, 2023.
272
+ Kim, C., Min, K., Patel, M., Cheng, S., and Yang, Y. Wouaf: Weight modulation for user attribution and fingerprinting in text-to-image diffusion models. arXiv preprint arXiv:2306.04744, 2023.
273
+ Kim, J., Kong, J., and Son, J. Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech. In International Conference on Machine Learning, pp. 5530-5540. PMLR, 2021.
274
+ Kirchenbauer, J., Geiping, J., Wen, Y., Katz, J., Miers, I., and Goldstein, T. A watermark for large language models. arXiv preprint arXiv:2301.10226, 2023.
275
+ Kirovski, D. and Attias, H. Audio watermark robustness to desynchronization via beat detection. In Petitcolas,
276
+
277
+ F. A. P. (ed.), Information Hiding, pp. 160-176, Berlin, Heidelberg, 2003. Springer Berlin Heidelberg. ISBN 978-3-540-36415-3.
278
+ Kirovski, D. and Malvar, H. S. Spread-spectrum watermarking of audio signals. IEEE Trans. Signal Process., 51(4):1020-1033, 2003. doi: 10.1109/TSP.2003.809384. URL https://doi.org/10.1109/TSP.2003.809384.
279
+ Kong, J., Kim, J., and Bae, J. Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis. In Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., and Lin, H. (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 17022-17033. Curran Associates, Inc., 2020.
280
+ Kreuk, F., Synnaeve, G., Polyak, A., Singer, U., Defossez, A., Copet, J., Parikh, D., Taigman, Y., and Adi, Y. Audiogen: Textually guided audio generation. In The Eleventh International Conference on Learning Representations, 2023.
281
+ Kumar, K., Kumar, R., de Boissiere, T., Gestin, L., Teoh, W. Z., Sotelo, J. M. R., de Brébisson, A., Bengio, Y., and Courville, A. C. Melgan: Generative adversarial networks for conditional waveform synthesis. In Neural Information Processing Systems, 2019.
282
+ Kumar, R., Seetharaman, P., Luebs, A., Kumar, I., and Kumar, K. High-fidelity audio compression with improved rvqgan. ArXiv, abs/2306.06546, 2023.
283
+ Le, M., Vyas, A., Shi, B., Karrer, B., Sari, L., Moritz, R., Williamson, M., Manohar, V., Adi, Y., Mahadeokar, J., et al. Voicebox: Text-guided multilingual universal speech generation at scale. arXiv preprint arXiv:2306.15687, 2023.
284
+ Lie, W. and Chang, L. Robust and high-quality time-domain audio watermarking based on low-frequency amplitude modification. IEEE Trans. Multim., 8 (1):46-59, 2006. doi: 10.1109/TMM.2005.861292. URL https://doi.org/10.1109/TMM.2005.861292.
285
+ Liu, C., Zhang, J., Fang, H., Ma, Z., Zhang, W., and Yu, N. Dear: A deep-learning-based audio re-recording resilient watermarking. In Williams, B., Chen, Y., and Neville, J. (eds.), Thirty-Seventh AAAI Conference on Artificial Intelligence, AAAI 2023, Thirty-Fifth Conference on Innovative Applications of Artificial Intelligence, IAAI 2023, Thirteenth Symposium on Educational Advances in Artificial Intelligence, EAAI 2023, Washington, DC, USA, February 7-14, 2023, pp. 13201-13209. AAAI Press, 2023a. doi: 10.1609/aaai.v37i11.26550.
286
+
287
+ Liu, X., Wang, X., Sahidullah, M., Patino, J., Delgado, H., Kinnunen, T., Todisco, M., Yamagishi, J., Evans, N., Nautsch, A., et al. Asvspoof 2021: Towards spoofed and deepfake speech detection in the wild. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023b.
288
+ Liu, Z., Huang, Y., and Huang, J. Patchwork-based audio watermarking robust against de-synchronization and recapturing attacks. IEEE Trans. Inf. Forensics Secur., 14(5):1171-1180, 2019. doi: 10.1109/TIFS.2018.2871748. URL https://doi.org/10.1109/TIFS.2018.2871748.
289
+ Luo, Y. and Mesgarani, N. Conv-tasnet: Surpassing ideal time-frequency magnitude masking for speech separation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 27(8):1256-1266, 2019. doi: 10.1109/TASLP.2019.2915167.
290
+ Luo, Y., Chen, Z., and Yoshioka, T. Dual-path rnn: efficient long sequence modeling for time-domain single-channel speech separation. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 46-50. IEEE, 2020.
291
+ Müller, N. M., Czempin, P., Dieckmann, F., Froghyar, A., and Böttinger, K. Does audio deepfake detection generalize? arXiv preprint arXiv:2203.16263, 2022.
292
+ Murphy, M., Metz, R., Bergen, M., and Bloomberg. Biden audio deepfake spurs ai startup elevenlabs—valued at $1.1 billion—to ban account: 'we're going to see a lot more of this'. Fortune, January 2024. URL https://fortune.com/2024/01/27/ai-firm-elevenlabs-bans-account-for-biden-audio-deepfake/.
293
+ Natgunanathan, I., Xiang, Y., Rong, Y., Zhou, W., and Guo, S. Robust patchwork-based embedding and decoding scheme for digital audio watermarking. IEEE Trans. Speech Audio Process., 20(8):2232-2239, 2012. doi: 10.1109/TASL.2012.2199111. URL https://doi.org/10.1109/TASL.2012.2199111.
294
+ Nguyen, T. A., Hsu, W.-N., d'Avirro, A., Shi, B., Gat, I., Fazel-Zarani, M., Remez, T., Copet, J., Synnaeve, G., Hassid, M., et al. Expresso: A benchmark and analysis of discrete expressive speech resynthesis. arXiv preprint arXiv:2308.05725, 2023.
295
+ Pavlović, K., Kovačević, S., Djurovic, I., and Wojciechowski, A. Robust speech watermarking by a jointly trained embedder and detector using a dnn. Digital Signal Processing, 122:103381, 2022.
296
+ Qu, X., Yin, X., Wei, P., Lu, L., and Ma, Z. Audioqr: Deep neural audio watermarks for qr code. *IJCAI*, 2023.
297
+
298
+ Ren, Y., Zhu, H., Zhai, L., Sun, Z., Shen, R., and Wang, L. Who is speaking actually? robust and versatile speaker traceability for voice conversion. arXiv preprint arXiv:2305.05152, 2023.
299
+ Rix, A. W., Beerends, J. G., Hollier, M. P., and Hekstra, A. P. Perceptual evaluation of speech quality (pesq)-a new method for speech quality assessment of telephone networks and codes. In 2001 IEEE international conference on acoustics, speech, and signal processing. Proceedings (Cat. No. 01CH37221), volume 2, pp. 749-752. IEEE, 2001.
300
+ Sahidullah, M., Kinnunen, T., and Hanilci, C. A comparison of features for synthetic speech detection. ISCA (the International Speech Communication Association), 2015.
301
+ Schnupp, J., Nelken, I., and King, A. Auditory neuroscience: Making sense of sound. MIT press, 2011.
302
+ Seamless Communication, Barrault, L., Chung, Y.-A., Meglioli, M. C., Dale, D., Dong, N., Duppenthaler, M., Duquenne, P.-A., Ellis, B., Elsahar, H., Haaheim, J., Hoffman, J., Hwang, M.-J., Inaguma, H., Klaiber, C., Kulikov, I., Li, P., Licht, D., Maillard, J., Mavlyutov, R., Rakotoarison, A., Sadagopan, K. R., Ramakrishnan, A., Tran, T., Wenzek, G., Yang, Y., Ye, E., Evtimov, I., Fernandez, P., Gao, C., Hansanti, P., Kalbassi, E., Kallet, A., Kozhevnikov, A., Mejia, G., Roman, R. S., Touret, C., Wong, C., Wood, C., Yu, B., Andrews, P., Balioglu, C., Chen, P.-J., Costa-jussa, M. R., Elbayad, M., Gong, H., Guzmán, F., Heffernan, K., Jain, S., Kao, J., Lee, A., Ma, X., Mourachko, A., Peloquin, B., Pino, J., Popuri, S., Ropers, C., Saleem, S., Schwenk, H., Sun, A., Tomasello, P., Wang, C., Wang, J., Wang, S., and Williamson, M. Seamless: Multilingual expressive and streaming speech translation. 2023.
303
+ Series, B. Method for the subjective assessment of intermediate quality level of audio systems. International Telecommunication Union Radiocommunication Assembly, 2014.
304
+ Shen, K., Ju, Z., Tan, X., Liu, Y., Leng, Y., He, L., Qin, T., Zhao, S., and Bian, J. Naturalspeech 2: Latent diffusion models are natural and zero-shot speech and singing synthesizers. CoRR, abs/2304.09116, 2023. doi: 10.48550/ARXIV.2304.09116. URL https://doi.org/10.48550/arXiv.2304.09116.
305
+ Su, Z., Zhang, G., Yue, F., Chang, L., Jiang, J., and Yao, X. Snr-constrained heuristics for optimizing the scaling parameter of robust audio watermarking. IEEE Trans. Multim., 20(10):2631-2644, 2018. doi: 10.1109/TMM.2018.2812599. URL https://doi.org/10.1109/TMM.2018.2812599.
306
+
307
+ Taal, C. H., Hendriks, R. C., Heusdens, R., and Jensen, J. A short-time objective intelligibility measure for time-frequency weighted noisy speech. In 2010 IEEE international conference on acoustics, speech and signal processing, pp. 4214-4217. IEEE, 2010.
308
+ Tai, Y.-Y. and Mansour, M. F. Audio watermarking over the air with modulated self-correlation. In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2452-2456. IEEE, 2019.
309
+ International Telecommunication Union. Algorithms to measure audio programme loudness and true-peak audio level. Recommendation ITU-R BS.1770, 2011.
310
+ USA. Ensuring safe, secure, and trustworthy AI. https://www.whitehouse.gov/wp-content/uploads/2023/07/Ensuring-Safe-Secure-and-Trustworthy-AI.pdf, July 2023. Accessed: July 2023.
311
+ van den Oord, A., Dieleman, S., Zen, H., Simonyan, K., Vinyals, O., Graves, A., Kalchbrenner, N., Senior, A., and Kavukcuoglu, K. Wavenet: A generative model for raw audio. In Arxiv, 2016.
312
+ Wang, C., Rivière, M., Lee, A., Wu, A., Talnikar, C., Haziza, D., Williamson, M., Pino, J. M., and Dupoux, E. Voxpopuli: A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation. In Zong, C., Xia, F., Li, W., and Navigli, R. (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1-6, 2021, pp. 993-1003. Association for Computational Linguistics, 2021. doi: 10.18653/V1/2021.ACL-LONG.80. URL https://doi.org/10.18653/v1/2021.acl-long.80.
313
+ Wang, C., Chen, S., Wu, Y., Zhang, Z., Zhou, L., Liu, S., Chen, Z., Liu, Y., Wang, H., Li, J., et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023.
314
+ Wen, Y., Kirchenbauer, J., Geiping, J., and Goldstein, T. Tree-ring watermarks: Fingerprints for diffusion images that are invisible and robust. arXiv preprint arXiv:2305.20030, 2023.
315
+ Wu, S., Liu, J., Huang, Y., Guan, H., and Zhang, S. Adversarial audio watermarking: Embedding watermark into deep feature. In 2023 IEEE International Conference on Multimedia and Expo (ICME), pp. 61-66. IEEE, 2023.
316
+
317
+ Xiang, Y., Natgunanathan, I., Guo, S., Zhou, W., and Nahavandi, S. Patchwork-based audio watermarking method robust to de-synchronization attacks. IEEE ACM Trans. Audio Speech Lang. Process., 22(9):1413-1423, 2014. doi: 10.1109/TASLP.2014.2328175. URL https://doi.org/10.1109/TASLP.2014.2328175.
318
+ Xiang, Y., Natgunanathan, I., Peng, D., Hua, G., and Liu, B. Spread spectrum audio watermarking using multiple orthogonal PN sequences and variable embedding strengths and polarities. IEEE ACM Trans. Audio Speech Lang. Process., 26(3):529-539, 2018. doi: 10.1109/TASLP.2017.2782487. URL https://doi.org/10.1109/TASLP.2017.2782487.
319
+ Yang, Y.-Y., Hira, M., Ni, Z., Chourdia, A., Astafurov, A., Chen, C., Yeh, C.-F., Puhrsch, C., Pollack, D., Genzel, D., Greenberg, D., Yang, E. Z., Lian, J., Mahadeokar, J., Hwang, J., Chen, J., Goldsborough, P., Roy, P., Narethiran, S., Watanabe, S., Chintala, S., Quenneville-Bélair, V., and Shi, Y. Torchaudio: Building blocks for audio and speech processing. arXiv preprint arXiv:2110.15018, 2021.
320
+ Yin, P., Lyu, J., Zhang, S., Osher, S., Qi, Y., and Xin, J. Understanding straight-through estimator in training activation quantized neural nets. arXiv preprint arXiv:1903.05662, 2019.
321
+ Yu, N., Skripniuk, V., Abdelnabi, S., and Fritz, M. Artificial fingerprinting for generative models: Rooting deepfake attribution in training data. In Proceedings of the IEEE/CVF International conference on computer vision, pp. 14448-14457, 2021a.
322
+ Yu, N., Skripniuk, V., Chen, D., Davis, L. S., and Fritz, M. Responsible disclosure of generative models using scalable fingerprinting. In International Conference on Learning Representations, 2021b.
323
+ Zeghidour, N., Luebs, A., Omran, A., Skoglund, J., and Tagliasacchi, M. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30:495-507, 2022. doi: 10.1109/TASLP.2021.3129994.
324
+ Zhang, C., Yu, C., and Hansen, J. H. An investigation of deep-learning frameworks for speaker verification antispoofing. IEEE Journal of Selected Topics in Signal Processing, 11(4):684-694, 2017.
325
+
326
+ # A. Extended related work
327
+
328
+ Zero-shot TTS and vocal style preservation. There has been an emergence of models that imitate or preserve vocal style using only a small amount of data. One key example is zero-shot text-to-speech (TTS) models. These models create speech in vocal styles they haven't been specifically trained on. For instance, models like VALL-E (Wang et al., 2023), YourTTS (Casanova et al., 2022), and NaturalSpeech 2 (Shen et al., 2023) synthesize high-quality personalized speech from only a 3-second recording. On top of that, zero-shot TTS models like Voicebox (Le et al., 2023), $\mathrm{A}^{3}\mathrm{T}$ (Bai et al., 2022) and Audiobox (Hsu et al., 2023), with their non-autoregressive inference, perform tasks such as text-guided speech infilling, where the goal is to generate masked speech given its surrounding audio and text transcript. This makes them powerful tools for speech manipulation. In the context of speech machine translation, SeamlessExpressive (Seamless Communication et al., 2023) is a model that not only translates speech, but also retains the speaker's unique vocal style and emotional inflections, thereby broadening the capabilities of such systems.
329
+
330
+ Audio generation and compression. Early models are autoregressive like WaveNet (van den Oord et al., 2016), with dilated convolutions and waveform reconstruction as objective. Subsequent approaches explore different audio losses, such as scale-invariant signal-to-noise ratio (SISNR) (Luo & Mesgarani, 2019) or Mel spectrogram distance (Defossez et al., 2020). None of these objectives are deemed ideal for audio quality, leading to the adoption of adversarial models in HiFi-GAN (Kong et al., 2020) or MelGAN (Kumar et al., 2019). Our training objectives and architectures are inspired by more recent neural audio compression models (Defossez et al., 2022; Kumar et al., 2023; Zeghidour et al., 2022), that focus on high-quality waveform generation and integrate a combination of these diverse objectives in their training processes.
331
+
332
+ Synchronization and detection speed. To accurately extract watermarks, synchronization between the encoder and decoder is crucial. However, this can be disrupted by desynchronization attacks such as time and pitch scaling. To address this issue, various techniques have been developed. One approach is block repetition, which repeats the watermark signal along both the time and frequency domains (Kirovski & Malvar, 2003; Kirovski & Attias, 2003). Another method involves implanting synchronization bits into the watermarked signal (Xiang et al., 2014). During decoding, these synchronization bits serve to improve synchronization and mitigate the effects of de-synchronization attacks. Detecting these synchronization bits usually involves an exhaustive search using brute-force algorithms, which significantly slows down decoding.
333
+
334
+ # B. False Positive Rates - Theory and Practice
335
+
336
+ Theoretical FPR. When doing multi-bit watermarking, previous works (Yu et al., 2021a; Kim et al., 2023; Fernandez et al., 2023b; Chen et al., 2023) usually extract the message $m'$ from the content $x$ and compare it to the original binary signature $m \in \{0,1\}^k$ embedded in the speech sample. The detection test relies on the number of matching bits $M(m,m')$ :
337
+
338
+ $$
339
+ \text{if } M\left(m, m'\right) \geq \tau \text{ where } \tau \in \{0, \dots, k\}, \tag{4}
340
+ $$
341
+
342
+ then the audio is flagged. This provides theoretical guarantees over the false positive rates.
343
+
344
+ Formally, the statistical hypotheses are $H_{1}$ : "The audio signal $x$ is watermarked", and the null hypothesis $H_{0}$ : "The audio signal $x$ is genuine". Under $H_{0}$ (i.e., for unmarked audio), if the bits $m_{1}', \ldots, m_{k}'$ are independent and identically distributed (i.i.d.) Bernoulli random variables with parameter 0.5, then $M(m, m')$ follows a binomial distribution with parameters $(k, 0.5)$ . The False Positive Rate (FPR) is defined as the probability that $M(m, m')$ exceeds a given threshold $\tau$ . A closed-form expression can be given using the regularized incomplete beta function $I_{x}(a; b)$ (linked to the CDF of the binomial distribution):
345
+
346
+ $$
347
+ \operatorname{FPR}(\tau) = \mathbb{P}\left(M \geq \tau \mid H_{0}\right) = I_{1/2}(\tau, k - \tau + 1). \tag{5}
348
+ $$
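+ As a concrete illustration of Eq. (4) and Eq. (5), the following is a minimal sketch (not part of the original evaluation code) that counts matching bits and computes the theoretical FPR with SciPy; the only assumption is that the extracted bits are i.i.d. Bernoulli(0.5) under $H_0$:
+
+ ```python
+ import numpy as np
+ from scipy.stats import binom
+
+ def matching_bits(m: np.ndarray, m_prime: np.ndarray) -> int:
+     """Number of matching bits M(m, m') between two k-bit messages."""
+     return int(np.sum(m == m_prime))
+
+ def theoretical_fpr(tau: int, k: int = 32) -> float:
+     """P(M >= tau | H0) when the k extracted bits are i.i.d. Bernoulli(0.5).
+
+     binom.sf(tau - 1, k, 0.5) equals the survival function P(M >= tau),
+     which matches the regularized incomplete beta form I_{1/2}(tau, k - tau + 1).
+     """
+     return float(binom.sf(tau - 1, k, 0.5))
+
+ if __name__ == "__main__":
+     k = 32
+     for tau in (20, 24, 28, 32):
+         print(f"tau={tau:2d}  FPR={theoretical_fpr(tau, k):.2e}")
+ ```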
349
+
350
+ Empirical study. We empirically study the FPR of WavMark-based detection on our validation dataset. We use the same parameters as in the original paper, i.e. $k = 32$ -bits are extracted from 1s speech samples. We first extract the soft bits (before thresholding) from 10k genuine samples and plot the histogram of the scores in Fig. 8 (left). We should observe a Gaussian distribution with mean 0.5, while empirically the scores are centered around 0.38. This makes the decision heavily biased towards bit 0 on genuine samples. It is therefore impossible to theoretically set the FPR since this would largely underestimate the actual one.
351
+
352
+ ![](images/35a89af217c109c72ae5433082995560e1d8d05c1c7f9ff547056fc3037dca89.jpg)
353
+ Figure 8. (Left) Histogram of scores output by WavMark's extractor on 10k genuine samples. (Right) Empirical and theoretical FPR when the chosen hidden message is all zeros.
354
+
355
+ ![](images/c9cc72cecadc0953d9c250d6bf6e8046a432ee8d59953d6fb353e99867c85aca.jpg)
356
+
357
+ Table 5. Average runtime (ms) per sample of our proposed AudioSeal model against the state-of-the-art WavMark (Chen et al., 2023) method. Experiments were conducted on audio segments spanning 1 to 10 seconds, using a single Nvidia Quadro GP100 GPU. The results demonstrate substantial speed-ups for both watermark generation and detection, with and without a watermark present. Notably, for watermark detection, AudioSeal is $485\times$ faster than WavMark when no watermark is present; see Section 5.5 for details.
358
+
359
+ <table><tr><td>Model</td><td>Watermarked</td><td>Detection ms (speedup)</td><td>Generation ms (speedup)</td></tr><tr><td>Wavmark</td><td>No</td><td>1710.70 ± 1314.02</td><td>-</td></tr><tr><td>AudioSeal (ours)</td><td>No</td><td>3.25 ± 1.99 (485×)</td><td>-</td></tr><tr><td>Wavmark</td><td>Yes</td><td>106.21 ± 66.95</td><td>104.58 ± 65.66</td></tr><tr><td>AudioSeal (ours)</td><td>Yes</td><td>3.30 ± 2.03 (35×)</td><td>7.41 ± 4.52 (14×)</td></tr></table>
360
+
361
+ For instance, Figure 8 (right) shows the theoretical and empirical FPR for different values of $\tau$ when the chosen hidden message is all zeros. Put differently, the claim that hiding bits provides theoretical guarantees on detection rates does not hold in practice.
362
+
363
+ # C. Additional Experimental Results
364
+
365
+ # C.1. Computational efficiency
366
+
367
+ We show in Figure 9 the mean runtime of the detection and generation depending on the audio duration. Corresponding numbers are given in Table 5.
368
+
369
+ ![](images/4543344cdec428e24d62a85b1901578e5a68748e71f1bea0b634a065d6f61982.jpg)
370
+ Figure 9. Mean runtime ( $\downarrow$ is better) of AudioSeal versus WavMark. AudioSeal is one order of magnitude faster for watermark generation and two orders of magnitude faster for watermark detection on the same audio input, signifying a considerable enhancement in real-time audio watermarking efficiency.
371
+
372
+ # C.2. Another architecture
373
+
374
+ Our architecture relies on the SOTA compression method EnCodec. However, to further validate our approach, we conduct an ablation study using a different architecture, DPRNN (Luo et al., 2020). The results are presented in Tab. 6. They show that the performance of AudioSeal is consistent across architectures, with similar results for the much slower and heavier architecture of Luo et al. (2020). This indicates that model capacity is not a limiting factor for AudioSeal.
375
+
376
+ Table 6. Results of AudioSeal with different architectures for the generator and detector. The IoU is computed for 1s of watermark in 10s audios (corresponding to the leftmost point in Fig. 5).
377
+
378
+ <table><tr><td>Method</td><td>SISNR</td><td>STOI</td><td>PESQ</td><td>Acc.</td><td>IoU</td></tr><tr><td>EnCodec</td><td>26.00</td><td>0.997</td><td>4.470</td><td>1.00</td><td>0.802</td></tr><tr><td>DPRNN</td><td>26.7</td><td>0.996</td><td>4.421</td><td>1.00</td><td>0.796</td></tr></table>
379
+
380
+ # C.3. Audio mixing
381
+
382
+ We hereby evaluate the scenario where two watermarked signals (e.g., vocal and instrumental) are mixed together. To explore this, we conducted experiments using a non-vocal music dataset. In these experiments, we loudness-normalized and summed the watermarked speech and music segments. The results are detailed in Tab. 7.
383
+
384
+ Table 7. Detection results for watermarked speech and music mixed signals. $\checkmark$ and X indicate the presence and absence of the watermark, respectively.
385
+
386
+ <table><tr><td>Speech</td><td>BG Music</td><td>Acc. FPR / TPR</td><td>AUC</td></tr><tr><td>✓</td><td>✓</td><td>0.9996 0.0003 / 0.9996</td><td>0.9999</td></tr><tr><td>✓</td><td>X</td><td>0.9787 0.0310 / 0.9883</td><td>0.9961</td></tr></table>
387
+
388
+ # C.4. Out-of-domain (OOD) evaluations
389
+
390
+ As previously outlined in Sec. 5.2, we tested AudioSeal on the outputs of various voice cloning models and other audio modalities. We employed the same set of augmentations and observed very similar results, as demonstrated in Tab. 8. Interestingly, even though we did not train our model on AI-generated speech, we noticed an improvement in performance compared to our test data. No sample was misclassified among the 10k samples that comprised each of our out-of-distribution (OOD) datasets. We also provide the other perceptual metrics results on OOD data in Tab. 9.
391
+
392
+ We also evaluated AudioSeal on three additional datasets of real (non-AI-generated) audio: AudioSet (Gemmeke et al., 2017), ASVspoof (Liu et al., 2023b), and FakeAVCeleb (Khalid et al., 2021). Again, we observed similar performance, as shown in Tab. 10.
393
+
394
+ Table 8. Evaluation of AudioSeal generalization across domains and languages. Namely, translations of speech samples from the Expresso dataset (Nguyen et al., 2023) into five target languages: Mandarin Chinese (Cmn), Spanish (Spa), French (Fra), Italian (Ita), and German (Deu), using the SeamlessExpressive model (Seamless Communication et al., 2023); English speech from Voicebox; music from MusicGen (Copet et al., 2023); and environmental sounds from AudioGen (Kreuk et al., 2023).
395
+
396
+ <table><tr><td>Aug</td><td>Seamless (Cmn)</td><td>Seamless (Spa)</td><td>Seamless (Fra)</td><td>Seamless(Ita)</td><td>Seamless (Deu)</td><td>Voicebox (Eng)</td><td>AudioGen</td><td>MusicGen</td></tr><tr><td>None</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Bandpass</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Highpass</td><td>0.71</td><td>0.68</td><td>0.70</td><td>0.70</td><td>0.70</td><td>0.64</td><td>0.52</td><td>0.52</td></tr><tr><td>Lowpass</td><td>1.00</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Boost</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Duck</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Echo</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Pink</td><td>0.99</td><td>1.00</td><td>0.99</td><td>1.00</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>White</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>Fast (x1.25)</td><td>0.97</td><td>0.98</td><td>0.99</td><td>0.98</td><td>0.99</td><td>0.98</td><td>0.87</td><td>0.87</td></tr><tr><td>Smooth</td><td>0.96</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.98</td><td>0.98</td></tr><tr><td>Resample</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>AAC</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.97</td><td>0.99</td><td>0.98</td></tr><tr><td>MP3</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.97</td><td>0.99</td><td>1.00</td></tr><tr><td>Encodec</td><td>0.97</td><td>0.98</td><td>0.99</td><td>0.99</td><td>0.98</td><td>0.96</td><td>0.95</td><td>0.95</td></tr><tr><td>Average</td><td>0.97</td><td>0.97</td><td>0.98</td><td>0.98</td><td>0.98</td><td>0.97</td><td>0.95</td><td>0.95</td></tr></table>
397
+
398
+ Table 9. Audio quality and intelligibility evaluations on AI generated speech data from various models and languages.
399
+
400
+ <table><tr><td>Model</td><td>Dataset</td><td>SISNR</td><td>PESQ</td><td>STOI</td><td>VISQOL</td></tr><tr><td rowspan="3">AudioSeal</td><td>Seam. (Deu)</td><td>23.35</td><td>4.244</td><td>0.999</td><td>4.688</td></tr><tr><td>Seam. (Fr)</td><td>24.02</td><td>4.199</td><td>0.998</td><td>4.669</td></tr><tr><td>Voicebox</td><td>25.23</td><td>4.449</td><td>0.998</td><td>4.800</td></tr><tr><td rowspan="3">WavMark</td><td>Seam. (Deu)</td><td>38.93</td><td>3.982</td><td>0.999</td><td>4.515</td></tr><tr><td>Seam. (Fr)</td><td>39.06</td><td>3.959</td><td>0.999</td><td>4.506</td></tr><tr><td>Voicebox</td><td>39.63</td><td>4.211</td><td>0.998</td><td>4.695</td></tr></table>
401
+
402
+ Table 10. Evaluation of the detection performances on different datasets. AudioSet is an environmental sounds dataset while ASVspoof (Liu et al., 2023b) and FakeAVCeleb (Khalid et al., 2021) are deep-fake detection datasets.
403
+
404
+ <table><tr><td>Dataset</td><td>Acc. TPR/FPR</td><td>AUC</td></tr><tr><td>Audioset</td><td>0.9992 0.9996/0.0011</td><td>1.0</td></tr><tr><td>ASVspoof</td><td>1.0 1.0/0.0</td><td>1.0</td></tr><tr><td>FakeAVCeleb</td><td>1.0 1.0/0.0</td><td>1.0</td></tr></table>
405
+
406
+ # C.5. Robustness results
407
+
408
+ We plot the detection accuracy against the strength of multiple augmentations in Fig. 10. AudioSeal outperforms WavMark for most augmentations at the same strength. However, for highpass filters above our training range (500 Hz), WavMark has much better detection accuracy. Our system's TF-loudness loss embeds the watermark where human speech carries the most energy, typically lower frequencies, due to auditory masking. This contrasts with WavMark, which places the watermark in higher frequency bands.
409
+
410
+ Embedding the watermark in lower frequencies is advantageous. For example, speech remains audible with a lowpass filter at $1500\mathrm{Hz}$ , but not with a highpass filter at the same frequency. This difference is measurable with PESQ relative to the original audio, making it more beneficial to be robust against a lowpass filter at a $1500\mathrm{Hz}$ cut-off than a highpass filter at the same cut-off:
411
+
412
+ <table><tr><td>Filter Type</td><td>PESQ</td><td>AudioSeal</td><td>WavMark</td></tr><tr><td>Highpass 1500Hz</td><td>1.85 X</td><td>0.7</td><td>1.0</td></tr><tr><td>Lowpass 1500Hz</td><td>2.93 ✓</td><td>1.0</td><td>0.7</td></tr></table>
413
+
414
+ # D. Experimental details
415
+
416
+ # D.1. Loudness
417
+
418
+ Our loudness function is based on a simplification of the implementation in the torchaudio (Yang et al., 2021) library. It is computed through a multi-step process. Initially, the audio signal undergoes K-weighting, which is a filtering process that emphasizes certain frequencies to mimic the human ear's response. This is achieved by applying a treble filter and a highpass filter. Following this, the energy of the audio signal is calculated for each block of the signal. This is done by squaring the signal and averaging over each block. The energy is then weighted according to the number of channels in the audio signal, with different weights applied to different channels to account for their varying contributions to perceived loudness. Finally, the loudness is computed by taking the logarithm of the weighted sum of energies and adding a constant offset.
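+ For illustration only, here is a heavily simplified sketch of the block-energy and logarithm steps described above; the K-weighting filters and per-channel weights of the torchaudio implementation are omitted, and the 0.4 s block size and offset constant follow the usual ITU-R BS.1770 convention rather than the exact code used in the paper:
+
+ ```python
+ import torch
+
+ def simplified_loudness(wav: torch.Tensor, sample_rate: int = 16000,
+                         block_s: float = 0.4, offset: float = -0.691) -> torch.Tensor:
+     """Illustrative loudness estimate for a mono waveform of shape (T,).
+
+     Only the block-energy and log steps are mirrored here; K-weighting
+     (treble + highpass) and channel weighting are intentionally skipped.
+     """
+     block = int(block_s * sample_rate)
+     n_blocks = wav.shape[-1] // block
+     x = wav[..., : n_blocks * block].reshape(n_blocks, block)
+     energy = (x ** 2).mean(dim=-1)                       # mean square per block
+     return offset + 10 * torch.log10(energy.mean() + 1e-12)  # dB-like scalar
+ ```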
419
+
420
+ ![](images/06ce186a735d9c834e764386494b122adcc331a71e721f78b21866d2df9c2c44.jpg)
421
+
422
+ ![](images/b24dedcb583b9add1715365f8bf12536b849c5fce9c81c0cf9bed5db05594539.jpg)
423
+
424
+ ![](images/23036843d60868bac7cc56603e7e26633a5a1000f258d0fc9f4751b332c34f41.jpg)
425
+
426
+ ![](images/3a09c6f4591a4445eec27bae51378c4b1473cab4e9abb9305453c0770acfbacb.jpg)
427
+ Figure 10. Accuracy of the detector on augmented samples with respect to the strength of the augmentation.
428
+
429
+ ![](images/d6ab809e2307e0bc596b986405d555a3de9c2addf071d5dcb16575e73206e402.jpg)
430
+
431
+ ![](images/40e32b1224aaf3d47960d75b20ce73b96809c8c0d992366835dfe782b7fe879d.jpg)
432
+
433
+ # D.2. Robustness Augmentations
434
+
435
+ Here are the details of the audio editing augmentations used at train time (T), and evaluation time (E):
436
+
437
+ - Bandpass Filter: Combines highpass and lowpass filtering to allow a specific frequency band to pass through. (T) fixed between $300\mathrm{Hz}$ and $8000\mathrm{Hz}$ ; (E) fixed between $500\mathrm{Hz}$ and $5000\mathrm{Hz}$ .
438
+ - Highpass Filter: Uses a highpass filter on the input audio to cut frequencies below a certain threshold. (T) fixed at $500\mathrm{Hz}$ ; (E) fixed at $1500\mathrm{Hz}$ .
439
+ - Lowpass Filter: Applies a lowpass filter to the input audio, cutting frequencies above a cutoff frequency. (T) fixed at $5000\mathrm{Hz}$ ; (E) fixed at $500\mathrm{Hz}$ .
440
+ - Speed: Changes the speed of the audio by a factor close to 1. (T) random between 0.9 and 1.1; (E) fixed at 1.25.
441
+ - Resample: Upsamples to an intermediate sample rate and then downsamples the audio back to its original rate without changing its shape. (T) and (E) $32\mathrm{kHz}$ .
442
+ - Boost Audio: Amplifies the audio by multiplying by a factor. (T) factor fixed at 1.2; (E) fixed at 10.
443
+ - Duck Audio: Reduces the volume of the audio by a multiplying factor. (T) factor fixed at 0.8; (E) fixed at 0.1.
444
+ - Echo: Applies an echo effect to the audio, adding a delayed and attenuated copy of the original. (T) random delay between 0.1 and 0.5 seconds, random volume between 0.1 and 0.5; (E) fixed delay of 0.5 seconds, fixed volume of 0.5.
445
+ - Pink Noise: Adds pink noise for a background noise effect. (T) standard deviation fixed at 0.01; (E) fixed at 0.1.
446
+
447
+ - White Noise: Adds Gaussian noise to the waveform. (T) standard deviation fixed at 0.001; (E) fixed at 0.05.
448
+ - Smooth: Smooths the audio signal using a moving average filter with a variable window size. (T) window size random between 2 and 10; (E) fixed at 40.
449
+ - AAC: Encodes the audio in AAC format. (T) bitrate of 128kbps; (E) bitrate of 64kbps.
450
+ - MP3: Encodes the audio in MP3 format. (T) bitrate of 128kbps; (E) bitrate of 32kbps.
451
+ - EnCodec: Resamples at $24\mathrm{kHz}$ , encodes the audio with EnCodec with $n_q = 16$ (16 streams of tokens), and resamples it back to $16\mathrm{kHz}$ .
452
+
453
+ Implementation relies on the julius Python library; a few simplified stand-ins are sketched below.
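+ The snippet below gives simplified stand-ins for a few of these augmentations (boost/duck, white noise, smoothing) written directly in PyTorch; the filtering, codec and resampling augmentations require julius/ffmpeg and are not reproduced here, and the numeric settings simply mirror the evaluation-time values listed above:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def boost_or_duck(wav: torch.Tensor, factor: float) -> torch.Tensor:
+     """Boost (factor > 1) or duck (factor < 1) the volume."""
+     return wav * factor
+
+ def add_white_noise(wav: torch.Tensor, std: float) -> torch.Tensor:
+     """Add Gaussian noise with the given standard deviation."""
+     return wav + std * torch.randn_like(wav)
+
+ def smooth(wav: torch.Tensor, window: int) -> torch.Tensor:
+     """Moving-average filter; wav has shape (batch, 1, time)."""
+     kernel = torch.full((1, 1, window), 1.0 / window, device=wav.device)
+     return F.conv1d(wav, kernel, padding=window // 2)
+
+ # example with the evaluation-time settings described above
+ x = torch.randn(1, 1, 16000)
+ y = smooth(add_white_noise(boost_or_duck(x, 10.0), 0.05), 40)
+ ```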
454
+
455
+ # D.3. Network architectures (Fig. 4)
456
+
457
+ The watermark generator is composed of an encoder and a decoder, both incorporating elements from EnCodec (Défossez et al., 2022). The encoder applies a 1D convolution with 32 channels and a kernel size of 7, followed by four convolutional blocks. Each of these blocks includes a residual unit and a down-sampling layer, which uses convolution with stride $S$ and kernel size $K = 2S$ . The residual unit has two kernel-3 convolutions with a skip connection, doubling channels during down-sampling. The encoder concludes with a two-layer LSTM and a final 1D convolution with a kernel size of 7 and 128 channels. The stride values $S$ are (2, 4, 5, 8) and the nonlinear activation in residual units is the Exponential Linear Unit (ELU). The decoder mirrors the encoder but uses transposed convolutions instead, with strides in reverse order.
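+ A compact PyTorch sketch of this encoder is shown below; it follows the description above (kernel-7 stem, four residual + strided down-sampling blocks with strides (2, 4, 5, 8), a two-layer LSTM and a final 128-channel convolution), but omits details such as normalization, padding conventions and weight initialization, which may differ in the actual model:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ResidualUnit(nn.Module):
+     """Two kernel-3 convolutions with a skip connection (ELU activations)."""
+     def __init__(self, dim: int):
+         super().__init__()
+         self.block = nn.Sequential(
+             nn.ELU(), nn.Conv1d(dim, dim, 3, padding=1),
+             nn.ELU(), nn.Conv1d(dim, dim, 3, padding=1),
+         )
+     def forward(self, x):
+         return x + self.block(x)
+
+ class EncoderSketch(nn.Module):
+     """Simplified EnCodec-style encoder following the description above."""
+     def __init__(self, channels: int = 32, strides=(2, 4, 5, 8), out_dim: int = 128):
+         super().__init__()
+         layers = [nn.Conv1d(1, channels, 7, padding=3)]
+         dim = channels
+         for s in strides:
+             layers += [ResidualUnit(dim),
+                        nn.Conv1d(dim, 2 * dim, 2 * s, stride=s, padding=s // 2)]
+             dim *= 2
+         self.conv = nn.Sequential(*layers)
+         self.lstm = nn.LSTM(dim, dim, num_layers=2, batch_first=True)
+         self.out = nn.Conv1d(dim, out_dim, 7, padding=3)
+     def forward(self, x):                     # x: (batch, 1, time)
+         z = self.conv(x)
+         z, _ = self.lstm(z.transpose(1, 2))
+         return self.out(z.transpose(1, 2))    # (batch, out_dim, ~time / 320)
+ ```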
458
+
459
+ The detector comprises an encoder, a transposed convolution and a linear layer. The encoder shares the generator's architecture (but with different weights). The transposed convolution has $h$ output channels and upsamples the activation map to the original audio resolution (resulting in an activation map of shape $(t,h)$ ). The linear layer reduces the $h$ dimensions to two, followed by a softmax function that gives sample-wise probability scores.
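+ A corresponding sketch of the detector head is given below, assuming the shared encoder outputs 128-channel frames at a 320x lower temporal resolution; the number of channels $h$ is illustrative since the paper does not specify it here:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DetectorHeadSketch(nn.Module):
+     """Upsample encoder frames back to sample rate and output per-sample
+     watermark probabilities (h channels -> 2 classes -> softmax)."""
+     def __init__(self, in_dim: int = 128, h: int = 32, upsample: int = 320):
+         super().__init__()
+         self.up = nn.ConvTranspose1d(in_dim, h, kernel_size=upsample, stride=upsample)
+         self.linear = nn.Linear(h, 2)
+     def forward(self, z):                      # z: (batch, in_dim, frames)
+         a = self.up(z).transpose(1, 2)         # (batch, time, h)
+         return self.linear(a).softmax(dim=-1)  # sample-wise probability scores
+ ```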
460
+
461
+ # D.4. MUSHRA protocol details
462
+
463
+ The MUSHRA protocol is a crowdsourced test in which participants rate the quality of various samples on a scale of 0 to 100. The ground truth is provided for reference. We utilized 100 speech samples, each lasting 10 seconds. Each sample was evaluated by at least 20 participants. As part of the study, we included a low anchor, which is a very lossy compression at 1.5kbps, encoded using EnCodec. Participants who failed to assign the lowest score to the low anchor for at least $80\%$ of their assignments were excluded from the study. For comparison, the ground truth samples received an average score of 80.49, while the low anchor's average score was 53.21.
464
+
465
+ # D.5. Attacks on the watermark
466
+
467
+ Adversarial attack against the detector. Given a sample $x$ and a detector $D$ , we want to find $x'$ close to $x$ such that $D(x') = 1 - D(x)$ . To that end, we use a gradient-based attack. It starts by initializing a distortion $\delta_{adv}$ with random Gaussian noise. The algorithm iteratively updates the distortion for a number of steps $n$ . At each step, the distortion is added to the original audio via $x' = x + \alpha \tanh(\delta_{adv})$ , which is passed through the model to get predictions. A cross-entropy loss is computed with label either 0 (for removal) or 1 (for forging), and back-propagated through the detector to update the distortion, using the Adam optimizer. At the end of the process, the adversarial audio is $x + \alpha \tanh(\delta_{adv})$ . In our attack, we use a scaling factor $\alpha = 10^{-3}$ , a number of steps $n = 100$ , and a learning rate of $10^{-1}$ . The tanh function ensures that the distortion remains small, which lower-bounds the SNR of the adversarial audio.
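+ A minimal sketch of this attack is given below; it assumes `detector(x)` returns sample-wise logits of shape (batch, time, 2), and the hyper-parameters mirror the values listed above:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def adversarial_attack(detector, x: torch.Tensor, target: int = 0,
+                        alpha: float = 1e-3, steps: int = 100,
+                        lr: float = 1e-1) -> torch.Tensor:
+     """Gradient-based attack: target=0 tries to remove the watermark,
+     target=1 tries to forge it. `detector(x)` is assumed to return
+     per-sample logits of shape (batch, time, 2)."""
+     delta = torch.randn_like(x, requires_grad=True)   # random Gaussian init
+     opt = torch.optim.Adam([delta], lr=lr)
+     for _ in range(steps):
+         x_adv = x + alpha * torch.tanh(delta)         # bounded distortion
+         logits = detector(x_adv)
+         labels = torch.full(logits.shape[:-1], target,
+                             dtype=torch.long, device=x.device)
+         loss = F.cross_entropy(logits.reshape(-1, 2), labels.reshape(-1))
+         opt.zero_grad()
+         loss.backward()
+         opt.step()
+     return (x + alpha * torch.tanh(delta)).detach()
+ ```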
468
+
469
+ Training of the malicious detector. Here, we are interested in training a classifier that can distinguish between watermarked and non-watermarked samples, when access to many samples of both types is available. To train the classifier, we use a dataset made of more than 80k 8-second speech samples from Voicebox (Le et al., 2023) watermarked using our proposed method, and a similar amount of genuine (un-watermarked) speech samples. The classifier shares the same architecture as AudioSeal's detector. The classifier is trained for 200k updates with batches of 64 one-second samples. It achieves perfect classification of the samples.
470
+
471
+ This is consistent with the findings of Voicebox (Le et al., 2023).
2401.17xxx/2401.17264/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa9c4d194a3b3e4a0a0483e7a475ee95a993c19d12e0cf13994bf0de0e1f9829
3
+ size 680989
2401.17xxx/2401.17264/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17270/2e219f00-345f-4503-8b01-75ee82d505bc_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d87e0638e6f483349798b397db631ec88019e631d9b8768cdbe5a0fa43d91fef
3
+ size 4622601
2401.17xxx/2401.17270/full.md ADDED
@@ -0,0 +1,421 @@
1
+ # YOLO-World: Real-Time Open-Vocabulary Object Detection
2
+
3
+ Tianheng Cheng $^{3,2,*}$ , Lin Song $^{1,*,\ddagger}$ , Yixiao Ge $^{1,2,\dagger}$ , Wenyu Liu $^{3}$ , Xinggang Wang $^{3,\ddagger}$ , Ying Shan $^{1,2}$ *equal contribution † project lead ‡ corresponding author
4
+
5
+ $^{1}$ Tencent AI Lab $^{2}$ ARC Lab, Tencent PCG $^{3}$ School of EIC, Huazhong University of Science & Technology
6
+
7
+ Code & Models: YOLO-World
8
+
9
+ # Abstract
10
+
11
+ The You Only Look Once (YOLO) series of detectors have established themselves as efficient and practical tools. However, their reliance on predefined and trained object categories limits their applicability in open scenarios. Addressing this limitation, we introduce YOLO-World, an innovative approach that enhances YOLO with open-vocabulary detection capabilities through vision-language modeling and pre-training on large-scale datasets. Specifically, we propose a new Re-parameterizable Vision-Language Path Aggregation Network (RepVL-PAN) and region-text contrastive loss to facilitate the interaction between visual and linguistic information. Our method excels in detecting a wide range of objects in a zero-shot manner with high efficiency. On the challenging LVIS dataset, YOLO-World achieves 35.4 AP with 52.0 FPS on V100, which outperforms many state-of-the-art methods in terms of both accuracy and speed. Furthermore, the fine-tuned YOLO-World achieves remarkable performance on several downstream tasks, including object detection and open-vocabulary instance segmentation.
12
+
13
+ # 1. Introduction
14
+
15
+ Object detection has been a long-standing and fundamental challenge in computer vision with numerous applications in image understanding, robotics, and autonomous vehicles. Tremendous works [16, 27, 43, 45] have achieved significant breakthroughs in object detection with the development of deep neural networks. Despite the success of these methods, they remain limited as they only handle object detection with a fixed vocabulary, e.g., 80 categories in the COCO [26] dataset. Once object categories are defined and labeled, trained detectors can only detect those specific categories, thus limiting their ability and applicability in open scenarios.
16
+
17
+ ![](images/540bd22e124fd87838ec18d6f88472f4596f3dbe350f01939144e08f934b431c.jpg)
18
+ Figure 1. Speed-and-Accuracy Curve. We compare YOLO-World with recent open-vocabulary methods in terms of speed and accuracy. All models are evaluated on the LVIS minival and inference speeds are measured on one NVIDIA V100 w/o TensorRT. The size of the circle represents the model's size.
19
+
20
+
21
+
22
+ Recent works [8, 13, 48, 53, 58] have explored the prevalent vision-language models [19, 39] to address open-vocabulary detection [58] through distilling vocabulary knowledge from language encoders, e.g., BERT [5]. However, these distillation-based methods are severely limited by the scarcity of training data and the limited diversity of their vocabulary, e.g., OV-COCO [58] containing 48 base categories. Several methods [24, 30, 56, 57, 59] reformulate object detection training as region-level vision-language pre-training and train open-vocabulary object detectors at scale. However, those methods still struggle with detection in real-world scenarios, suffering from two aspects: (1) heavy computation burden and (2) complicated deployment for edge devices.
23
+
24
+ Previous works [24, 30, 56, 57, 59] have demonstrated the promising performance of pre-training large detectors, while pre-training small detectors to endow them with open recognition capabilities remains unexplored.
25
+
26
+ In this paper, we present YOLO-World, aiming for high-efficiency open-vocabulary object detection, and explore large-scale pre-training schemes to boost the traditional YOLO detectors to a new open-vocabulary world. Compared to previous methods, the proposed YOLO-World is remarkably efficient with high inference speed and easy to deploy for downstream applications. Specifically, YOLO-World follows the standard YOLO architecture [20] and leverages the pre-trained CLIP [39] text encoder to encode the input texts. We further propose the Re-parameterizable Vision-Language Path Aggregation Network (RepVL-PAN) to connect text features and image features for better visual-semantic representation. During inference, the text encoder can be removed and the text embeddings can be re-parameterized into weights of RepVL-PAN for efficient deployment. We further investigate the open-vocabulary pre-training scheme for YOLO detectors through region-text contrastive learning on largescale datasets, which unifies detection data, grounding data, and image-text data into region-text pairs. The pre-trained YOLO-World with abundant region-text pairs demonstrates a strong capability for large vocabulary detection and training more data leads to greater improvements in open-vocabulary capability.
27
+
28
+ In addition, we explore a prompt-then-detect paradigm to further improve the efficiency of open-vocabulary object detection in real-world scenarios. As illustrated in Fig. 2, traditional object detectors [16, 20, 23, 41-43, 52] concentrate on the fixed-vocabulary (close-set) detection with predefined and trained categories. While previous open-vocabulary detectors [24, 30, 56, 59] encode the prompts of a user for online vocabulary with text encoders and detect objects. Notably, those methods tend to employ large detectors with heavy backbones, e.g., Swin-L [32], to increase the open-vocabulary capacity. In contrast, the prompt-then-detect paradigm (Fig. 2 (c)) first encodes the prompts of a user to build an offline vocabulary and the vocabulary varies with different needs. Then, the efficient detector can infer the offline vocabulary on the fly without re-encoding the prompts. For practical applications, once we have trained the detector, i.e., YOLO-World, we can pre-encode the prompts or categories to build an offline vocabulary and then seamlessly integrate it into the detector.
29
+
30
+ Our main contributions are three-fold:
31
+
32
+ - We introduce the YOLO-World, a cutting-edge open-vocabulary object detector with high efficiency for real-world applications.
33
+ - We propose a Re-parameterizable Vision-Language PAN to connect vision and language features and an open-vocabulary region-text contrastive pre-training scheme for YOLO-World.
34
+
35
+
36
+
37
+ - The proposed YOLO-World pre-trained on large-scale datasets demonstrates strong zero-shot performance and achieves 35.4 AP on LVIS with 52.0 FPS. The pre-trained YOLO-World can be easily adapted to downstream tasks, e.g., open-vocabulary instance segmentation and referring object detection. Moreover, the pre-trained weights and codes of YOLO-World will be open-sourced to facilitate more practical applications.
38
+
39
+ # 2. Related Works
40
+
41
+ # 2.1. Traditional Object Detection
42
+
43
+ Prevalent object detection research concentrates on fixed-vocabulary (close-set) detection, in which object detectors are trained on datasets with pre-defined categories, e.g., COCO dataset [26] and Objects365 dataset [46], and then detect objects within the fixed set of categories. During the past decades, the methods for traditional object detection can be simply categorized into three groups, i.e., region-based methods, pixel-based methods, and query-based methods. The region-based methods [11, 12, 16, 27, 44], such as Faster R-CNN [44], adopt a two-stage framework for proposal generation [44] and RoI-wise (Region-of-Interest) classification and regression. The pixel-based methods [28, 31, 42, 49, 61] tend to be one-stage detectors, which perform classification and regression over predefined anchors or pixels. DETR [1] first explores object detection through transformers [50] and inspires extensive query-based methods [64]. In terms of inference speed, Redmon et al. presents YOLOs [40-42] which exploit simple convolutional architectures for real-time object detection. Several works [10, 23, 33, 52, 55] propose various architectures or designs for YOLO, including path aggregation networks [29], cross-stage partial networks [51], and re-parameterization [6], which further improve both speed and accuracy. In comparison to previous YOLOs, YOLO-World in this paper aims to detect objects beyond the fixed vocabulary with strong generalization ability.
44
+
45
+ # 2.2. Open-Vocabulary Object Detection
46
+
47
+ Open-vocabulary object detection (OVD) [58] has emerged as a new trend for modern object detection, which aims to detect objects beyond the predefined categories. Early works [13] follow the standard OVD setting [58] by training detectors on the base classes and evaluating the novel (unknown) classes. Nevertheless, while this open-vocabulary setting can evaluate the capability of detectors to detect and recognize novel objects, it is still limited for open scenarios and lacks generalization ability to other domains due to training on a limited dataset and vocabulary. Inspired
48
+
49
+ ![](images/30a5e0224253526d7b0db93961f5d331e978af1be0a400746dcb29e5fcfdf9a4.jpg)
50
+ (a) Traditional Object Detector
51
+
52
+ ![](images/1337e0d3ade6cd978b20269c6d544486f01e39ee52c09e8ca01e4a7975e445ae.jpg)
53
+ (b) Previous Open-Vocabulary Detector
54
+
55
+ ![](images/eac54926f57eac165db3b525fa962371a4736289185680586e5b8ddeae1719c4.jpg)
56
+ (c) YOLO-World
57
+
58
+ ![](images/52d15c856ff5c94592c99ef6edc2ae90779ab104a5bcc651f19a330e6afa3dc2.jpg)
59
+ Figure 2. Comparison with Detection Paradigms. (a) Traditional Object Detector: These object detectors can only detect objects within the fixed vocabulary pre-defined by the training datasets, e.g., 80 categories of COCO dataset [26]. The fixed vocabulary limits the extension for open scenes. (b) Previous Open-Vocabulary Detectors: Previous methods tend to develop large and heavy detectors for open-vocabulary detection which intuitively have strong capacity. In addition, these detectors simultaneously encode images and texts as input for prediction, which is time-consuming for practical applications. (c) YOLO-World: We demonstrate the strong open-vocabulary performance of lightweight detectors, e.g.,YOLO detectors [20, 42], which is of great significance for real-world applications. Rather than using online vocabulary, we present a prompt-then-detect paradigm for efficient inference, in which the user generates a series of prompts according to the need and the prompts will be encoded into an offline vocabulary. Then it can be re-parameterized as the model weights for deployment and further acceleration.
60
+ Figure 3. Overall Architecture of YOLO-World. Compared to traditional YOLO detectors, YOLO-World as an open-vocabulary detector adopts text as input. The Text Encoder first encodes the input text into text embeddings. Then the Image Encoder encodes the input image into multi-scale image features and the proposed RepVL-PAN exploits the multi-level cross-modality fusion for both image and text features. Finally, YOLO-World predicts the regressed bounding boxes and the object embeddings for matching the categories or nouns that appeared in the input text.
61
+
62
+ by vision-language pre-training [19, 39], recent works [8, 22, 53, 62, 63] formulate open-vocabulary object detection as image-text matching and exploit large-scale image-text data to increase the training vocabulary at scale. OWL-ViTs [35, 36] fine-tune the simple vision transformers [7] with detection and grounding datasets and build the simple open-vocabulary detectors with promising performance. GLIP [24] presents a pre-training framework for open-vocabulary detection based on phrase grounding and evaluates in a zero-shot setting. Grounding DINO [30] incorporates the grounded pre-training [24] into detection transformers [60] with cross-modality fusions. Several methods [25, 56, 57, 59] unify detection datasets and image-text
63
+
64
+ datasets through region-text matching and pre-train detectors with large-scale image-text pairs, achieving promising performance and generalization. However, these methods often use heavy detectors like ATSS [61] or DINO [60] with Swin-L [32] as a backbone, leading to high computational demands and deployment challenges. In contrast, we present YOLO-World, aiming for efficient open-vocabulary object detection with real-time inference and easier downstream application deployment. Differing from ZSD-YOLO [54], which also explores open-vocabulary detection [58] with YOLO through language model alignment, YOLO-World introduces a novel YOLO framework with an effective pre-training strategy, enhancing open-vocabulary performance and generalization.
65
+
66
+
67
+
68
+ # 3. Method
69
+
70
+ # 3.1. Pre-training Formulation: Region-Text Pairs
71
+
72
+ The traditional object detection methods, including the YOLO-series [20], are trained with instance annotations $\Omega = \{B_i, c_i\}_{i=1}^N$ , which consist of bounding boxes $\{B_i\}$ and category labels $\{c_i\}$ . In this paper, we reformulate the instance annotations as region-text pairs $\Omega = \{B_i, t_i\}_{i=1}^N$ , where $t_i$ is the corresponding text for the region $B_i$ . Specifically, the text $t_i$ can be the category name, noun phrases, or object descriptions. Moreover, YOLO-World adopts both the image $I$ and texts $T$ (a set of nouns) as input and outputs predicted boxes $\{\hat{B}_k\}$ and the corresponding object embeddings $\{e_k\}$ ( $e_k \in \mathbb{R}^D$ ).
73
+
74
+ # 3.2. Model Architecture
75
+
76
+ The overall architecture of the proposed YOLO-World is illustrated in Fig. 3, which consists of a YOLO detector, a Text Encoder, and a Re-parameterizable Vision-Language Path Aggregation Network (RepVL-PAN). Given the input text, the text encoder in YOLO-World encodes the text into text embeddings. The image encoder in the YOLO detector extracts the multi-scale features from the input image. Then we leverage the RepVL-PAN to enhance both text and image representation by exploiting the cross-modality fusion between image features and text embeddings.
77
+
78
+ YOLO Detector. YOLO-World is mainly developed based on YOLOv8 [20], which contains a Darknet backbone [20, 43] as the image encoder, a path aggregation network (PAN) for multi-scale feature pyramids, and a head for bounding box regression and object embeddings.
79
+
80
+ Text Encoder. Given the text $T$ , we adopt the Transformer text encoder pre-trained by CLIP [39] to extract the corresponding text embeddings $W = \text{TextEncoder}(T) \in \mathbb{R}^{C \times D}$ , where $C$ is the number of nouns and $D$ is the embedding dimension. The CLIP text encoder offers better visual-semantic capabilities for connecting visual objects with texts compared to text-only language encoders [5]. When the input text is a caption or referring expression, we adopt the simple n-gram algorithm to extract the noun phrases and then feed them into the text encoder.
81
+
82
+ Text Contrastive Head. Following previous works [20], we adopt the decoupled head with two $3 \times 3$ convs to regress bounding boxes $\{b_k\}_{k=1}^K$ and object embeddings $\{e_k\}_{k=1}^K$ , where $K$ denotes the number of objects. We present a text contrastive head to obtain the object-text similarity $s_{k,j}$ by:
83
+
84
+ $$
85
+ s_{k,j} = \alpha \cdot \text{L2-Norm}(e_{k}) \cdot \text{L2-Norm}(w_{j})^{\top} + \beta, \tag{1}
86
+ $$
87
+
88
+ where $\text{L2-Norm}(\cdot)$ is the L2 normalization and $w_{j} \in W$ is the $j$-th text embedding. In addition, we add the affine transformation with the learnable scaling factor $\alpha$ and shifting factor $\beta$ . Both the L2 norms and the affine transformations are important for stabilizing the region-text training.
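+ For clarity, a minimal PyTorch sketch of Eq. (1) is given below; the embedding dimension and the numbers of objects and nouns are placeholders:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def object_text_similarity(e: torch.Tensor, w: torch.Tensor,
+                            alpha: torch.Tensor, beta: torch.Tensor) -> torch.Tensor:
+     """Eq. (1): s[k, j] = alpha * <e_k / ||e_k||, w_j / ||w_j||> + beta.
+
+     e: (K, D) object embeddings, w: (C, D) text embeddings,
+     alpha/beta: learnable scalars. Returns a (K, C) similarity matrix.
+     """
+     e = F.normalize(e, dim=-1)   # L2-Norm
+     w = F.normalize(w, dim=-1)
+     return alpha * e @ w.t() + beta
+
+ # usage with dummy shapes
+ s = object_text_similarity(torch.randn(100, 512), torch.randn(80, 512),
+                            torch.tensor(1.0), torch.tensor(0.0))
+ ```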
89
+
90
+ Training with Online Vocabulary. During training, we construct an online vocabulary $T$ for each mosaic sample containing 4 images. Specifically, we sample all positive nouns involved in the mosaic images and randomly sample some negative nouns from the corresponding dataset. The vocabulary for each mosaic sample contains at most $M$ nouns, and $M$ is set to 80 as default.
91
+
92
+ Inference with Offline Vocabulary. At the inference stage, we present a prompt-then-detect strategy with an offline vocabulary for further efficiency. As shown in Fig. 3, the user can define a series of custom prompts, which might include captions or categories. We then utilize the text encoder to encode these prompts and obtain offline vocabulary embeddings. The offline vocabulary allows for avoiding computation for each input and provides the flexibility to adjust the vocabulary as needed.
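+ A minimal sketch of this prompt-then-detect step is shown below; `text_encoder` stands in for the frozen CLIP text encoder and its interface is assumed, not taken from the released code:
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def build_offline_vocabulary(text_encoder, prompts) -> torch.Tensor:
+     """Encode the user prompts once and cache the normalized embeddings.
+     `text_encoder` is assumed to map a list of strings to a (C, D) tensor."""
+     vocab = text_encoder(prompts)                      # (C, D)
+     return torch.nn.functional.normalize(vocab, dim=-1)
+
+ # the cached vocabulary can then be reused (or re-parameterized into
+ # RepVL-PAN weights) for every image, so no text encoding runs at inference.
+ ```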
93
+
94
+ # 3.3. Re-parameterizable Vision-Language PAN
95
+
96
+ Fig. 4 shows the structure of the proposed RepVL-PAN which follows the top-down and bottom-up paths in [20, 29] to establish the feature pyramids $\{P_3,P_4,P_5\}$ with the multi-scale image features $\{C_3,C_4,C_5\}$ . Furthermore, we propose the Text-guided CSPLayer (T-CSPLayer) and Image-Pooling Attention (I-Pooling Attention) to further enhance the interaction between image features and text features, which can improve the visual-semantic representation for open-vocabulary capability. During inference, the offline vocabulary embeddings can be re-parameterized into weights of convolutional or linear layers for deployment.
97
+
98
+ Text-guided CSPLayer. As Fig. 4 illustrates, the cross-stage partial layers (CSPLayer) are utilized after the top-down or bottom-up fusion. We extend the CSPLayer (also called C2f) of [20] by incorporating text guidance into multi-scale image features to form the Text-guided CSPLayer. Specifically, given the text embeddings $W$ and image features $X_{l} \in \mathbb{R}^{H \times W \times D}$ ( $l \in \{3,4,5\}$ ), we adopt the max-sigmoid attention after the last dark bottleneck block to aggregate text features into image features by:
99
+
100
+ $$
101
+ X_{l}^{\prime} = X_{l} \cdot \delta\left(\max_{j \in \{1..C\}}\left(X_{l} W_{j}^{\top}\right)\right)^{\top}, \tag{2}
102
+ $$
103
+
104
+ where the updated $X_{l}^{\prime}$ is concatenated with the cross-stage features as output. The $\delta$ indicates the sigmoid function.
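+ The max-sigmoid aggregation of Eq. (2) can be sketched as follows (channel layout and shapes are illustrative; the surrounding CSPLayer structure is omitted):
+
+ ```python
+ import torch
+
+ def max_sigmoid_attention(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
+     """Re-weight image features by their strongest text response (Eq. 2).
+
+     x: (B, H, W, D) image features, w: (C, D) text embeddings.
+     Returns features of the same shape, scaled per spatial location.
+     """
+     attn = torch.einsum("bhwd,cd->bhwc", x, w)      # X_l W^T
+     gate = attn.max(dim=-1).values.sigmoid()        # max over texts, sigmoid
+     return x * gate.unsqueeze(-1)
+ ```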
105
+
106
+ ![](images/d1d000adc820cda55d0898e00df8672dbc3e6b079fd460d465292783e211c0dc.jpg)
107
+ Figure 4. Illustration of the RepVL-PAN. The proposed RepVL-PAN adopts the Text-guided CSPLayer (T-CSPLayer) for injecting language information into image features and the Image Pooling Attention (I-Pooling Attention) for enhancing image-aware text embeddings.
108
+
109
+ Image-Pooling Attention. To enhance the text embeddings with image-aware information, we aggregate image features to update the text embeddings by proposing the Image-Pooling Attention. Rather than directly using cross-attention on image features, we leverage max pooling on multi-scale features to obtain $3 \times 3$ regions, resulting in a total of 27 patch tokens $\tilde{X} \in \mathbb{R}^{27 \times D}$ . The text embeddings are then updated by:
110
+
111
+ $$
112
+ W^{\prime} = W + \text{MultiHead-Attention}(W, \tilde{X}, \tilde{X}) \tag{3}
113
+ $$
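+ A minimal sketch of this Image-Pooling Attention is given below; it assumes the three feature levels have already been projected to a common dimension, which is a simplification of the actual implementation:
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class ImagePoolingAttention(nn.Module):
+     """Eq. (3): pool each feature level to 3x3, flatten to 27 patch tokens,
+     and update text embeddings with multi-head attention (text as query)."""
+     def __init__(self, dim: int = 512, heads: int = 8):
+         super().__init__()
+         self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
+     def forward(self, w: torch.Tensor, feats) -> torch.Tensor:
+         # w: (B, C, dim) text embeddings; feats: three (B, dim, H, W) maps
+         tokens = [F.adaptive_max_pool2d(f, 3).flatten(2).transpose(1, 2)
+                   for f in feats]
+         x = torch.cat(tokens, dim=1)                  # (B, 27, dim)
+         out, _ = self.attn(w, x, x)
+         return w + out                                # W' = W + MHA(W, X~, X~)
+ ```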
114
+
115
+ # 3.4. Pre-training Schemes
116
+
117
+ In this section, we present the training schemes for pretraining YOLO-World on large-scale detection, grounding, and image-text datasets.
118
+
119
+ Learning from Region-Text Contrastive Loss. Given the mosaic sample $I$ and texts $T$ , YOLO-World outputs $K$ object predictions $\{B_k, s_k\}_{k=1}^K$ along with annotations $\Omega = \{B_i, t_i\}_{i=1}^N$ . We follow [20] and leverage task-aligned label assignment [9] to match the predictions with ground-truth annotations and assign each positive prediction with a text index as the classification label. Based on this vocabulary, we construct the region-text contrastive loss $\mathcal{L}_{\mathrm{con}}$ with region-text pairs through cross entropy between object-text (region-text) similarity and object-text assignments. In addition, we adopt IoU loss and distribution focal loss for bounding box regression, and the total training loss is defined as: $\mathcal{L}(I) = \mathcal{L}_{\mathrm{con}} + \lambda_I \cdot (\mathcal{L}_{\mathrm{iou}} + \mathcal{L}_{\mathrm{dfl}})$ , where $\lambda_I$ is an indicator factor set to 1 when the input image $I$ is from detection or grounding data and to 0 when it is from image-text data.
120
+
121
+ Considering that image-text datasets have noisy boxes, we only calculate the regression loss for samples with accurate bounding boxes.
122
+
123
+ Pseudo Labeling with Image-Text Data. Rather than directly using image-text pairs for pre-training, we propose an automatic labeling approach to generate region-text pairs. Specifically, the labeling approach contains three steps: (1) extract noun phrases: we first utilize the n-gram algorithm to extract noun phrases from the text; (2) pseudo labeling: we adopt a pre-trained open-vocabulary detector, e.g., GLIP [24], to generate pseudo boxes for the given noun phrases for each image, thus providing the coarse region-text pairs. (3) filtering: We employ the pre-trained CLIP [39] to evaluate the relevance of image-text pairs and region-text pairs, and filter the low-relevance pseudo annotations and images. We further filter redundant bounding boxes by incorporating methods such as Non-Maximum Suppression (NMS). We suggest the readers refer to the appendix for the detailed approach. With the above approach, we sample and label 246k images from CC3M [47] with 821k pseudo annotations.
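+ The three-step labeling procedure can be sketched as follows; `extract_nouns`, `detect_with_glip` and `clip_region_score` are hypothetical callables standing in for the n-gram extractor, the pre-trained open-vocabulary detector and the CLIP relevance scorer, and the thresholds are illustrative:
+
+ ```python
+ from typing import List, Tuple
+ import torch
+ from torchvision.ops import nms
+
+ def pseudo_label(image, caption: str,
+                  extract_nouns, detect_with_glip, clip_region_score,
+                  box_thresh: float = 0.3, clip_thresh: float = 0.3,
+                  iou_thresh: float = 0.5) -> List[Tuple[torch.Tensor, str]]:
+     """Sketch of the three-step labeling pipeline described above."""
+     pairs = []
+     for noun in extract_nouns(caption):                   # step 1: noun phrases
+         boxes, scores = detect_with_glip(image, noun)     # step 2: (N, 4), (N,)
+         keep = nms(boxes, scores, iou_thresh)             # drop redundant boxes
+         for b in boxes[keep][scores[keep] > box_thresh]:
+             if clip_region_score(image, b, noun) > clip_thresh:   # step 3: filter
+                 pairs.append((b, noun))
+     return pairs
+ ```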
124
+
125
+ # 4. Experiments
126
+
127
+ In this section, we demonstrate the effectiveness of the proposed YOLO-World by pre-training it on large-scale datasets and evaluating YOLO-World in a zero-shot manner on both LVIS benchmark and COCO benchmark (Sec. 4.2). We also evaluate the fine-tuning performance of YOLO-World on COCO, LVIS for object detection.
128
+
129
+ # 4.1. Implementation Details
130
+
131
+ The YOLO-World is developed based on the MMYOLO toolbox [3] and the MMDetection toolbox [2]. Following [20], we provide three variants of YOLO-World for different latency requirements, e.g., small (S), medium (M), and large (L). We adopt the open-source CLIP [39] text encoder with pre-trained weights to encode the input text. Unless specified, we measure the inference speeds of all models on one NVIDIA V100 GPU without extra acceleration mechanisms, e.g., FP16 or TensorRT.
132
+
133
+ # 4.2. Pre-training
134
+
135
+ Experimental Setup. At the pre-training stage, we adopt the AdamW optimizer [34] with an initial learning rate of 0.002 and weight decay of 0.05. YOLO-World is pre-trained for 100 epochs on 32 NVIDIA V100 GPUs with a total batch size of 512. During pre-training, we follow previous works [20] and adopt color augmentation, random affine, random flip, and mosaic with 4 images for data augmentation. The text encoder is frozen during pre-training.
136
+
137
+ <table><tr><td>Dataset</td><td>Type</td><td>Vocab.</td><td>Images</td><td>Anno.</td></tr><tr><td>Objects365V1 [46]</td><td>Detection</td><td>365</td><td>609k</td><td>9,621k</td></tr><tr><td>GQA [17]</td><td>Grounding</td><td>-</td><td>621k</td><td>3,681k</td></tr><tr><td>Flickr [38]</td><td>Grounding</td><td>-</td><td>149k</td><td>641k</td></tr><tr><td>CC3M† [47]</td><td>Image-Text</td><td>-</td><td>246k</td><td>821k</td></tr></table>
138
+
139
+ Table 1. Pre-training Data. The specifications of the datasets used for pre-training YOLO-World.
140
+
141
+ Pre-training Data. For pre-training YOLO-World, we mainly adopt detection or grounding datasets including Objects365 (V1) [46], GQA [17], Flickr30k [38], as specified in Tab. 1. Following [24], we exclude the images from the COCO dataset in GoldG [21] (GQA and Flickr30k). The annotations of the detection datasets used for pretraining contain both bounding boxes and categories or noun phrases. In addition, we also extend the pre-training data with image-text pairs, i.e., $\mathrm{CC3M}^{\dagger}$ [47], which we have labeled 246k images through the pseudo-labeling method discussed in Sec. 3.4.
142
+
143
+ Zero-shot Evaluation. After pre-training, we directly evaluate the proposed YOLO-World on the LVIS dataset [14] in a zero-shot manner. The LVIS dataset contains 1203 object categories, which is much more than the categories of the pre-training detection datasets and can measure the performance on large vocabulary detection. Following previous works [21, 24, 56, 57], we mainly evaluate on LVIS minival [21] and report the Fixed AP [4] for comparison. The maximum number of predictions is set to 1000.
144
+
145
+ Main Results on LVIS Object Detection. In Tab. 2, we compare the proposed YOLO-World with recent state-of-the-art methods [21, 30, 56, 57, 59] on the LVIS benchmark in a zero-shot manner. Considering the computation burden and model parameters, we mainly compare with those methods based on lighter backbones, e.g., Swin-T [32]. Remarkably, YOLO-World outperforms previous state-of-the-art methods in terms of zero-shot performance and inference speed. Compared to GLIP, GLIPv2, and Grounding DINO, which incorporate more data, e.g., Cap4M (CC3M+SBU [37]), YOLO-World pre-trained on O365 & GoldG obtains better performance even with fewer model parameters. Compared to DetCLIP, YOLO-World achieves comparable performance (35.4 vs. 34.4) while obtaining a $20\times$ increase in inference speed. The experimental results also demonstrate that small models, e.g., YOLO-World-S with 13M parameters, can be used for vision-language pre-training and obtain strong open-vocabulary capabilities.
146
+
147
+ # 4.3. Ablation Experiments
148
+
149
+ We provide extensive ablation studies to analyze YOLO-World from two primary aspects, i.e., pre-training and architecture. Unless specified, we mainly conduct ablation experiments based on YOLO-World-L, pre-trained on Objects365, with zero-shot evaluation on LVIS minival.
150
+
151
+ Pre-training Data. In Tab. 3, we evaluate the performance of pre-training YOLO-World using different data. Compared to the baseline trained on Objects365, adding GQA can significantly improve performance with an 8.4 AP gain on LVIS. This improvement can be attributed to the richer textual information provided by the GQA dataset, which can enhance the model's ability to recognize large vocabulary objects. Adding part of CC3M samples (8% of the full datasets) can further bring 0.5 AP gain with 1.3 AP on rare objects. Tab. 3 demonstrates that adding more data can effectively improve the detection capabilities on large-vocabulary scenarios. Furthermore, as the amount of data increases, the performance continues to improve, highlighting the benefits of leveraging larger and more diverse datasets for training.
152
+
153
+ Ablations on RepVL-PAN. Tab. 4 demonstrates the effectiveness of the proposed RepVL-PAN of YOLO-World, including Text-guided CSPLayers and Image Pooling Attention, for the zero-shot LVIS detection. Specifically, we adopt two settings, i.e., (1) pre-training on O365 and (2) pre-training on O365 & GQA. Compared to O365 which only contains category annotations, GQA includes rich texts, particularly in the form of noun phrases. As shown in Tab. 4, the proposed RepVL-PAN improves the baseline (YOLOv8-PAN [20]) by 1.1 AP on LVIS, and the improvements are remarkable in terms of the rare categories $(\mathrm{AP}_r)$ of LVIS, which are hard to detect and recognize. In addition, the improvements become more significant when YOLO-World is pre-trained with the GQA dataset and experiments indicate that the proposed RepVL-PAN works better with rich textual information.
154
+
155
+ Text Encoders. In Tab. 5, we compare the performance of using different text encoders, i.e., BERT-base [5] and CLIP-base (ViT-base) [39]. We exploit two settings during pre-training, i.e., frozen and fine-tuned, and the learning rate for fine-tuning text encoders is a $0.01\times$ factor of the basic learning rate. As Tab. 5 shows, the CLIP text encoder, which is pre-trained with image-text pairs and provides stronger vision-centric embeddings, obtains superior results to BERT (+10.1 AP for rare categories in LVIS). Fine-tuning BERT during pre-training brings significant improvements (+3.7 AP), while fine-tuning CLIP leads to a severe performance drop. We attribute this drop to the fact that
156
+
157
+ <table><tr><td>Method</td><td>Backbone</td><td>Params</td><td>Pre-trained Data</td><td>FPS</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>MDETR [21]</td><td>R-101 [15]</td><td>169M</td><td>GoldG</td><td>-</td><td>24.2</td><td>20.9</td><td>24.3</td><td>24.2</td></tr><tr><td>GLIP-T [24]</td><td>Swin-T [32]</td><td>232M</td><td>O365,GoldG</td><td>0.12</td><td>24.9</td><td>17.7</td><td>19.5</td><td>31.0</td></tr><tr><td>GLIP-T [24]</td><td>Swin-T [32]</td><td>232M</td><td>O365,GoldG,Cap4M</td><td>0.12</td><td>26.0</td><td>20.8</td><td>21.4</td><td>31.0</td></tr><tr><td>GLIPv2-T [59]</td><td>Swin-T [32]</td><td>232M</td><td>O365,GoldG</td><td>0.12</td><td>26.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>GLIPv2-T [59]</td><td>Swin-T [32]</td><td>232M</td><td>O365,GoldG,Cap4M</td><td>0.12</td><td>29.0</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Grounding DINO-T [30]</td><td>Swin-T [32]</td><td>172M</td><td>O365,GoldG</td><td>1.5</td><td>25.6</td><td>14.4</td><td>19.6</td><td>32.2</td></tr><tr><td>Grounding DINO-T [30]</td><td>Swin-T [32]</td><td>172M</td><td>O365,GoldG,Cap4M</td><td>1.5</td><td>27.4</td><td>18.1</td><td>23.3</td><td>32.7</td></tr><tr><td>DetCLIP-T [56]</td><td>Swin-T [32]</td><td>155M</td><td>O365,GoldG</td><td>2.3</td><td>34.4</td><td>26.9</td><td>33.9</td><td>36.3</td></tr><tr><td>YOLO-World-S</td><td>YOLOv8-S</td><td>13M (77M)</td><td>O365,GoldG</td><td>74.1 (19.9)</td><td>26.2</td><td>19.1</td><td>23.6</td><td>29.8</td></tr><tr><td>YOLO-World-M</td><td>YOLOv8-M</td><td>29M (92M)</td><td>O365,GoldG</td><td>58.1 (18.5)</td><td>31.0</td><td>23.8</td><td>29.2</td><td>33.9</td></tr><tr><td>YOLO-World-L</td><td>YOLOv8-L</td><td>48M (110M)</td><td>O365,GoldG</td><td>52.0 (17.6)</td><td>35.0</td><td>27.1</td><td>32.8</td><td>38.3</td></tr><tr><td>YOLO-World-L</td><td>YOLOv8-L</td><td>48M (110M)</td><td>O365,GoldG,CC3M†</td><td>52.0 (17.6)</td><td>35.4</td><td>27.6</td><td>34.1</td><td>38.0</td></tr></table>
158
+
159
+ Table 2. Zero-shot Evaluation on LVIS. We evaluate YOLO-World on LVIS minival [21] in a zero-shot manner. We report the Fixed AP [4] for a fair comparison with recent methods. $\dagger$ denotes the pseudo-labeled CC3M in our setting, which contains 246k samples. The FPS is evaluated on one NVIDIA V100 GPU w/o TensorRT. The parameters and FPS of YOLO-World are evaluated for both the re-parameterized version (w/o bracket) and the original version (w/ bracket).
160
+
161
+ <table><tr><td>Pre-trained Data</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>O365</td><td>23.5</td><td>16.2</td><td>21.1</td><td>27.0</td></tr><tr><td>O365,GQA</td><td>31.9</td><td>22.5</td><td>29.9</td><td>35.4</td></tr><tr><td>O365,GoldG</td><td>32.5</td><td>22.3</td><td>30.6</td><td>36.0</td></tr><tr><td>O365,GoldG,CC3M†</td><td>33.0</td><td>23.6</td><td>32.0</td><td>35.5</td></tr></table>
162
+
163
+ Table 3. Ablations on Pre-training Data. We evaluate the zero-shot performance on LVIS of pre-training YOLO-World with different amounts of data.
164
+
165
+ <table><tr><td>GQA</td><td>T→I</td><td>I→T</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>X</td><td>X</td><td>X</td><td>22.4</td><td>14.5</td><td>20.1</td><td>26.0</td></tr><tr><td>X</td><td>✓</td><td>X</td><td>23.2</td><td>15.2</td><td>20.6</td><td>27.0</td></tr><tr><td>X</td><td>✓</td><td>✓</td><td>23.5</td><td>16.2</td><td>21.1</td><td>27.0</td></tr><tr><td>✓</td><td>X</td><td>X</td><td>29.7</td><td>21.0</td><td>27.1</td><td>33.6</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>31.9</td><td>22.5</td><td>29.9</td><td>35.4</td></tr></table>
166
+
167
+ fine-tuning on O365 may degrade the generalization ability of the pre-trained CLIP, since O365 contains only 365 categories and lacks abundant textual information.
168
+
169
+ # 4.4. Fine-tuning YOLO-World
170
+
171
+ In this section, we further fine-tune YOLO-World for close-set object detection on the COCO dataset and LVIS dataset
172
+
173
+ Table 4. Ablations on Re-parameterizable Vision-Language Path Aggregation Network. We evaluate the zero-shot performance on LVIS of the proposed Vision-Language Path Aggregation Network. $\mathrm{T} \rightarrow \mathrm{I}$ and $\mathrm{I} \rightarrow \mathrm{T}$ denote the Text-guided CSPLayers and Image-Pooling Attention, respectively.
174
+
175
+ <table><tr><td>Text Encoder</td><td>Frozen?</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>BERT-base</td><td>Frozen</td><td>14.6</td><td>3.4</td><td>10.7</td><td>20.0</td></tr><tr><td>BERT-base</td><td>Fine-tune</td><td>18.3</td><td>6.6</td><td>14.6</td><td>23.6</td></tr><tr><td>CLIP-base</td><td>Frozen</td><td>22.4</td><td>14.5</td><td>20.1</td><td>26.0</td></tr><tr><td>CLIP-base</td><td>Fine-tune</td><td>19.3</td><td>8.6</td><td>15.7</td><td>24.8</td></tr></table>
176
+
177
+ Table 5. Text Encoder in YOLO-World. We ablate different text encoders in YOLO-World through the zero-shot LVIS evaluation.
178
+
179
+ to demonstrate the effectiveness of the pre-training.
180
+
181
+ Experimental Setup. We use the pre-trained weights to initialize YOLO-World for fine-tuning. All models are finetuned for 80 epochs with the AdamW optimizer and the initial learning rate is set to 0.0002. In addition, we fine-tune the CLIP text encoder with a learning factor of 0.01. For the LVIS dataset, we follow previous works [8, 13, 63] and finetune YOLO-World on the LVIS-base (common & frequent) and evaluate it on the LVIS-novel (rare).
182
+
183
+ COCO Object Detection. We compare the pre-trained YOLO-World with previous YOLO detectors [20, 23, 52] in Tab. 6. For fine-tuning YOLO-World on the COCO dataset, we remove the proposed RepVL-PAN for further acceleration considering that the vocabulary size of the COCO dataset is small. In Tab. 6, it's evident that our approach can achieve decent zero-shot performance on the COCO dataset, which indicates that YOLO-World has strong generalization ability. Moreover, YOLO-World after fine-tuning on the COCO train2017 demonstrates
184
+
185
+ <table><tr><td>Method</td><td>Pre-train</td><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td><td>FPS</td></tr><tr><td colspan="6">Training from scratch.</td></tr><tr><td>YOLOv6-S [23]</td><td>✘</td><td>43.7</td><td>60.8</td><td>47.0</td><td>442</td></tr><tr><td>YOLOv6-M [23]</td><td>✘</td><td>48.4</td><td>65.7</td><td>52.7</td><td>277</td></tr><tr><td>YOLOv6-L [23]</td><td>✘</td><td>50.7</td><td>68.1</td><td>54.8</td><td>166</td></tr><tr><td>YOLOv7-T [52]</td><td>✘</td><td>37.5</td><td>55.8</td><td>40.2</td><td>404</td></tr><tr><td>YOLOv7-L [52]</td><td>✘</td><td>50.9</td><td>69.3</td><td>55.3</td><td>182</td></tr><tr><td>YOLOv7-X [52]</td><td>✘</td><td>52.6</td><td>70.6</td><td>57.3</td><td>131</td></tr><tr><td>YOLOv8-S [20]</td><td>✘</td><td>44.4</td><td>61.2</td><td>48.1</td><td>386</td></tr><tr><td>YOLOv8-M [20]</td><td>✘</td><td>50.5</td><td>67.3</td><td>55.0</td><td>238</td></tr><tr><td>YOLOv8-L [20]</td><td>✘</td><td>52.9</td><td>69.9</td><td>57.7</td><td>159</td></tr><tr><td colspan="6">Zero-shot transfer.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>37.6</td><td>52.3</td><td>40.7</td><td>-</td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>42.8</td><td>58.3</td><td>46.4</td><td>-</td></tr><tr><td>YOLO-World-L</td><td>O+G</td><td>44.4</td><td>59.8</td><td>48.3</td><td>-</td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>45.1</td><td>60.7</td><td>48.9</td><td>-</td></tr><tr><td colspan="6">Fine-tuned w/ RepVL-PAN.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>45.9</td><td>62.3</td><td>50.1</td><td>-</td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>51.2</td><td>68.1</td><td>55.9</td><td>-</td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>53.3</td><td>70.1</td><td>58.2</td><td>-</td></tr><tr><td colspan="6">Fine-tuned w/o RepVL-PAN.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>45.7</td><td>62.3</td><td>49.9</td><td>373</td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>50.7</td><td>67.2</td><td>55.1</td><td>231</td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>53.3</td><td>70.3</td><td>58.1</td><td>156</td></tr></table>
186
+
187
+ higher performance compared to previous methods trained from scratch.
188
+
189
+ LVIS Object Detection. In Tab. 7, we evaluate the fine-tuning performance of YOLO-World on the standard LVIS dataset. First, compared to the oracle YOLOv8 models [20] trained on the full LVIS dataset, YOLO-World achieves significant improvements, especially for larger models, e.g., YOLO-World-L outperforms YOLOv8-L by 7.2 AP and 10.2 $\mathrm{AP}_r$. These improvements demonstrate the effectiveness of the proposed pre-training strategy for large-vocabulary detection. Moreover, YOLO-World, as an efficient one-stage detector, outperforms previous state-of-the-art two-stage methods [8, 13, 22, 53, 63] in overall performance without extra designs, e.g., learnable prompts [8] or region-based alignments [13].
190
+
191
+ Table 6. Comparison with YOLOs on COCO Object Detection. We fine-tune YOLO-World on COCO train2017 and evaluate on COCO val2017. The results of YOLOv7 [52] and YOLOv8 [20] are obtained from MMYOLO [3]. 'O', 'G', and 'C' denote pre-training using Objects365, GoldG, and CC3M†, respectively. The FPS is measured on one NVIDIA V100 w/ TensorRT.
192
+
193
+ <table><tr><td>Method</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>ViLD [13]</td><td>27.8</td><td>16.7</td><td>26.5</td><td>34.2</td></tr><tr><td>RegionCLIP [62]</td><td>28.2</td><td>17.1</td><td>-</td><td>-</td></tr><tr><td>Detic [63]</td><td>26.8</td><td>17.8</td><td>-</td><td>-</td></tr><tr><td>FVLM [22]</td><td>24.2</td><td>18.6</td><td>-</td><td>-</td></tr><tr><td>DetPro [8]</td><td>28.4</td><td>20.8</td><td>27.8</td><td>32.4</td></tr><tr><td>BARON [53]</td><td>29.5</td><td>23.2</td><td>29.3</td><td>32.5</td></tr><tr><td>YOLOv8-S</td><td>19.4</td><td>7.4</td><td>17.4</td><td>27.0</td></tr><tr><td>YOLOv8-M</td><td>23.1</td><td>8.4</td><td>21.3</td><td>31.5</td></tr><tr><td>YOLOv8-L</td><td>26.9</td><td>10.2</td><td>25.4</td><td>35.8</td></tr><tr><td>YOLO-World-S</td><td>23.9</td><td>12.8</td><td>20.4</td><td>32.7</td></tr><tr><td>YOLO-World-M</td><td>28.8</td><td>15.9</td><td>24.6</td><td>39.0</td></tr><tr><td>YOLO-World-L</td><td>34.1</td><td>20.4</td><td>31.1</td><td>43.5</td></tr></table>
194
+
195
+ Table 7. Comparison with Open-Vocabulary Detectors on LVIS. We train YOLO-World on LVIS-base (including common and frequent categories) and report the bbox AP. The YOLOv8 models are trained on the full LVIS dataset (including base and novel categories) with class-balanced sampling.
196
+
197
+ # 4.5. Open-Vocabulary Instance Segmentation
198
+
199
+ In this section, we further fine-tune YOLO-World for segmenting objects under the open-vocabulary setting, which can be termed open-vocabulary instance segmentation (OVIS). Previous methods [18] have explored OVIS with pseudo-labelling on novel objects. In contrast, considering that YOLO-World has strong transfer and generalization capabilities, we directly fine-tune YOLO-World on a subset of data with mask annotations and evaluate the segmentation performance under large-vocabulary settings. Specifically, we benchmark open-vocabulary instance segmentation under two settings:
200
+
201
+ (1) COCO to LVIS setting, we fine-tune YOLO-World on the COCO dataset (including 80 categories) with mask annotations, under which the models need to transfer from 80 categories to 1203 categories $(80\to 1203)$ ;
202
+ (2) LVIS-base to LVIS setting, we fine-tune YOLO-World on the LVIS-base (including 866 categories, common & frequent) with mask annotations, under which the models need to transfer from 866 categories to 1203 categories $(866\rightarrow 1203)$ .
203
+
204
+ We evaluate the fine-tuned models on the standard LVIS val2017 with 1203 categories, in which 337 rare categories are unseen and can be used to measure the open-vocabulary performance.
205
+
206
+ Results. Tab. 8 shows the experimental results of extending YOLO-World for open-vocabulary instance segmentation. Specifically, we adopt two fine-tuning strategies: (1) only fine-tuning the segmentation head and (2) fine-tuning
207
+
208
+ all modules. Under strategy (1), the fine-tuned YOLO-World still retains the zero-shot capabilities acquired from the pre-training stage, allowing it to generalize to unseen categories without additional fine-tuning. Strategy (2) enables YOLO-World to fit the LVIS dataset better, but may degrade its zero-shot capabilities.
209
+
210
+ Tab. 8 shows the comparisons of fine-tuning YOLO-World with different settings (COCO or LVIS-base) and different strategies (fine-tuning seg. head or fine-tuning all). Firstly, fine-tuning on LVIS-base obtains better performance compared to that based on COCO. However, the ratios between AP and $\mathrm{AP}_r$ $(\mathrm{AP}_r / \mathrm{AP})$ are nearly unchanged, e.g., the ratios of YOLO-World on COCO and LVIS-base are $76.5\%$ and $74.3\%$ , respectively. Considering that the detector is frozen, we attribute the performance gap to the fact that the LVIS dataset provides more detailed and denser segmentation annotations, which are beneficial for learning the segmentation head. When fine-tuning all modules, YOLO-World obtains remarkable improvements on LVIS, e.g., YOLO-World-L achieves 9.6 AP gain. However, the fine-tuning might degrade the open-vocabulary performance and lead to a 0.6 box $\mathrm{AP}_r$ drop for YOLO-World-L.
211
+
212
+ # 4.6. Visualizations
213
+
214
+ We provide the visualization results of pre-trained YOLO-World-L under three settings: (a) we perform zero-shot inference with LVIS categories; (b) we input the custom prompts with fine-grained categories with attributes; (c) referring detection. The visualizations also demonstrate that YOLO-World has a strong generalization ability for open-vocabulary scenarios along with referring ability.
215
+
216
+ Zero-shot Inference on LVIS. Fig. 5 shows the visualization results based on the LVIS categories which are generated by the pre-trained YOLO-World-L in a zero-shot manner. The pre-trained YOLO-World exhibits strong zero-shot transfer capabilities and is able to detect as many objects as possible within the image.
217
+
218
+ Inference with User's Vocabulary. In Fig. 6, we explore the detection capabilities of YOLO-World with user-defined categories. The visualization results demonstrate that the pre-trained YOLO-World-L also exhibits the capability for (1) fine-grained detection (i.e., detecting the parts of one object) and (2) fine-grained classification (i.e., distinguishing different sub-categories of objects).
219
+
220
+ Referring Object Detection. In Fig. 7, we leverage some descriptive (discriminative) noun phrases as input, e.g., the standing person, to explore whether the model can locate regions or objects in the image that match our given input. The visualization results display the phrases and their
221
+
222
+ corresponding bounding boxes, demonstrating that the pretrained YOLO-World has the referring or grounding capability. This ability can be attributed to the proposed pretraining strategy with large-scale training data.
223
+
224
+ # 5. Conclusion
225
+
226
+ We present YOLO-World, a cutting-edge real-time open-vocabulary detector aiming to improve efficiency and open-vocabulary capability in real-world applications. In this paper, we have reshaped the prevalent YOLOs into a vision-language YOLO architecture for open-vocabulary pre-training and detection, and proposed RepVL-PAN, which connects vision and language information within the network and can be re-parameterized for efficient deployment. We further present effective pre-training schemes with detection, grounding, and image-text data to endow YOLO-World with a strong capability for open-vocabulary detection. Experiments demonstrate the superiority of YOLO-World in terms of speed and open-vocabulary performance and indicate the effectiveness of vision-language pre-training on small models, which is insightful for future research. We hope YOLO-World can serve as a new benchmark for addressing real-world open-vocabulary detection.
227
+
228
+ # References
229
+
230
+ [1] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, pages 213-229, 2020. 2
231
+ [2] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, Zheng Zhang, Dazhi Cheng, Chenchen Zhu, Tianheng Cheng, Qijie Zhao, Buyu Li, Xin Lu, Rui Zhu, Yue Wu, Jifeng Dai, Jingdong Wang, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5
232
+ [3] MMYOLO Contributors. MMYOLO: OpenMMLab YOLO series toolbox and benchmark. https://github.com/open-mmlab/mmyolo, 2022. 5, 8
233
+ [4] Achal Dave, Piotr Dollár, Deva Ramanan, Alexander Kirillov, and Ross B. Girshick. Evaluating large-vocabulary object detectors: The devil is in the details. CoRR, abs/2102.01066, 2021. 6, 7
234
+ [5] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In NAACL-HLT, pages 4171–4186, 2019. 1, 4, 6
235
+ [6] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In CVPR, pages 13733-13742, 2021.
236
+ [7] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner,
237
+
238
+ <table><tr><td>Model</td><td>Fine-tune Data</td><td>Fine-tune Modules</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td><td>APb</td><td>APr</td></tr><tr><td>YOLO-World-M</td><td>COCO</td><td>Seg Head</td><td>12.3</td><td>9.1</td><td>10.9</td><td>14.6</td><td>22.3</td><td>16.2</td></tr><tr><td>YOLO-World-L</td><td>COCO</td><td>Seg Head</td><td>16.2</td><td>12.4</td><td>15.0</td><td>19.2</td><td>25.3</td><td>18.0</td></tr><tr><td>YOLO-World-M</td><td>LVIS-base</td><td>Seg Head</td><td>16.7</td><td>12.6</td><td>14.6</td><td>20.8</td><td>22.3</td><td>16.2</td></tr><tr><td>YOLO-World-L</td><td>LVIS-base</td><td>Seg Head</td><td>19.1</td><td>14.2</td><td>17.2</td><td>23.5</td><td>25.3</td><td>18.0</td></tr><tr><td>YOLO-World-M</td><td>LVIS-base</td><td>All</td><td>25.9</td><td>13.4</td><td>24.9</td><td>32.6</td><td>32.6</td><td>15.8</td></tr><tr><td>YOLO-World-L</td><td>LVIS-base</td><td>All</td><td>28.7</td><td>15.0</td><td>28.3</td><td>35.2</td><td>36.2</td><td>17.4</td></tr></table>
239
+
240
+ Table 8. Open-Vocabulary Instance Segmentation. We evaluate YOLO-World for open-vocabulary instance segmentation under the two settings. We fine-tune the segmentation head or all modules of YOLO-World and report Mask AP for comparison. $\mathbf{AP}^b$ denotes the box AP.
241
+
242
+ ![](images/7daeee39b858e4fcaaeb380ef44bbe95f2af627000468d8204a400d2661a198c.jpg)
243
+ Figure 5. Visualization Results on Zero-shot Inference on LVIS. We adopt the pre-trained YOLO-World-L and infer with the LVIS vocabulary (containing 1203 categories) on the COCO val2017.
244
+
245
+ Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 3
246
+ [8] Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. In CVPR, pages 14064-14073, 2022. 1, 3, 7, 8
247
+ [9] Chengjian Feng, Yujie Zhong, Yu Gao, Matthew R. Scott, and Weilin Huang. TOOD: task-aligned one-stage object detection. In ICCV, pages 3490-3499. IEEE, 2021. 5
248
+ [10] Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, and Jian Sun. YOLOX: exceeding YOLO series in 2021. CoRR, abs/2107.08430, 2021. 2
249
+ [11] Ross B. Girshick. Fast R-CNN. In ICCV, pages 1440-1448, 2015. 2
250
+ [12] Ross B. Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In CVPR, pages 580-587, 2014. 2
251
+ [13] Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui.
252
+
253
+ Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2022. 1, 2, 7, 8
254
+ [14] Agrim Gupta, Piotr Dollár, and Ross B. Girshick. LVIS: A dataset for large vocabulary instance segmentation. In CVPR, pages 5356-5364, 2019. 6
255
+ [15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 7
256
+ [16] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross B. Girshick. Mask R-CNN. In ICCV, pages 2980-2988, 2017. 1, 2
257
+ [17] Drew A. Hudson and Christopher D. Manning. GQA: A new dataset for real-world visual reasoning and compositional question answering. In CVPR, pages 6700-6709, 2019. 6
258
+ [18] Dat Huynh, Jason Kuen, Zhe Lin, Jiuxiang Gu, and Ehsan Elhamifar. Open-vocabulary instance segmentation via robust cross-modal pseudo-labeling. In CVPR, pages 7010-7021, 2022. 8
259
+ [19] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation
260
+
261
+ ![](images/b4fc24285991da59158e74765634d79f00fd7882057442e136384a80be7d961f.jpg)
262
+ {men, women, boy, girl}
263
+
264
+ ![](images/3fa0b1f1a6c59149bdd714be54971da9a9cbce29e6624d69dac6b04f19365e0c.jpg)
265
+ {elephant, ear, leg, trunk, ivory}
266
+
267
+ ![](images/a2b34ba5035b24c3ba21f5af7527fbfe395eb4a3b02e2c3403cc0caf77ed2cfd.jpg)
268
+ {golden dog, black dog, spotted dog}
269
+
270
+ ![](images/f642cfefa0a524ec359f364f7f3609a52cee436412023c5f9a2c294a4fa1cf2e.jpg)
271
+ {grass, sky, zebra, trunk, tree}
272
+
273
+ ![](images/d4a62d7edd671c1495870f4c2ddd34ccde6a5ce4a432f9826952ec5bc0c40731.jpg)
274
+ Figure 6. Visualization Results on User's Vocabulary. We define the custom vocabulary for each input image and YOLO-World can detect the accurate regions according to the vocabulary. Images are obtained from COCO val2017.
275
+ the person in red
276
+
277
+ ![](images/f0a13721535642314b7d870350676a9a008a35b6d36ef14de70ed56ffd464d20.jpg)
278
+ the brown animal
279
+
280
+ ![](images/4799d5e8f411bdf960544706e4dec7fcebb6f82b11724333a737adbe1f31e752.jpg)
281
+ the tallest person
282
+
283
+ ![](images/512d78efd7a20873f8126b188986ed7d2daadde51015e691daa6b4ebd6ae3220.jpg)
284
+ person with a white shirt
285
+
286
+ ![](images/08f2c9525ed4caa4ba4d422c912c7137290e85edf548c67a1fe7b9efdfdd1ea1.jpg)
287
+ the jumping person
288
+ Figure 7. Visualization Results on Referring Object Detection. We explore the capability of the pre-trained YOLO-World to detect objects with descriptive noun phrases. Images are obtained from COCO val2017.
289
+
290
+ ![](images/d0f65223fc0767ed06ce35c060e34d2d0f35706423bc35cd992a1ada6bccff82.jpg)
291
+ person holding a baseball bat
292
+
293
+ ![](images/5768e972e5552e153eb5de949ac525c49600b78d98975da294a71114947bd955.jpg)
294
+ person holding a toy
295
+
296
+ ![](images/7d2ca0337dfe251e7248d00cc76ecf9a98a9cd603318177d057e41308c130e08.jpg)
297
+ the standing person
298
+
299
+ ![](images/e81ea8b958058538832825bf40d1649ea093fa71da70f6a662df4101ad183233.jpg)
300
+ moon
301
+
302
+ learning with noisy text supervision. In ICML, pages 4904-4916, 2021. 1, 3
303
+ [20] Glenn Jocher, Ayush Chaurasia, and Jing Qiu. Ultralytics yolov8. https://github.com/ultralytics/ultralytics, 2023. 2, 3, 4, 5, 6, 7, 8
304
+ [21] Aishwarya Kamath, Mannat Singh, Yann LeCun, Gabriel Synnaeve, Ishan Misra, and Nicolas Carion. MDETR - modulated detection for end-to-end multi-modal understanding. In ICCV, pages 1760-1770, 2021. 6, 7
305
+ [22] Weicheng Kuo, Yin Cui, Xiuye Gu, A. J. Piergiovanni, and Anelia Angelova. F-VLM: open-vocabulary object detection upon frozen vision and language models. CoRR, abs/2209.15639, 2022. 3, 8
306
+ [23] Chuyi Li, Lulu Li, Hongliang Jiang, Kaiheng Weng, Yifei Geng, Liang Li, Zaidan Ke, Qingyuan Li, Meng Cheng, Weiqiang Nie, Yiduo Li, Bo Zhang, Yufei Liang, Linyuan Zhou, Xiaoming Xu, Xiangxiang Chu, Xiaoming Wei, and Xiaolin Wei. Yolov6: A single-stage object detection frame
307
+
308
+ work for industrial applications. CoRR, abs/2209.02976, 2022. 2, 7, 8
309
+ [24] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In CVPR, pages 10955–10965, 2022. 1, 2, 3, 5, 6, 7, 13
310
+ [25] Chuang Lin, Peize Sun, Yi Jiang, Ping Luo, Lizhen Qu, Gholamreza Haffari, Zehuan Yuan, and Jianfei Cai. Learning object-language alignments for open-vocabulary object detection. In ICLR, 2023. 3
311
+ [26] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: common objects in context. In Proceedings of the European Conference on Computer Vision (ECCV), pages 740-755, 2014. 1, 2, 3, 13
312
+ [27] Tsung-Yi Lin, Piotr Dollár, Ross B. Girshick, Kaiming He, Bharath Hariharan, and Serge J. Belongie. Feature pyramid
313
+
314
+ networks for object detection. In CVPR, pages 936-944, 2017. 1, 2
315
+ [28] Tsung-Yi Lin, Priya Goyal, Ross B. Girshick, Kaiming He, and Piotr Dollar. Focal loss for dense object detection. In ICCV, pages 2999-3007, 2017. 2
316
+ [29] Shu Liu, Lu Qi, Haifang Qin, Jianping Shi, and Jiaya Jia. Path aggregation network for instance segmentation. In CVPR, pages 8759-8768, 2018. 2, 4
317
+ [30] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, and Lei Zhang. Grounding DINO: marrying DINO with grounded pre-training for open-set object detection. CoRR, abs/2303.05499, 2023. 1, 2, 3, 6, 7
318
+ [31] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott E. Reed, Cheng-Yang Fu, and Alexander C. Berg. SSD: single shot multibox detector. In ECCV, pages 21-37, 2016. 2
319
+ [32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 9992-10002, 2021. 2, 3, 6, 7
320
+ [33] Xiang Long, Kaipeng Deng, Guanzhong Wang, Yang Zhang, Qingqing Dang, Yuan Gao, Hui Shen, Jianguo Ren, Shumin Han, Errui Ding, and Shilei Wen. PP-YOLO: an effective and efficient implementation of object detector. CoRR, abs/2007.12099, 2020. 2
321
+ [34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In ICLR, 2019. 5
322
+ [35] Matthias Minderer, Alexey A. Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. Simple open-vocabulary object detection with vision transformers. In ECCV, 2022. 3
323
+ [36] Matthias Minderer, Alexey A. Gritsenko, and Neil Houlsby. Scaling open-vocabulary object detection. In NeurIPS, 2023. 3
324
+ [37] Vicente Ordonez, Girish Kulkarni, and Tamara L. Berg. Im2text: Describing images using 1 million captioned photographs. In NeurIPS, pages 1143–1151, 2011. 6
325
+ [38] Bryan A. Plummer, Liwei Wang, Chris M. Cervantes, Juan C. Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. Int. J. Comput. Vis., pages 74–93, 2017. 6
326
+ [39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 1, 2, 3, 4, 5, 6, 13
327
+ [40] Joseph Redmon and Ali Farhadi. YOLO9000: better, faster, stronger. In CVPR, pages 6517-6525, 2017. 2
328
+ [41] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. CoRR, abs/1804.02767, 2018. 2
329
+ [42] Joseph Redmon, Santosh Kumar Divvala, Ross B. Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In CVPR, pages 779-788, 2016. 2, 3
330
+
331
+ [43] Joseph Redmon, Santosh Kumar Divvala, Ross B. Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In CVPR, pages 779-788, 2016. 1, 2, 4
332
+ [44] Shaoqing Ren, Kaiming He, Ross B. Girshick, and Jian Sun. Faster R-CNN: towards real-time object detection with region proposal networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1137-1149, 2017. 2
333
+ [45] Shaoqing Ren, Kaiming He, Ross B. Girshick, and Jian Sun. Faster R-CNN: towards real-time object detection with region proposal networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1137-1149, 2017. 1
334
+ [46] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In ICCV, pages 8429-8438, 2019. 2, 6
335
+ [47] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In ACL, pages 2556-2565, 2018. 5, 6, 13
336
+ [48] Cheng Shi and Sibei Yang. Edadet: Open-vocabulary object detection using early dense alignment. In ICCV, pages 15678-15688, 2023. 1
337
+ [49] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. FCOS: fully convolutional one-stage object detection. In ICCV, pages 9626-9635, 2019. 2
338
+ [50] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, pages 5998-6008, 2017. 2
339
+ [51] Chien-Yao Wang, Hong-Yuan Mark Liao, Yueh-Hua Wu, Ping-Yang Chen, Jun-Wei Hsieh, and I-Hau Yeh. Cspnet: A new backbone that can enhance learning capability of CNN. In CVPRW, pages 1571–1580, 2020. 2
340
+ [52] Chien-Yao Wang, Alexey Bochkovskiy, and Hong-Yuan Mark Liao. Yolov7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. In CVPR, pages 7464-7475, 2023. 2, 7, 8
341
+ [53] Size Wu, Wenwei Zhang, Sheng Jin, Wentao Liu, and Chen Change Loy. Aligning bag of regions for open-vocabulary object detection. In CVPR, pages 15254-15264, 2023. 1, 3, 8
342
+ [54] Johnathan Xie and Shuai Zheng. ZSD-YOLO: zero-shot YOLO detection using vision-language knowledge distillation. CoRR, 2021. 3
343
+ [55] Shangliang Xu, Xinxin Wang, Wenyu Lv, Qinyao Chang, Cheng Cui, Kaipeng Deng, Guanzhong Wang, Qingqing Dang, Shengyu Wei, Yuning Du, and Baohua Lai. PP-YOLOE: an evolved version of YOLO. CoRR, abs/2203.16250, 2022. 2
344
+ [56] Lewei Yao, Jianhua Han, Youpeng Wen, Xiaodan Liang, Dan Xu, Wei Zhang, Zhenguo Li, Chunjing Xu, and Hang Xu. Detclip: Dictionary-enriched visual-concept paralleled pretraining for open-world detection. In NeurIPS, 2022. 1, 2, 3, 6, 7
345
+ [57] Lewei Yao, Jianhua Han, Xiaodan Liang, Dan Xu, Wei Zhang, Zhenguo Li, and Hang Xu. Detclipv2: Scalable open-vocabulary object detection pre-training via word
346
+
347
+ region alignment. In CVPR, pages 23497-23506, 2023. 1, 3, 6
348
+ [58] Alireza Zareian, Kevin Dela Rosa, Derek Hao Hu, and Shih-Fu Chang. Open-vocabulary object detection using captions. In CVPR, pages 14393-14402, 2021. 1, 2, 3
349
+ [59] Haotian Zhang, Pengchuan Zhang, Xiaowei Hu, Yen-Chun Chen, Liunian Harold Li, Xiyang Dai, Lijuan Wang, Lu Yuan, Jenq-Neng Hwang, and Jianfeng Gao. Glipv2: Unifying localization and vision-language understanding. In NeurIPS, 2022. 1, 2, 3, 6, 7
350
+ [60] Hao Zhang, Feng Li, Shilong Liu, Lei Zhang, Hang Su, Jun Zhu, Lionel M. Ni, and Heung-Yeung Shum. DINO: DETR with improved denoising anchor boxes for end-to-end object detection. In ICLR, 2023. 3
351
+ [61] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap between anchor-based and anchor-free detection via adaptive training sample selection. In CVPR, pages 9756-9765, 2020. 2, 3
352
+ [62] Yiwu Zhong, Jianwei Yang, Pengchuan Zhang, Chunyuan Li, Noel Codella, Liunian Harold Li, Luowei Zhou, Xiyang Dai, Lu Yuan, Yin Li, and Jianfeng Gao. Regionclip: Region-based language-image pretraining. In CVPR, pages 16772-16782, 2022. 3, 8
353
+ [63] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In ECCV, pages 350-368, 2022. 3, 7, 8
354
+ [64] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: deformable transformers for end-to-end object detection. In ICLR, 2021. 2
355
+
356
+ # A. Additional Details
357
+
358
+ # A.1. Re-parameterization for RepVL-PAN
359
+
360
+ During inference on an offline vocabulary, we adopt re-parameterization for RepVL-PAN to speed up inference and simplify deployment. First, we pre-compute the text embeddings $W \in \mathbb{R}^{C \times D}$ with the text encoder.
361
+
362
+ Re-parameterize T-CSPLayer. For each T-CSPLayer in RepVL-PAN, we can re-parameterize and simplify the process of adding text guidance by reshaping the text embeddings $W \in \mathbb{R}^{C \times D \times 1 \times 1}$ into the weights of a $1 \times 1$ convolution layer (or a linear layer), as follows:
363
+
364
+ $$
365
+ X^{\prime} = X \odot \operatorname{Sigmoid}(\max(\operatorname{Conv}(X, W), \dim = 1)), \tag{4}
366
+ $$
367
+
368
+ where $X \in \mathbb{R}^{B \times D \times H \times W}$ and $X' \in \mathbb{R}^{B \times D \times H \times W}$ are the input and output image features. $\odot$ denotes matrix multiplication with reshape or transpose.
369
+
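+ As a rough PyTorch sketch of Eq. (4) (our own reading, with the function name and shapes as assumptions): the offline text embeddings become the weights of a 1x1 convolution, and the per-position maximum over classes gates the image features.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def reparam_t_csp_guidance(x: torch.Tensor, text_emb: torch.Tensor) -> torch.Tensor:
+     """Sketch of Eq. (4): text guidance folded into a 1x1 convolution.
+
+     x:        image features, shape (B, D, H, W)
+     text_emb: offline text embeddings W, shape (C, D)
+     """
+     # Reshape the C x D embeddings into the weights of a 1x1 conv layer.
+     weight = text_emb.reshape(*text_emb.shape, 1, 1)             # (C, D, 1, 1)
+     sim = F.conv2d(x, weight)                                     # (B, C, H, W)
+     gate = torch.sigmoid(sim.max(dim=1, keepdim=True).values)    # (B, 1, H, W)
+     return x * gate                                               # (B, D, H, W)
+ ```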
370
+ Re-parameterize I-Pooling Attention. The I-Pooling Attention can be re-parameterized or simplified as:
371
+
372
+ $$
373
+ \tilde{X} = \operatorname{cat}(\operatorname{MP}(X_{3}, 3), \operatorname{MP}(X_{4}, 3), \operatorname{MP}(X_{5}, 3)), \tag{5}
374
+ $$
375
+
376
+ where cat is the concatenation and $\operatorname{MP}(\cdot, 3)$ denotes max pooling with $3\times 3$ output features. $\{X_3, X_4, X_5\}$ are the multi-scale features in RepVL-PAN. $\tilde{X}$ is flattened and has the shape of $B\times D\times 27$. Then we can update the text embeddings by:
377
+
378
+ $$
379
+ W^{\prime} = W + \operatorname{Softmax}(W \odot \tilde{X}, \dim = -1) \odot \tilde{X}, \tag{6}
380
+ $$
381
+
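+ The following sketch mirrors Eqs. (5)-(6) in PyTorch, reading $\odot$ as matrix multiplication with the necessary transposes; the function name is ours, and any attention scaling factor is omitted, following the simplified form above.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def reparam_image_pooling(text_emb, x3, x4, x5):
+     """Sketch of Eqs. (5)-(6): update text embeddings with pooled image features.
+
+     text_emb: W, shape (C, D)
+     x3/x4/x5: multi-scale features, each (B, D, Hi, Wi)
+     """
+     # Eq. (5): max-pool each scale to a 3x3 grid and concatenate -> (B, D, 27).
+     pooled = torch.cat(
+         [F.adaptive_max_pool2d(x, 3).flatten(2) for x in (x3, x4, x5)], dim=-1
+     )
+     # Eq. (6): single-head attention from the C text embeddings to the 27 tokens.
+     scores = torch.einsum("cd,bdn->bcn", text_emb, pooled)      # (B, C, 27)
+     attn = scores.softmax(dim=-1)
+     update = torch.einsum("bcn,bdn->bcd", attn, pooled)         # (B, C, D)
+     return text_emb.unsqueeze(0) + update                       # (B, C, D)
+ ```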
382
+ # A.2. Fine-tuning Details.
383
+
384
+ We remove all T-CSPLayers and Image-Pooling Attention in RepVL-PAN when transferring YOLO-World to COCO [26] object detection, which only contains 80 categories and has a relatively low dependency on visual-language interaction. During fine-tuning, we initialize YOLO-World using pre-trained weights. The learning rate of fine-tuning is set to 0.0002 with the weight decay set to 0.05. After fine-tuning, we pre-compute the class text embeddings with given COCO categories and store the embeddings into the weights of the classification layers.
385
+
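+ A hypothetical sketch of the last step (pre-computing class text embeddings and storing them in the classification layer); `text_encoder` and `cls_head` are placeholder names, and the L2 normalization is our assumption based on the CLIP-style text encoder.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ @torch.no_grad()
+ def bake_class_embeddings(text_encoder, class_names, cls_head: torch.nn.Linear):
+     """Hypothetical sketch: pre-compute text embeddings for the fixed COCO
+     vocabulary and store them as the weights of the classification layer.
+
+     `text_encoder` is assumed to map a list of class names to a (num_classes, D)
+     tensor; `cls_head` is assumed to be a bias-free Linear(D, num_classes).
+     """
+     embeddings = text_encoder(class_names)          # (num_classes, D), assumed
+     embeddings = F.normalize(embeddings, dim=-1)    # CLIP-style L2 normalization
+     cls_head.weight.copy_(embeddings)               # offline vocabulary baked in
+ ```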
386
+ # B. Automatic Labeling on Large-scale Image-Text Data
387
+
388
+ In this section, we detail the procedures for labeling region-text pairs from large-scale image-text data, e.g., CC3M [47]. The overall labeling pipeline is illustrated in Fig. 8 and mainly consists of three steps: (1) extracting object nouns, (2) pseudo-labeling, and (3) filtering. As discussed in Sec. 3.4, we adopt the simple n-gram algorithm to extract nouns from captions.
389
+
390
+ Region-Text Proposals. After obtaining the set of object nouns $T = \{t_k\}^K$ from the first step, we leverage a pretrained open-vocabulary detector, i.e., GLIP-L [24], to generate pseudo boxes $\{B_i\}$ along with confidence scores $\{c_i\}$ :
391
+
392
+ $$
393
+ \left\{B_{i}, t_{i}, c_{i}\right\}_{i=1}^{N} = \text{GLIP-Labeler}(I, T), \tag{7}
394
+ $$
395
+
396
+ where $\{B_i, t_i, c_i\}_{i=1}^N$ are the coarse region-text proposals.
397
+
398
+ CLIP-based Re-scoring & Filtering. Considering that the region-text proposals contain much noise, we present a re-scoring and filtering pipeline based on the pre-trained CLIP [39]. Given the input image $I$, the caption $T$, and the coarse region-text proposals $\{B_i, t_i, c_i\}_{i=1}^N$, the specific pipeline is as follows:
399
+
400
+ (1) Compute Image-Text Score: we forward the image $I$ with its caption $T$ into CLIP and obtain the image-text similarity score $s^{img}$ .
401
+ (2) Compute Region-Text Score: we crop the region images from the input image according to the region boxes $\{B_i\}$ . Then we forward the cropped images along with their texts $\{t_i\}$ into CLIP and obtain the region-text similarity $S^r = \{s_i^r\}_{i=1}^N$ .
402
+
403
+ ![](images/359395d57c91384cb75fe9ef2c84207140e34cc1b1b2ba9d4a935d41eb642133.jpg)
404
+ Figure 8. Labeling Pipeline for Image-Text Data. We first leverage the simple n-gram algorithm to extract object nouns from the captions. We then adopt a pre-trained open-vocabulary detector to generate pseudo boxes given the object nouns, which form the coarse region-text proposals. Finally, we use a pre-trained CLIP to re-score or re-label the boxes along with filtering.
405
+
406
+ (3) [Optional] Re-Labeling: we can forward each cropped image with all nouns and assign the noun with maximum similarity, which can help correct the texts wrongly labeled by GLIP.
407
+ (4) Rescoring: we adopt the region-text similarity $S^r$ to rescore the confidence scores $\tilde{c}_i = \sqrt{c_i * s_i^r}$ .
408
+ (5) Region-level Filtering: we first divide the region-text proposals into different groups according to the texts and then perform non-maximum suppression (NMS) to filter the duplicate predictions (the NMS threshold is set to 0.5). Then we filter out the proposals with low confidence scores (the threshold is set to 0.3).
409
+ (6) Image-level Filtering: we compute the image-level region-text scores $s^{region}$ by averaging the kept region-text scores. Then we obtain the image-level confidence score by $s = \sqrt{s^{img} * s^{region}}$ and we keep the images with scores larger than 0.3.
410
+
411
+ The thresholds mentioned above are set empirically based on a sample of the labeled results, and the whole pipeline is automatic, without human verification. Finally, the labeled samples are used for pre-training YOLO-World. We will provide the pseudo annotations of CC3M for further research.
412
+
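+ A minimal sketch of the re-scoring and filtering arithmetic from steps (4)-(6) above; the data layout (a list of proposal dicts) and the helper name are our own assumptions, and NMS from step (5) is assumed to have been applied already.
+
+ ```python
+ import math
+
+ def rescore_and_filter(proposals, image_text_score, region_thresh=0.3, image_thresh=0.3):
+     """Sketch of steps (4)-(6): re-score proposals, drop low-confidence ones,
+     then decide whether to keep the image at all.
+
+     Each proposal dict is assumed to hold "glip_score" (detector confidence c_i)
+     and "clip_score" (CLIP region-text similarity s_i^r); NMS is done elsewhere.
+     """
+     kept = []
+     for p in proposals:
+         # Step (4): geometric mean of detector confidence and CLIP similarity.
+         p["score"] = math.sqrt(p["glip_score"] * p["clip_score"])
+         if p["score"] >= region_thresh:   # step (5): confidence filtering
+             kept.append(p)
+     if not kept:
+         return [], False
+     # Step (6): image-level score = sqrt(s_img * mean of kept region-text scores).
+     s_region = sum(p["clip_score"] for p in kept) / len(kept)
+     s_image = math.sqrt(image_text_score * s_region)
+     return kept, s_image >= image_thresh
+ ```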
413
+ # C. Pre-training YOLO-World at Scale
414
+
415
+ When pre-training small models, e.g., YOLO-World-S, a natural question we have is: how much capacity does a small model have, and how much training data or what kind of data does a small model need? To answer this question, we leverage different amounts of pseudo-labeled region-text pairs to pre-train YOLO-World. As shown in Tab. 9, adding more image-text samples can increase the zero-shot performance of YOLO-World-S. Tab. 9 indicates: (1) adding image-text data can improve the overall zero-shot performance of YOLO-World-S; (2) using an excessive amount of pseudo-labeled data may have some negative effects for small models (YOLO-World-S), though it can improve the
416
+
417
+ performance on rare categories $(\mathrm{AP}_r)$. However, using fine-grained annotations (GoldG) for small models can provide significant improvements, which indicates that large-scale, high-quality annotated data can significantly enhance the capabilities of small models. Tab. 3 in the main text also shows that pre-training with a combination of fine-annotated and pseudo-annotated data performs better. We will further explore data for pre-training small models or YOLO detectors in future work.
418
+
419
+ <table><tr><td>Method</td><td>Pre-trained Data</td><td>Samples</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>YOLO-World-S</td><td>O365</td><td>0.61M</td><td>16.3</td><td>9.2</td><td>14.1</td><td>20.1</td></tr><tr><td>YOLO-World-S</td><td>O365+GoldG</td><td>1.38M</td><td>24.2</td><td>16.4</td><td>21.7</td><td>27.8</td></tr><tr><td>YOLO-World-S</td><td>O365+CC3M-245k</td><td>0.85M</td><td>16.5</td><td>10.8</td><td>14.8</td><td>19.1</td></tr><tr><td>YOLO-World-S</td><td>O365+CC3M-520k</td><td>1.13M</td><td>19.2</td><td>10.7</td><td>17.4</td><td>22.4</td></tr><tr><td>YOLO-World-S</td><td>O365+CC3M-750k</td><td>1.36M</td><td>18.2</td><td>11.2</td><td>16.0</td><td>21.1</td></tr></table>
420
+
421
+ Table 9. Zero-shot Evaluation on LVIS. We evaluate the performance of YOLO-World-S pre-trained with different amounts of data, particularly image-text data.
2401.17xxx/2401.17270/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a92324240f326acde7217b0afcb45dff8406386334782dc84a024c771f8ae271
3
+ size 1112355
2401.17xxx/2401.17270/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17377/9b945812-ca2f-4c41-8deb-ac4bffe5ff80_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b38d351be0569f4184be8ff2b0ad5f776c83be96dc60452a0c35bd5ace3f09e
3
+ size 5369648
2401.17xxx/2401.17377/full.md ADDED
@@ -0,0 +1,527 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Infini-gram: Scaling Unbounded $n$ -gram Language Models to a Trillion Tokens
2
+
3
+ Jiacheng Liu $^{\text{♥}}$ Sewon Min $^{\text{♥}}$
4
+
5
+ Luke Zettlemoyer $^{\text{♥}}$ Yejin Choi $^{\text{♣}}$ Hannaneh Hajishirzi $^{\text{♣}}$
6
+
7
+ $^{\diamond}$ Paul G. Allen School of Computer Science & Engineering, University of Washington
8
+
9
+ $\spadesuit$ Allen Institute for Artificial Intelligence liujc@cs.washington.edu
10
+
11
+ # Abstract
12
+
13
+ Are $n$ -gram language models still relevant in this era of neural large language models (LLMs)? Our answer is yes, and we showcase their values in both text analysis and improving neural LLMs. This was done by modernizing $n$ -gram LMs in two aspects. First, we train them at the same data scale as neural LLMs—5 trillion tokens. This is the largest $n$ -gram LM ever built. Second, existing $n$ -gram LMs use small $n$ which hinders their performance; we instead allow $n$ to be arbitrarily large, by introducing a new $\infty$ -gram LM with backoff. Instead of pre-computing $n$ -gram count tables (which would be very expensive), we develop an engine named infini-gram—powered by suffix arrays—that can compute $\infty$ -gram (as well as $n$ -gram with arbitrary $n$ ) probabilities with millisecond-level latency. The $\infty$ -gram framework and infini-gram engine enable us to conduct many novel and interesting analyses of human-written and machine-generated text: we find that the $\infty$ -gram LM has fairly high accuracy for next-token prediction (47%), and can complement neural LLMs to greatly reduce their perplexity. When analyzing machine-generated text, we also observe irregularities in the machine- $\infty$ -gram agreement level with respect to the suffix length, which indicates deficiencies in neural LLM pretraining and the positional embeddings of Transformers.
14
+
15
+ ![](images/f8fa2926865f7ea2e43a2b763a86088ea6df87d87367c7003060df835d3f5f8f.jpg)
16
+
17
+ ![](images/6c30dc930117b0b875f81e921743b1309e495902e236ab77eb71fbe8523b7220.jpg)
18
+
19
+ ![](images/04eb70ce20c64ec3f49064858444371f931137261e7e1fa18ea1f4635cd724b5.jpg)
20
+
21
+ ![](images/4d1e36294f7a24c25d75dbeda2ca25367a9564acd001205fdad4e2c9bc21189b.jpg)
22
+
23
+ ![](images/19c415e0b4fc47d2d059481c3d3e2c6ec5ef66822b4cd565bfeba05e34420b1d.jpg)
24
+
25
+ - Project Homepage: infini-gram.io
+ - Web Interface: infini-gram.io/demo
+ - API Endpoint: api.infini-gram.io
+ - Python Package: pypi.org/project/infini-gram
+ - Source Code: github.com/liujch1998/infini-gram
44
+
45
+ ![](images/bf167053a3970f0e192cdfc369fca320aa54d24fb1b06ebdb1a3185b453a45c1.jpg)
46
+
47
+ Figure 1 example (prompt): "... conducts research at the Paul G. Allen School of Computer Science and Engineering, University of"
+
+ - 5-gram LM $(n = 5)$: conditions only on "Engineering, University of", with cnt(Engineering, University of) = 274644, and the resulting $P(\ast \mid$ Engineering, University of$)$ gives an incorrect prediction.
+ - $\infty$ -gram LM ($n = 16$ for this case): the longer suffix "research at the Paul G. Allen School of Computer Science and Engineering, University of" has count 0, so it backs off to "at the Paul G. Allen School of Computer Science and Engineering, University of" with count 10, and $P(\ast \mid$ at the Paul G. Allen School of Computer Science and Engineering, University of$)$ gives the correct prediction.
64
+
65
+ ![](images/ed092b7dcc083de8d643a5e4f6f420dce69b098f2b7a30f285b884cfb5573fb1.jpg)
66
+ Figure 1: An example where a 5-gram LM gives an incorrect prediction but the $\infty$ -gram gives the correct prediction by using the longest suffix of the prompt that has a non-zero count in the corpus. The counting and distribution estimate in $\infty$ -gram LM are powered by our infini-gram engine.
67
+
68
+ ![](images/6d2773f7bf835a47637bd1b5711053c46a48dc139abd38b0cab97f43e7b7b119.jpg)
69
+
70
+ ![](images/c593a7723611933a198e7d212bc2088fa34e5f8707bb1271126c066f4d390c59.jpg)
71
+
72
+ ![](images/e2a9d646a300493a23ffc9a285e45b1a30227f0150e89b7263dda25878f60be4.jpg)
73
+
74
+ ![](images/893e89843b37b115ba0d580a56da894c892386a7b6dc7fa7c8526990a27e710e.jpg)
75
+
76
+ ![](images/1e068c417cfce1c7bb495404ea6bd7bf00d098346e9aee1e1b5fffd6bde5c234.jpg)
77
+
78
+ # 1 Introduction
79
+
80
+ When pretrained on trillion-token corpora, neural large language models (LLMs) achieve groundbreaking performance (Touvron et al., 2023a; Geng & Liu, 2023; Groeneveld et al., 2024). However, we do not yet know how such data scale would benefit other language modeling approaches. In particular, how well does the classical, $n$ -gram language model (LM) perform if estimated from such massive corpora? In other words, are $n$ -gram LMs still relevant in this era of neural LLMs?
81
+
82
+ Our answer is yes. As we will show, $n$ -gram LMs are useful for both text analysis and improving neural LLMs. Yet we need to first modernize the canonical $n$ -gram LM in two aspects: the training data size, and the value of $n$ . To achieve broader data coverage, we scale up the training data for $n$ -gram LMs to 5 trillion tokens, by combining some of the largest open-source text corpora. This is the largest $n$ -gram LM ever built. Historically, $n$ -gram indexes have been built only for small $n$ 's (e.g., $n \leq 5$ ; Brants et al. (2007)), because the size of naive $n$ -gram count table grows almost exponentially wrt $n$ . We instead find significant value in increasing the value of $n$ . As illustrated in Figure 1, a 5-gram LM is poorly predictive of the next token, because it discards the rich context in the prompt; meanwhile, if we can use a larger $n$ (in this case $n = 16$ ), the prediction becomes much more accurate. As such, we develop our $n$ -gram LM with unbounded $n$ , or in other words, an $\infty$ -gram LM. We use a variant of backoff (Jurafsky & Martin, 2000), where we resort to smaller $n$ when longer $n$ -grams have a zero count. Due to sparsity in the $\infty$ -gram estimates, in some of the later experiments (§5), we will interpolate between the $\infty$ -gram LM and neural LMs to yield a hybrid LM upon which perplexity can be computed.
83
+
84
+ We develop a low-latency, resource-efficient engine to serve this massive $\infty$ -gram LM. Instead of building an explicit $n$ -gram count table, which is infeasible for arbitrarily large $n$ and such extreme data scale, we power the $\infty$ -gram LM with a suffix array of the dataset - a data structure that supports fast $n$ -gram counting, and is efficient in both storage space and compute. Our index takes 7 bytes of storage per token (3.5x overhead compared to the raw dataset), and on a dataset with 1.4 trillion tokens, it can be built with a single 128-core CPU node in about 2 days, using 10 TB of disk storage. Average inference latency is less than 20 milliseconds for counting an $n$ -gram and finding all positions of its occurrence (regardless of how large $n$ is or how frequently the $n$ -gram appears), and under 200 milliseconds for all other query types including $\infty$ -gram language modeling and decoding. All indexes stay on-disk at inference time. We refer to this engine as infini-gram.
85
+
86
+ Analyses with $\infty$ -gram (§4) offers new insights into human-written and machine-generated text. We found that $\infty$ -gram has a fairly high accuracy (47%) when predicting the next token given a prefix of a human-written document, and this accuracy is higher on tokens where the effective $n$ is larger. In contrast, conventional $n$ -grams (with small $n$ ) are insufficient in capturing a long enough context to predict the next token (29% accuracy). Moreover, we show that $\infty$ -gram can complement neural LMs and reach better performance when combined: heuristically interpolating between the estimates made by $\infty$ -gram and neural LMs can greatly reduce perplexity (by up to 73%) compared to the neural LMs alone, even when the neural LM is as large as 70B (§5). When analyzing the level of agreement with $\infty$ -gram, nucleus sampling (Holtzman et al., 2019) from neural LMs produces machine-generated text with an agreement plot most similar to human-written text, among other decoding methods like greedy decoding and temperature sampling; for greedy decoding, we observe significant fluctuation in the agreement level wrt the suffix length, which indicates deficiencies in neural LM pretraining and the positional embeddings of Transformers.
87
+
88
+ We host a public web interface and an API endpoint that serve $n$ -gram/ $\infty$ -gram queries on several popular open corpora: Dolma (Soldaini et al., 2023), RedPajama (Together, 2023), Pile (Gao et al., 2020), and C4 (Raffel et al., 2019). We also release a Python package for local serving and building new indexes, as well as our source code. We hope these tools can enable more insightful analysis and understanding of large text corpora, and open up new avenues for data-driven language modeling.
89
+
90
+ # 2 $\infty$ -gram LM: Extending $n$ -gram LMs with Unbounded $n$
91
+
92
+ Background: $n$ -gram LM. The $n$ -gram LM is a classical, statistical language model based on counting the occurrences of $n$ -grams. In its most simple form, the probability of a token $w_{i}$ given a context $w_{i-(n-1):i-1}$ is estimated as $P_{n}(w_{i} \mid w_{i-(n-1):i-1}) = \frac{\mathrm{cnt}(w_{i-(n-1):i-1} w_{i} \mid \mathcal{D})}{\mathrm{cnt}(w_{i-(n-1):i-1} \mid \mathcal{D})}$, where $\mathrm{cnt}(\mathbf{w} \mid \mathcal{D})$ is the number of times the $n$ -gram $\mathbf{w}$ appears in the training data $\mathcal{D}$ (i.e., a corpus), and $n$ is a pre-defined hyperparameter. (When $n = 1$, we define $w_{i-(n-1):i-1}$ as the empty string $\varepsilon$, whose count is equal to $|\mathcal{D}|$.) However, this naive version of $n$ -gram LM faces the sparsity issue: the numerator may be zero, resulting in an infinite perplexity. One common solution is backoff (Jurafsky & Martin, 2000): on an instance-wise basis, when the numerator is zero we decrease $n$ by one, and do this repeatedly until the numerator becomes positive. One caveat in backoff is that it does not yield a valid distribution for $P_{n}(*|w_{i-(n-1):i-1})$, because the effective $n$ is dependent on $w_{i}$. Therefore, further probability discounting is required to normalize this distribution (e.g., Katz backoff (Katz, 1987)).
93
+
94
+ Conventionally, $n$ -gram LMs have been implemented by building an $n$ -gram count table of the training data. This table stores all unique $n$ -grams that appear in the training data, each associated with its count. Such $n$ -gram count tables are huge and grow almost exponentially wrt $n$ . For example, the 5-gram count table for a 1.4-trillion-token corpus would consume 28 TB of disk space. As a result, previous $n$ -gram LMs are limited to very small $n$ , most commonly $n = 5$ , and to frequent $n$ -grams only (e.g., Franz & Brants (2006)). As we illustrated in Figure 1 and will further quantify in §4, the problem with small $n$ is that it discards richer context, making such $n$ -gram LMs poorly predictive of future tokens.
95
+
96
+ $\infty$ -gram LM. The $\infty$ -gram LM is a generalization of the $n$ -gram LM, where conceptually we start backing off from $n = \infty$ . We use a variant of backoff: we backoff only when the denominator is zero. This means we stop backing off as soon as the denominator becomes positive, upon which the numerator might still be zero. On an instance-wise basis, the effective $n$ is equal to one plus the length of the prompt's longest suffix that appears in the training data.
97
+
98
+ For the rest of this paper, we will use "∞-gram" to refer to the $\infty$ -gram LM. $\infty$ -gram is formally defined as
99
+
100
+ $$
101
+ P_{\infty}(w_{i} \mid w_{1:i-1}) = \frac{\operatorname{cnt}(w_{i-(n-1):i-1} w_{i} \mid \mathcal{D})}{\operatorname{cnt}(w_{i-(n-1):i-1} \mid \mathcal{D})}
102
+ $$
103
+
104
+ where $w_{1:i-1}$ are all tokens preceding $w_i$ in the document, and
105
+
106
+ $$
107
+ n = \max \left\{ n^{\prime} \in [1, i] \mid \operatorname{cnt}\left(w_{i-(n^{\prime}-1):i-1} \mid \mathcal{D}\right) > 0 \right\}.
108
+ $$
109
+
110
+ Unlike Katz backoff, $P_{\infty}(*|w_{1:i-1})$ is a valid distribution by construction and does not require discounting. This is because the effective $n$ is solely dependent on $w_{1:i-1}$ and does not depend on $w_i$ , and $\sum_{w_i \in \mathcal{V}} \operatorname{cnt}(w_{i-(n-1):i-1}w_i \mid \mathcal{D}) = \operatorname{cnt}(w_{i-(n-1):i-1} \mid \mathcal{D})$ .
111
+
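+ A minimal sketch of this backoff rule, assuming a helper `count(tokens)` that returns the number of occurrences of a token sequence in $\mathcal{D}$ (with the count of the empty sequence defined as $|\mathcal{D}|$); the function name and interface are ours.
+
+ ```python
+ def infgram_prob(count, prompt, next_token):
+     """Sketch of the infinity-gram estimate with backoff (assumed helper `count`).
+
+     `prompt` is a list of token IDs. Backs off to the longest suffix of the
+     prompt with a non-zero count; the effective n is len(suffix) + 1, and the
+     numerator may still be zero.
+     """
+     for start in range(len(prompt) + 1):
+         suffix = prompt[start:]
+         denom = count(suffix)            # count of the (possibly empty) suffix
+         if denom > 0:
+             return count(suffix + [next_token]) / denom
+     return 0.0  # unreachable when count([]) == |D| > 0
+ ```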
112
+ Further, we define the sparsity of this $\infty$ -gram estimate: an estimate is sparse iff $P(w_{i}|w_{i - (n - 1):i - 1}) = 1$ for one of the $w_{i}\in \mathcal{V}$ , and is zero for all other tokens in the vocabulary. Intuitively, this means there is only one possible next token given this context, according to the training data. As we will show in §4, sparse estimates are more predictive of the actual tokens than non-sparse ones.
113
+
114
+ Interpolating with neural LMs. $\infty$ -gram estimates contain zero probabilities, which may lead to infinite perplexity. We do not attempt to compute the perplexity of the $\infty$ -gram itself. Instead, we interpolate it with neural LMs and show perplexity improvement over the neural LMs alone (§5). The combined model is
115
+
116
+ $$
117
+ P(y \mid x) = \lambda P_{\infty}(y \mid x) + (1 - \lambda) P_{\text{neural}}(y \mid x),
118
+ $$
119
+
120
+ where $\lambda \in [0,1]$ is a hyperparameter.
121
+
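+ The interpolation itself is a one-line combination; the sketch below (names ours) applies it per token to get the log-probabilities used for perplexity.
+
+ ```python
+ import math
+
+ def interpolated_logprob(p_inf: float, p_neural: float, lam: float = 0.5) -> float:
+     """Per-token log-probability of the hybrid LM: lam * P_inf + (1 - lam) * P_neural.
+     The neural term keeps the mixture strictly positive even when P_inf is zero."""
+     return math.log(lam * p_inf + (1.0 - lam) * p_neural)
+ ```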
122
+ ![](images/9223e59d736503ff84ed8821a80b90a0481d09f38633d5c0148e8a2dd73dc13c.jpg)
123
+ Figure 2: Left: the suffix array for a toy string. Right: illustration of the suffix array in the infini-gram index, with $N = 4$ tokens in the dataset.
124
+
125
+ ![](images/91586b0058f77049445e0e2692b3c0539f82662e8135be0c1c2b8d74752f9779.jpg)
126
+
127
+ # 3 Infini-gram: A Performant Engine for $n$ -gram/ $\infty$ -gram Queries
128
+
129
+ We train $\infty$ -gram on modern, trillion-token text corpora. However, it is practically infeasible to build $n$ -gram count tables with unbounded $n$ for such massive datasets. In this section, we describe our infini-gram engine that processes $n$ -gram/ $\infty$ -gram queries efficiently. Infini-gram is powered by a data structure called the suffix array. We will show how to build this suffix array index and how to perform $n$ -gram/ $\infty$ -gram inferences with it.
130
+
131
+ Suffix array. The essence of $n$ -gram and $\infty$ -gram LMs is counting a given $n$ -gram in the training data. As such, we leverage the suffix array data structure, which is originally designed for efficiently counting the number of times a given "needle" string (length $L$ ) appears as a substring of a huge "haystack" string (length $N$ ). When the suffix array is built for a haystack string, counting a given needle string has time complexity $O(L + \log N)$ .
132
+
133
+ A suffix array represents the lexicographical ordering of all suffixes of an array (or a string, which is an array of characters). For an array of length $N$ , the suffix array contains $N$ unique integers, where the $i$ -th element is the starting position of the suffix ranked $i$ -th among all suffixes. Figure 2 (left) shows the suffix array for a toy string, aabaca.
134
+
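+ A toy illustration of this ordering (a naive construction by sorting, not the linear-time algorithm used for the actual index; the function name is ours):
+
+ ```python
+ def build_suffix_array(s: str) -> list[int]:
+     """Naive O(N^2 log N) suffix-array construction, for illustration only."""
+     return sorted(range(len(s)), key=lambda i: s[i:])
+
+ # The toy string from Figure 2 (left):
+ print(build_suffix_array("aabaca"))  # [5, 0, 1, 3, 2, 4]
+ ```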
135
+ As shown in Figure 2 (right), we build the suffix array on the byte array of the tokenized dataset (i.e., token array). Documents are separated by the \xff\xff token. In the token array, each consecutive two bytes represent a token ID (assuming that $|\mathcal{V}| < 2^{16} = 65536$ ). Given that the dataset has $N$ tokens, the token array has $2N$ bytes. The suffix array contains $N$ elements, each pointing to a token in the token array by storing its byte offset. Every token in the token array appears exactly once in the suffix array. Each pointer can be stored with $\lceil \log_2(2N) / 8 \rceil$ bytes. For corpora with 2B to 500B tokens (which is the range we deal with, after sharding (§A.3)), this is 5 bytes per pointer, and thus the suffix array has $5N$ bytes. Therefore, the combined size of token array and suffix array (i.e., the infini-gram index) is $7N$ bytes.
136
+
137
+ Building the suffix array. Suffix arrays can be built in linear time with respect to the size of the token array (Kärkkäinen et al., 2006). We adapted the suffix array implementation from Lee et al. (2022) and further optimized it for efficiency. It took us $\sim 48$ hours to build the suffix array for RedPajama on a single node with 128 CPUs and 1TiB RAM. We have built the suffix arrays for Dolma (3T tokens), RedPajama (1.4T tokens), Pile (380B tokens), and C4 (200B tokens). Since infini-gram indexes are additive (§A.2), together these can be easily combined into a larger index with a total of 5 trillion tokens, and the implicit count table contains at least 2 quadrillion unique $n$ -grams (§A.1).
138
+
139
+ Inference with the suffix array. Computing the $n$ -gram LM probability involves counting the number of occurrences of a token string, i.e., $\mathrm{cnt}(x_1 \dots x_n)$ . By construction, the occurrence positions of strings starting with $x_1 \dots x_n$ lie in a single, consecutive segment in the suffix array. Thus we only need to find the first and last occurrence positions, and the count is the difference between them. Beyond counting and $n$ -gram/∞-gram language modeling, infini-gram can also be used to retrieve documents containing an $n$ -gram, or a CNF expression with multiple $n$ -grams (§A.4).
140
+
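+ A string-level sketch of this counting query (the real engine works on the byte-level token array and applies the optimizations discussed below; the function is our own illustration):
+
+ ```python
+ def count_ngram(text: str, suffix_array: list[int], needle: str) -> int:
+     """Count occurrences of `needle` in `text` via two binary searches: all
+     suffixes starting with `needle` form one consecutive suffix-array segment,
+     so the count is the difference between the segment's end and start."""
+     L = len(needle)
+
+     def boundary(strictly_greater: bool) -> int:
+         lo, hi = 0, len(suffix_array)
+         while lo < hi:
+             mid = (lo + hi) // 2
+             prefix = text[suffix_array[mid]:suffix_array[mid] + L]
+             if prefix < needle or (strictly_greater and prefix == needle):
+                 lo = mid + 1
+             else:
+                 hi = mid
+         return lo
+
+     return boundary(True) - boundary(False)
+
+ # With the toy example above: count_ngram("aabaca", [5, 0, 1, 3, 2, 4], "a") == 4
+ ```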
141
+ During inference, the entire infini-gram index can stay on-disk, which minimizes the compute resources needed (no GPU, and minimal CPU / RAM). In §A.4, we discuss several optimization techniques applied to the inference engine: parallelized shard processing, hinted search, memory pre-fetching, fast effective- $n$ lookup, and amortized query processing. On RedPajama, our most optimized infini-gram engine can count a given $n$ -gram with an average latency of less than 20 milliseconds. It can compute the probability and next-token distribution in 40 milliseconds for $n$ -gram LMs, and in 200 milliseconds for the $\infty$ -gram. See §A.5 for the full list of supported query types and additional details on latency benchmarking.
142
+
143
+ # 4 Analyzing Human-written and Machine-generated Text using $\infty$ -gram
144
+
145
+ In this section, we present some analyses of human-written and machine-generated text from the perspective of $\infty$ -gram, mostly focusing on the token-wise agreement between $\infty$ -gram's prediction and the actual text. In summary, we found that:
146
+
147
+ 1. $\infty$ -gram has a fairly high accuracy (47%) when predicting the next token given a prefix of a human-written document, and this accuracy is higher when a longer suffix of the prompt can be used (i.e., when the effective $n$ is larger);
148
+ 2. Conventional $n$ -gram LMs ( $n \leq 5$ ) are insufficient for capturing a long enough context to determine the next token, while our $\infty$ -gram method is highly predictive of human-written and machine-generated text;
149
+ 3. $\infty$ -gram has significant potential to complement and improve neural LMs when predicting human-written text (which we further investigate in §5);
150
+ 4. When plotting the agreement level with respect to the suffix length, text generated by neural LMs with nucleus sampling is most similar to human-written text, among other decoding methods like greedy decoding and temperature sampling. For greedy decoding, the agreement plot suffers from significant fluctuation, which may be rooted in deficiencies in neural LM pretraining and the positional embeddings of Transformers.
151
+
152
+ $\infty$ -gram training data. For analyses in this section, we use a decontaminated version of Pile's training set ("Pile-train") (Gao et al., 2020) as training data for the $\infty$ -gram. We built an infini-gram index on Pile-train using the Llama-2 tokenizer (Touvron et al., 2023b), yielding 360 billion tokens.
153
+
154
+ Decontamination. It is important that the training data is decontaminated against the evaluation data, because otherwise it is very easy for $\infty$ -gram to cheat by copying from the very same document in the training data. We run decontamination of Pile-train against its validation and test sets ("Pile-val" and "Pile-test", which we will use for evaluation below and also in §5), using the method from Groeneveld (2023) that filters out a document if it has excessive $n$ -gram overlap with the evaluation data. See §B for more details.
155
+
156
+ Decontamination is non-trivial, and its definition could vary (e.g., when there is an identical sentence, is it contamination, or is it a quote that naturally occurs in real test-time scenarios?). Thus we followed the standard best practices for decontamination.
157
+
158
+ # 4.1 Human-written text
159
+
160
+ Setup. We use Pile-val set as the human-written text. For this analysis, we sampled 50 documents from each domain of Pile-val, and truncated each document to 1024 tokens (so the total number of tokens per domain is about $50\mathrm{k}$ ). We aggregate results from all domains.
161
+
162
+ We measure the token-wise agreement between $\infty$ -gram's prediction and the actual human-written text. Since computing the full next-token distribution (or the argmax of it) in $\infty$ -gram is relatively slow, we compute the $\infty$ -gram probability of the actual next token, and deem the prediction accurate if this probability is higher than 0.5. (This is a lower bound of the argmax accuracy, though the gap is small.) We further categorize all tokens by their effective $n$ , i.e., one plus the length of their prompt's longest suffix that has a non-zero count in the training data.
163
+
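+ A sketch of this measurement (reusing the assumed `count` helper from the ∞-gram sketch in §2; the bookkeeping structure is ours):
+
+ ```python
+ def infgram_agreement(count, tokens):
+     """For each position, record the effective n and whether the infinity-gram
+     probability of the actual next token exceeds 0.5 (the agreement criterion)."""
+     records = []
+     for i in range(1, len(tokens)):
+         prompt, actual = tokens[:i], tokens[i]
+         for start in range(len(prompt) + 1):
+             denom = count(prompt[start:])
+             if denom > 0:  # longest suffix with a non-zero count
+                 prob = count(prompt[start:] + [actual]) / denom
+                 records.append({"effective_n": len(prompt) - start + 1,
+                                 "agree": prob > 0.5})
+                 break
+     return records
+ ```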
164
+ ![](images/afcce1b3eb629a628923093069969da25478f508b05ae251d77203e2062fd445.jpg)
165
+ Figure 3: Token-wise agreement between human-written text and $n$ -gram/ $\infty$ -gram LMs.
166
+
167
+ ![](images/4fb2b65d22c4e5836462ae4a4d26953a7914bd11a3d38cb583f2c9e18442febd.jpg)
168
+
169
+ ![](images/69abdde8cb7b4e60a44fec897806bc628f56183891df7345bac87041d50ddc96.jpg)
170
+
171
+ ![](images/49870ed684deb176503ad569f290b04aefac8cb23968e412a0aa9cca1853d07b.jpg)
172
+ Figure 4: Distribution of probabilities assigned by neural LMs to human-written text tokens, and $\infty$ -gram's agreement with these tokens. Takeaway: $\infty$ -gram and neural LMs are predictive of actual human text on different tokens, and thus $\infty$ -gram estimates - especially sparse $\infty$ -gram estimates - can be used to complement neural LMs. See Figure 8 for extended results on Llama-2 13B/7B models.
173
+
174
+ ![](images/9e9a69676a5e42e69e0c86918f64634b9202ddaea7bb970bfffa8ce18d3b9adb.jpg)
175
+
176
+ For each category, we visualize the number of such tokens (in gray bars) as well as the agreement level (in green dots) in the middle plot of Figure 3.
177
+
178
+ Results. Overall, $\infty$ -gram agrees with the human-written text on $47\%$ of the tokens. We see that $\infty$ -gram becomes more accurate with the increase of effective $n$ : when the effective $n \geq 16$ , agreement is higher than $75\%$ . Further analysis (Appendix Figure 7) shows that the count of this longest suffix in the training data does not affect agreement substantially.
179
+
180
+ In the left plot of Figure 3, we show the same analysis for a 5-gram LM trained on the same data, and it has much lower agreement than the $\infty$ -gram. 5-gram LMs, which have been used extensively in previous literature (Franz & Brants, 2006; Aiden & Michel, 2011), do not capture a long enough context to correctly predict the next token: over $90\%$ of tokens in the evaluation data have an effective $n$ of at least 5, and the $\infty$ -gram analysis shows that the median effective $n$ is 7 (and the mean is 9.1).
181
+
182
+ In the right plot of Figure 3, we show the same analysis for only tokens with a sparse $\infty$ -gram estimate, which covers more than $50\%$ of all tokens. The overall agreement is even higher ( $75\%$ ), and when the effective $n \geq 14$ , agreement is higher than $80\%$ . This means when the next token is unique according to the training data, that unique token is very likely to be the actual token in human-written text.
183
+
184
+ Qualitatively, we found that $\infty$ -gram is often good at completing multi-token words (e.g., hippopotamus, correctly predicted tokens are underlined), common phrases (e.g., born in), and entity names (e.g., educated at Trinity College). $\infty$ -gram is not very good at recalling factual knowledge (e.g., predicting the first token of an entity name), likely due to insufficient contextualization.
185
+
186
+ $\infty$ -gram can shine where neural LMs fail. In Figure 4, we plot the distribution of probabilities assigned by the Llama-2 70B/13B/7B models (Touvron et al., 2023b) to the actual tokens in human-written text, and the human- $\infty$ -gram agreement for tokens in each probability bucket. (The higher the assigned probability, the higher agreement Llama-2 has with the actual tokens.) We observe a positive, yet imperfect, correlation between neural LMs and $\infty$ -gram regarding their agreement with the actual text. In particular, when the neural LM performance is very poor (left side of the histogram), $\infty$ -gram still gives a non-trivial agreement of above $20\%$ ; if only considering tokens with sparse $\infty$ -gram estimates, the agreement is as high as $50\%$ . This indicates a huge potential of complementing and improving the
187
+
188
+ ![](images/4c9b7e363efa1146daa01cf60b1c3a4881f7c2b2256a39dc25ed6d9cd7aef931.jpg)
189
+
190
+ ![](images/3a242eff5429a5dbfb4d5613c438d83a4f5bd3d6f56d90a0beaac82ef596cee3.jpg)
191
+
192
+ ![](images/1c7a2c365b5d746b8b8681c5c90ac7939c8556daf0c7f7603aa954ba51fb31a0.jpg)
193
+
194
+ ![](images/e30554b1e1f12687d9d4baf3a3d22f63002ad2a0480e40f5b2a6ada7d0d9a3e0.jpg)
195
+ Figure 5: Token-wise agreement between machine-generated text and $\infty$ -gram. All tokens are considered. See Figure 9 for results on GPT-Neo models.
196
+
197
+ ![](images/2dd40804dc224f8c931d89e2be22e78b11d3b105699d3127070df9e6ff727828.jpg)
198
+
199
+ ![](images/28f36b1ac8d98f0888ad8fcde6bae9262902c00db2e5cb801913d3102278550f.jpg)
200
+
201
+ performance of neural LMs with $\infty$ -gram for predicting human-written text, which we further investigate in §5.
202
+
203
+ # 4.2 Machine-generated text
204
+
205
+ Setup. Similar to the analysis with human-written text, we sampled 50 documents from each domain of Pile-val. We use the first 50 tokens of each document to prompt neural LMs to generate a continuation. Generation continues up to the original length of the document, or when an [EOS] token is generated. We experiment with three decoding methods: greedy decoding, temperature sampling, and nucleus sampling (Holtzman et al., 2019). The neural LMs are Llama-2 70B/13B/7B, GPT-J 6B (Wang & Komatsuzaki, 2021), and GPT-Neo 2.7B/1.3B/125M (Gao et al., 2020). The tokenizer of GPT-Neo/J is different from Llama-2, so we built a separate version of infini-gram index for Pile-train based on the GPT-Neo/J tokenizer.
206
+
207
+ Impact of decoding method. The top row of Figure 5 shows results of the three decoding methods on the same neural LM – Llama-2 70B. In general, increasing stochasticity shifts the effective $n$ to the smaller side, and also decreases the agreement level. Nucleus sampling has the most similar distribution of effective $n$ compared to human-written text (Figure 3, middle plot), which is probably why nucleus sampling is usually preferred in text generation. Greedy decoding has even higher effective $n$ than human-written text, which implies that greedy decoding could lead to over-memorization of training data as well as lack of diversity.
208
+
209
+ Impact of model size. The bottom row of Figure 5 shows the same analysis for different sizes of neural LM under greedy decoding. In general, increasing model size slightly increases the effective $n$ , and also increases the agreement level. This indicates that larger models memorize more from the training data, and are also more inclined to copy verbatim. The agreement level of GPT-Neo/J models is higher than that of Llama-2 models, probably because GPT-Neo/J are trained on the same data as the $\infty$ -gram (i.e., Pile-train). Overall, text generated by these neural LMs has a similar agreement level with the $\infty$ -gram as human-written text.
210
+
211
+ One very curious phenomenon is that, as effective $n$ increases, the agreement level fluctuates greatly in greedy decoding (but not in nucleus or temperature sampling, where the agreement level increases almost monotonically). Such fluctuation is even more rapid for smaller models (Llama-2 13B/7B and GPT-Neo/J models), and for Llama-2 7B the fluctuation is even periodic (rapidly dropping at effective $n = 20, 24, 28, 32$ ; this is statistically significant: a two-proportion z-test gives a p-value of $< 10^{-99}$ ). We suspect that this may be caused by the application of positional embeddings when pretraining these Transformer-based models, and we welcome further investigation from the community.
212
+
213
+ <table><tr><td rowspan="2">Neural LM</td><td rowspan="2">Size</td><td rowspan="2">Reference Data</td><td colspan="3">Validation</td><td colspan="3">Test</td></tr><tr><td>Neural</td><td colspan="2">+∞-gram</td><td>Neural</td><td colspan="2">+∞-gram</td></tr><tr><td>GPT-2</td><td>117M</td><td>Pile-train</td><td>22.82</td><td>13.71</td><td>(42%)</td><td>22.86</td><td>13.58</td><td>(42%)</td></tr><tr><td>GPT-2</td><td>345M</td><td>Pile-train</td><td>16.45</td><td>11.22</td><td>(34%)</td><td>16.69</td><td>11.18</td><td>(35%)</td></tr><tr><td>GPT-2</td><td>774M</td><td>Pile-train</td><td>15.35</td><td>10.39</td><td>(35%)</td><td>15.40</td><td>10.33</td><td>(35%)</td></tr><tr><td>GPT-2</td><td>1.6B</td><td>Pile-train</td><td>14.42</td><td>9.93</td><td>(33%)</td><td>14.61</td><td>9.93</td><td>(34%)</td></tr><tr><td>GPT-Neo</td><td>125M</td><td>Pile-train</td><td>13.50</td><td>10.76</td><td>(22%)</td><td>14.08</td><td>10.79</td><td>(25%)</td></tr><tr><td>GPT-Neo</td><td>1.3B</td><td>Pile-train</td><td>8.29</td><td>7.31</td><td>(13%)</td><td>8.61</td><td>7.36</td><td>(16%)</td></tr><tr><td>GPT-Neo</td><td>2.7B</td><td>Pile-train</td><td>7.46</td><td>6.69</td><td>(12%)</td><td>7.77</td><td>6.76</td><td>(15%)</td></tr><tr><td>GPT-J</td><td>6.7B</td><td>Pile-train</td><td>6.25</td><td>5.75</td><td>(10%)</td><td>6.51</td><td>5.85</td><td>(12%)</td></tr><tr><td>Llama-2</td><td>7B</td><td>Pile-train</td><td>5.69</td><td>5.05</td><td>(14%)</td><td>5.83</td><td>5.06</td><td>(16%)</td></tr><tr><td>Llama-2</td><td>13B</td><td>Pile-train</td><td>5.30</td><td>4.75</td><td>(13%)</td><td>5.43</td><td>4.76</td><td>(15%)</td></tr><tr><td>Llama-2</td><td>70B</td><td>Pile-train</td><td>4.59</td><td>4.21</td><td>(11%)</td><td>4.65</td><td>4.20</td><td>(12%)</td></tr><tr><td>Llama-2</td><td>7B</td><td>Pile-train + RedPajama</td><td>5.69</td><td>4.66</td><td>(22%)</td><td>5.83</td><td>4.66</td><td>(24%)</td></tr><tr><td>Llama-2</td><td>13B</td><td>Pile-train + RedPajama</td><td>5.30</td><td>4.41</td><td>(21%)</td><td>5.43</td><td>4.42</td><td>(23%)</td></tr><tr><td>Llama-2</td><td>70B</td><td>Pile-train + RedPajama</td><td>4.59</td><td>3.96</td><td>(18%)</td><td>4.65</td><td>3.95</td><td>(19%)</td></tr></table>
214
+
215
+ Table 1: Perplexity (lower is better) on Pile's validation and test sets. Numbers in parentheses are the relative perplexity improvement. The first eight rows (GPT-2 and GPT-Neo/J) share one tokenizer, and the last six rows (Llama-2) share another, so perplexities are only comparable within each group.
216
+
217
+ # 5 Improving Neural LMs with the $\infty$ -gram
218
+
219
+ The results in §4 motivate us to combine neural LMs and $\infty$ -gram (§2) to yield better language models. In this section, we will show strong experimental results of the combined model. In §4 we found that the $\infty$ -gram estimate has higher agreement with human-written text when it is sparse. Therefore, we use two separate interpolation hyperparameters: $\lambda_{1}$ for sparse and $\lambda_{2}$ for non-sparse $\infty$ -gram estimates. These hyperparameters are tuned on the validation set to minimize the perplexity of the combined model.
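+
+ As a minimal sketch, assuming the combination is a simple linear interpolation of next-token probabilities (with the neural and $\infty$ -gram probabilities already computed), the per-token combination looks like the following; `p_neural`, `p_inf`, and the two `lambda` values are inputs, and the sparse/non-sparse distinction selects which weight is used.
+
+ ```python
+ def combined_prob(p_neural, p_inf, is_sparse, lambda_sparse, lambda_nonsparse):
+     """Linearly interpolate the neural LM and infinity-gram probabilities,
+     with a separate weight depending on whether the infinity-gram estimate
+     is sparse."""
+     lam = lambda_sparse if is_sparse else lambda_nonsparse
+     return lam * p_inf + (1.0 - lam) * p_neural
+
+ # Example: a sparse estimate that puts all mass on the gold token.
+ p = combined_prob(p_neural=0.30, p_inf=1.0, is_sparse=True,
+                   lambda_sparse=0.3, lambda_nonsparse=0.1)
+ # p = 0.3 * 1.0 + 0.7 * 0.30 = 0.51
+ ```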
220
+
221
+ # 5.1 Experimental setup
222
+
223
+ Evaluation and metric. We measure the perplexity of each model on Pile's validation and test sets, as well as the relative improvement of perplexity between models. To show generalization, we also evaluate on time-shifted data (i.e., data created after the cutoff date of the $\infty$ -gram training data). The relative improvement of model $M$ against model $M_0$ is defined as $(1 - \frac{\mathrm{PPL}(M) - 1}{\mathrm{PPL}(M_0) - 1}) \times 100\%$ , which is the percentage of the perplexity gap closed towards perfect language modeling (i.e., $\mathrm{PPL} = 1$ ). Additional details on evaluation data processing are in §D.1.
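+
+ As a quick sanity check of this metric (a sketch, using numbers from Table 1 rather than the paper's evaluation code):
+
+ ```python
+ def relative_improvement(ppl_m, ppl_m0):
+     """Percentage of the perplexity gap (towards PPL = 1) that model M closes over M0."""
+     return (1.0 - (ppl_m - 1.0) / (ppl_m0 - 1.0)) * 100.0
+
+ # Llama-2 70B on Pile-val with Pile-train as reference data:
+ # neural-only PPL 4.59 vs. interpolated PPL 4.21 -> ~11% of the gap closed.
+ print(round(relative_improvement(4.21, 4.59)))  # 11
+ ```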
224
+
225
+ Reference data. To reduce confusion, in this section we will use reference data to refer to the training data of the $\infty$ -gram. In addition to Pile's training set that we used in the previous analyses (§4), we also consider RedPajama (Together, 2023) as reference data. The decontaminated Pile-train and RedPajama have 360 billion and 1.4 trillion tokens, respectively, summing up to 1.8 trillion tokens (based on the Llama-2 tokenizer). We later perform ablations on varying sizes and domains of the reference data.
226
+
227
+ Neural LMs. We use a range of large, competitive neural LMs, both as baselines and as models to interpolate with the $\infty$ -gram. In total, 14 models are considered: GPT-2 117M/345M/774M/1.6B (Radford et al., 2019), GPT-Neo 125M/1.3B/2.7B, GPT-J-6B, Llama-2 7B/13B/70B, and SILO PD/PDSW/PDSWBY (Min et al., 2023a). See §D.1 for additional details about these models and their training data.
228
+
229
+ Tokenizers. Among these models, GPT-2, GPT-Neo and GPT-J share the same tokenizer, but Llama-2 and SILO use different tokenizers. We therefore built three versions of the infini-gram index on Pile-train and RedPajama, one for each tokenizer. Due to the tokenizer variation, the perplexities of GPT-2, GPT-Neo and GPT-J are comparable to each other, but the perplexities of Llama-2 and SILO are not comparable to them or to each other.
230
+
231
+ <table><tr><td rowspan="2">Neural LM</td><td colspan="4">Validation</td><td colspan="4">Test</td></tr><tr><td>Neural</td><td>+∞-gram</td><td>+kNN-LM†</td><td>+RIC-LM†</td><td>Neural</td><td>+∞-gram</td><td>+kNN-LM†</td><td>+RIC-LM†</td></tr><tr><td colspan="9">Eval data: Wikipedia</td></tr><tr><td>Silo PD</td><td>26.60</td><td>15.30 (43%)</td><td>20.62</td><td>27.91</td><td>28.42</td><td>14.44 (51%)</td><td>-</td><td>-</td></tr><tr><td>Silo PDSW</td><td>18.93</td><td>12.36 (36%)</td><td>14.10</td><td>18.90</td><td>20.02</td><td>11.84 (43%)</td><td>14.5</td><td>19.4</td></tr><tr><td>Silo PDSWBY</td><td>10.66</td><td>8.77 (19%)</td><td>10.14</td><td>10.87</td><td>10.76</td><td>8.41 (24%)</td><td>-</td><td>-</td></tr><tr><td>Pythia</td><td>9.00</td><td>-</td><td>8.50</td><td>8.84</td><td>9.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td colspan="9">Eval data: Enron Emails</td></tr><tr><td>Silo PD</td><td>19.56</td><td>6.31 (70%)</td><td>8.56</td><td>15.45</td><td>15.71</td><td>4.85 (73%)</td><td>-</td><td>-</td></tr><tr><td>Silo PDSW</td><td>14.66</td><td>5.58 (65%)</td><td>6.70</td><td>10.80</td><td>11.23</td><td>4.35 (66%)</td><td>5.9</td><td>9.9</td></tr><tr><td>Silo PDSWBY</td><td>14.67</td><td>5.61 (65%)</td><td>7.24</td><td>10.91</td><td>11.52</td><td>4.44 (66%)</td><td>-</td><td>-</td></tr><tr><td>Pythia</td><td>7.577</td><td>-</td><td>4.99</td><td>6.16</td><td>6.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td colspan="9">Eval data: NIH ExPorters</td></tr><tr><td>Silo PD</td><td>27.46</td><td>16.26 (41%)</td><td>19.27</td><td>25.51</td><td>27.94</td><td>16.00 (44%)</td><td>-</td><td>-</td></tr><tr><td>Silo PDSW</td><td>19.35</td><td>12.70 (35%)</td><td>14.95</td><td>18.35</td><td>19.12</td><td>12.39 (37%)</td><td>15.0</td><td>18.5</td></tr><tr><td>Silo PDSWBY</td><td>15.01</td><td>10.62 (30%)</td><td>12.33</td><td>14.29</td><td>14.81</td><td>10.33 (32%)</td><td>-</td><td>-</td></tr><tr><td>Pythia</td><td>11.20</td><td>-</td><td>11.20</td><td>10.83</td><td>11.1</td><td>-</td><td>-</td><td>-</td></tr></table>
232
+
233
+ Table 2: Perplexity (the lower the better) on the validation and test sets of the Wikipedia, Enron Emails, and NIH ExPorters subsets of the Pile. All neural models are 1.3B models, and the reference data is always the Pile. Each evaluation domain is either in-domain, out-of-domain,
234
+
235
+ or out-of-domain but with relevant in-domain data, with respect to the training data of the neural LM. †: Results retrieved from Min et al. (2023a), which uses much smaller reference data: 45 million to 1.2 billion tokens, compared to our 360 billion tokens.
236
+
237
+ # 5.2 Results
238
+
239
+ Experimental results with GPT-2, GPT-Neo/J, and Llama-2 on Pile's evaluation sets are shown in Table 1. Results on time-shifted data can be found in §D.2, and we show the impact of the size and domain of reference data in §D.3.
240
+
241
+ Interpolating with $\infty$ -gram greatly and consistently improves the perplexity of neural LMs. The magnitude of improvement trends smaller as the neural LM size grows within the same family, while the largest models can still benefit a lot from our method (e.g., Pile-train alone improves Llama-2 70B by $12\%$ ).
242
+
243
+ However, this trend does not hold across different families of LMs. For example, $\infty$ -gram can improve GPT-2 1.6B by $34\%$ , but only improves a smaller model, GPT-Neo 1.3B, by $16\%$ . This may be because GPT-Neo/J models are trained precisely on Pile, while GPT-2 models are not. $\infty$ -gram works better when the reference data distribution differs from, or complements, the pretraining data distribution, which emphasizes the importance of data diversity. Meanwhile, the fact that $\infty$ -gram also improves neural LMs already pretrained on its reference data shows that there is consistent advantage in introducing $\infty$ -gram.
244
+
245
+ On the choice of $\infty$ -gram reference data, the union of Pile-train and RedPajama yields larger improvements on the Llama-2 models than Pile-train alone. The combination of Llama-2 13B and $\infty$ -gram with Pile-train + RedPajama outperforms Llama-2 70B, and interpolating with $\infty$ -gram pushes the perplexity of Llama-2 70B below 4.0.
246
+
247
+ When the neural LM is SILO (which is trained on permissive-licensed data only and thus has less training data), adding the $\infty$ -gram component is more helpful when SILO is trained on more restrictive data (i.e., PD > PDSW > PDSWBY). The usage of $\infty$ -gram can be precisely traced back to the contributing document(s) in the reference data, which is in line with the philosophy of SILO: to allow crediting the source data when using them for language modeling. When compared to the existing retrieval-augmentation methods used by SILO, i.e., kNN-LM and RIC-LM, $\infty$ -gram yields better improvement in perplexity. Therefore, $\infty$ -gram can serve as a better alternative retrieval-augmentation method for SILO.
248
+
249
+ A note on text generation. While $\infty$ -gram can be interpolated with neural LMs and greatly improve their perplexity, our preliminary experiments show that such a method might not be helpful, and may even be harmful, for open-ended text generation tasks. During generation,
250
+
251
+ $\infty$ -gram can make odd mistakes (e.g., predicting totally irrelevant tokens) which cause the model to digress. Thus, the combined model is not ready to replace neural LMs. Additional investigation is required to make $\infty$ -gram best contribute to text generation.
252
+
253
+ # 6 Related Work
254
+
255
+ We discuss closely related work here. See §E for extended discussion, and Table 6 for comparison with other $n$ -gram models and nonparametric language models.
256
+
257
+ $n$ -gram language models. The $n$ -gram has been one of the most classical language modeling methods since the inception of natural language processing (Jurafsky & Martin, 2000). People have been pushing the limits of $n$ -gram LMs by scaling up their training data. To date, the largest $n$ -gram table (Brants et al., 2007) counts 5-grams in a corpus of 2 trillion tokens.
258
+
259
+ While $n$ -gram LMs are currently largely surpassed by neural LMs, there has been recent work that revisits $n$ -grams and $n$ -gram LMs. Mikolov & Zweig (2012) finds that interpolating with a 5-gram Kneser-Ney model improves the perplexity of RNN models, whereas Khandelwal et al. (2020) finds that interpolating $n$ -gram models with Transformers does not improve perplexity substantially. Li et al. (2022) finds that the $n$ -gram model is as competitive as a small neural LM, and that training a neural model to be complementary to the $n$ -gram model and using both at inference time outperforms the neural-only LM. However, both use limited reference data (101M tokens) and compare with small neural LMs (117-250M parameters). Some prior work has found value in scaling up the $n$ -gram training data (Allamanis & Sutton, 2013).
260
+
261
+ Our work scales up the training data of $n$ -gram LMs to trillions of tokens. With the addition of scaling up the value of $n$ in the $n$ -gram, our model can significantly improve state-of-the-art neural models as large as 70B.
262
+
263
+ Unbounded $n$ -grams, suffix arrays, suffix trees. Previous work has explored using suffix-based data structures to enable $n$ -gram queries with unbounded $n$ , with limited scale of the training data. Stehouwer & van Zaanen (2010) proposes to use suffix arrays for $\infty$ -gram, and yet their formulation does not yield proper probability distributions and, consequently, a language model. Kennington et al. (2012) proposes to use suffix trees for the same purpose, and yet the storage overhead of suffix trees is very high such that it hinders scaling, which may be mitigated with highly intricate compression techniques (Shareghi et al., 2015). Among the three aforementioned papers, only the third evaluates on the general language modeling task, and the perplexity numbers are too high to be practically useful. Our training data is about $500\times$ larger than the largest one in these previous works.
264
+
265
+ Nonparametric language models. Nonparametric LMs refer to LMs whose complexity is not bounded a priori, because the complexity can change according to the reference data (Khandelwal et al., 2020; Borgeaud et al., 2022; Asai et al., 2023). The $\infty$ -gram LM is one instance of nonparametric LMs, and its simplicity makes it possible to significantly scale the reference data with modest resources (§3). To the best of our knowledge, our $\infty$ -gram LM is the largest in both the size of the reference data (5 trillion tokens) and the size of the base neural LM (70B).
266
+
267
+ # 7 Conclusion
268
+
269
+ In this paper, we modernized the classical $n$ -gram language model by scaling it up to a trillion tokens and extending to unbounded $n$ . We presented the infini-gram engine that performs efficient training and inference under this extreme setup. We also proposed the $\infty$ -gram language model, powered by the infini-gram engine, and showed that it can offer novel insights into human-written and machine-generated text and can improve existing neural language models.
270
+
271
+ # Acknowledgments
272
+
273
+ We would like to thank Zexuan Zhong, Mike Lewis, Yanai Elazar, Will Merrill, Tim Dettmers, Ximing Lu, Alisa Liu, Weijia Shi, Xiaochuang Han, members of the H2lab, and Ziqi Ma for their invaluable feedback.
274
+
275
+ # References
276
+
277
+ Erez Lieberman Aiden and Jean-Baptiste Michel. Quantitative analysis of culture using millions of digitized books. Science, 331:176-182, 2011. URL https://api.semanticscholar.org/CorpusID:40104730.
278
+ Miltiadis Allamanis and Charles Sutton. Mining source code repositories at massive scale using language modeling. 2013 10th Working Conference on Mining Software Repositories (MSR), pp. 207-216, 2013. URL https://api.semanticscholar.org/CorpusID:1857729.
279
+ Akari Asai, Sewon Min, Zexuan Zhong, and Danqi Chen. ACL 2023 tutorial: Retrieval-based language models and applications. ACL 2023, 2023.
280
+ Akari Asai, Zexuan Zhong, Danqi Chen, Pang Wei Koh, Luke Zettlemoyer, Hanna Hajishirzi, and Wen-tau Yih. Reliable, adaptable, and attributable language models with retrieval. 2024. URL https://api.semanticscholar.org/CorpusID:268248911.
281
+ Alexei Baevski and Michael Auli. Adaptive input representations for neural language modeling. In Proceedings of the International Conference on Learning Representations, 2019.
282
+ Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. In Proceedings of the International Conference of Machine Learning, 2022.
283
+ T. Brants, Ashok Popat, Peng Xu, Franz Josef Och, and Jeffrey Dean. Large language models in machine translation. In Conference on Empirical Methods in Natural Language Processing, 2007. URL https://api.semanticscholar.org/CorpusID:633992.
284
+ Charlie Chen, Sebastian Borgeaud, Geoffrey Irving, Jean-Baptiste Lespiau, L. Sifre, and John M. Jumper. Accelerating large language model decoding with speculative sampling. ArXiv, abs/2302.01318, 2023. URL https://api.semanticscholar.org/CorpusID:256503945.
285
+ Jesse Dodge, Ana Marasovic, Gabriel Ilharco, Dirk Groeneveld, Margaret Mitchell, and Matt Gardner. Documenting large webtext corpora: A case study on the colossal clean crawled corpus. In Conference on Empirical Methods in Natural Language Processing, 2021. URL https://api.semanticscholar.org/CorpusID:237568724.
286
+ Yanai Elazar, Akshita Bhagia, Ian Magnusson, Abhilasha Ravichander, Dustin Schwenk, Alane Suhr, Pete Walsh, Dirk Groeneveld, Luca Soldaini, Sameer Singh, Hanna Hajishirzi, Noah A. Smith, and Jesse Dodge. What's in my big data? ArXiv, abs/2310.20707, 2023. URL https://api.semanticscholar.org/CorpusID:264803575.
287
+ Alex Franz and Thorsten Brants. All our n-gram are belong to you. Google Machine Translation Team, 20, 2006. URL https://blog.research.google/2006/08/all-our-n-gram-are-belong-to-you.html.
288
+ Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The Pile: An 800GB dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020.
289
+ Xinyang Geng and Hao Liu. Openllama: An open reproduction of llama, May 2023. URL https://github.com/openlm-research/open_llama.
290
+ Dirk Groeneveld. The big friendly filter. https://github.com/allenai/bff, 2023.
291
+
292
+ Dirk Groeneveld, Iz Beltagy, Pete Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Harsh Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, Shane Arora, David Atkinson, Russell Authur, Khyathi Chandu, Arman Cohan, Jennifer Dumas, Yanai Elazar, Yuling Gu, Jack Hessel, Tushar Khot, William Merrill, Jacob Morrison, Niklas Muennighoff, Aakanksha Naik, Crystal Nam, Matthew E. Peters, Valentina Pyatkin, Abhilasha Ravichander, Dustin Schwenk, Saurabh Shah, Will Smith, Nishant Subramani, Mitchell Wortsman, Pradeep Dasigi, Nathan Lambert, Kyle Richardson, Jesse Dodge, Kyle Lo, Luca Soldaini, Noah A. Smith, and Hannaneh Hajishirzi. Olmo: Accelerating the science of language models. Preprint, 2024.
293
+ Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. Retrieval augmented language model pre-training. In Proceedings of the International Conference of Machine Learning, 2020.
294
+ Zhenyu He, Zexuan Zhong, Tianle Cai, Jason D Lee, and Di He. Rest: Retrieval-based speculative decoding. 2023. URL https://api.semanticscholar.org/CorpusID:265157884.
295
+ Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration. ArXiv, abs/1904.09751, 2019. URL https://api.semanticscholar.org/CorpusID:127986954.
296
+ Gautier Izacard, Patrick Lewis, Maria Lomeli, Lucas Hosseini, Fabio Petroni, Timo Schick, Jane Dwivedi-Yu, Armand Joulin, Sebastian Riedel, and Edouard Grave. Few-shot learning with retrieval augmented language models. arXiv preprint arXiv:2208.03299, 2022.
297
+ Dan Jurafsky and James H. Martin. Speech and language processing - an introduction to natural language processing, computational linguistics, and speech recognition. In Prentice Hall series in artificial intelligence, 2000. URL https://api.semanticscholar.org/CorpusID:60691216.
298
+ Juha Kärkkäinen, Peter Sanders, and Stefan Burkhardt. Linear work suffix array construction. J. ACM, 53:918-936, 2006. URL https://api.semanticscholar.org/CorpusID:12825385.
299
+ Slava M. Katz. Estimation of probabilities from sparse data for the language model component of a speech recognizer. IEEE Trans. Acoust. Speech Signal Process., 35:400-401, 1987. URL https://api.semanticscholar.org/CorpusID:6555412.
300
+ Casey Redd Kennington, Martin Kay, and Annemarie Friedrich. Suffix trees as language models. In International Conference on Language Resources and Evaluation, 2012. URL https://api.semanticscholar.org/CorpusID:12071964.
301
+ Urvashi Khandelwal, Omer Levy, Dan Jurafsky, Luke Zettlemoyer, and Mike Lewis. Generalization through memorization: Nearest neighbor language models. In Proceedings of the International Conference on Learning Representations, 2020.
302
+ Tian Lan, Deng Cai, Yan Wang, Heyan Huang, and Xian-Ling Mao. Copy is all you need. In Proceedings of the International Conference on Learning Representations, 2023.
303
+ Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. In Proceedings of the Association for Computational Linguistics, 2022.
304
+ Huayang Li, Deng Cai, Jin Xu, and Taro Watanabe. Residual learning of neural text generation with n-gram language model. In Findings of the Association for Computational Linguistics: EMNLP 2022, 2022. URL https://aclanthology.org/2022.findings-emnlp.109.
305
+ Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Hannaneh Hajishirzi, and Daniel Khashabi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Annual Meeting of the Association for Computational Linguistics, 2022. URL https://api.semanticscholar.org/CorpusID:254877603.
306
+
307
+ Marc Marone and Benjamin Van Durme. Data portraits: Recording foundation model training data. ArXiv, abs/2303.03919, 2023. URL https://api.semanticscholar.org/CorpusID:257378087.
308
+ Tomas Mikolov and Geoffrey Zweig. Context dependent recurrent neural network language model. 2012 IEEE Spoken Language Technology Workshop (SLT), pp. 234-239, 2012. URL https://api.semanticscholar.org/CorpusID:11383176.
309
+ Sewon Min, Suchin Gururangan, Eric Wallace, Hannaneh Hajishirzi, Noah Smith, and Luke Zettlemoyer. SILO language models: Isolating legal risk in a nonparametric datastore. arXiv preprint arXiv:2308.04430, 2023a. URL https://arxiv.org/abs/2308.04430.
310
+ Sewon Min, Weijia Shi, Mike Lewis, Xilun Chen, Wen-tau Yih, Hannaneh Hajishirzi, and Luke Zettlemoyer. Nonparametric masked language modeling. In Findings of ACL, 2023b.
311
+ Daichi Mochihashi and Eiichiro Sumita. The infinite markov model. In Neural Information Processing Systems, 2007. URL https://api.semanticscholar.org/CorpusID:1279894.
312
+ Fabio Petroni, Tim Rocktäschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H. Miller, and Sebastian Riedel. Language models as knowledge bases? ArXiv, abs/1909.01066, 2019. URL https://api.semanticscholar.org/CorpusID:202539551.
313
+ Aleksandra Piktus, Christopher Akiki, Paulo Villegas, Hugo Laurençon, Gérard Dupont, Alexandra Sasha Luccioni, Yacine Jernite, and Anna Rogers. The ROOTS search tool: Data transparency for LLMs. In Annual Meeting of the Association for Computational Linguistics, 2023. URL https://api.semanticscholar.org/CorpusID:257219882.
314
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.
315
+ Edward Raff, William Fleming, Richard Zak, H. Anderson, Bill Finlayson, Charles K. Nicholas, and Mark McLean. Kilograms: Very large n-grams for malware classification. ArXiv, abs/1908.00200, 2019. URL https://api.semanticscholar.org/CorpusID:199064443.
316
+ Colin Raffel, Noam M. Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21:140:1-140:67, 2019. URL https://api.semanticscholar.org/CorpusID:204838007.
317
+ Ehsan Shareghi, Matthias Petri, Gholamreza Haffari, and Trevor Cohn. Compact, efficient and unlimited capacity: Language modeling with compressed suffix trees. In Conference on Empirical Methods in Natural Language Processing, 2015. URL https://api.semanticscholar.org/CorpusID:225428.
318
+ Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Rich James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. REPLUG: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652, 2023.
319
+ Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Khyathi Chandu, Jennifer Dumas, Li Lucy, Xinxi Lyu, Ian Magnusson, Aakanksha Naik, Crystal Nam, Matthew E. Peters, Abhilasha Ravichander, Zejiang Shen, Emma Strubell, Nishant Subramani, Oyvind Tafjord, Evan Pete Walsh, Hannaneh Hajishirzi, Noah A. Smith, Luke Zettlemoyer, Iz Beltagy, Dirk Groeneveld, Jesse Dodge, and Kyle Lo. Dolma: An Open Corpus of 3 Trillion Tokens for Language Model Pretraining Research. Technical report, Allen Institute for AI, 2023. Released under ImpACT License as Medium Risk artifact, https://github.com/allenai/dolma.
320
+ Herman Stehouwer and Menno van Zaanen. Using suffix arrays as language models: Scaling the n-gram. 2010. URL https://api.semanticscholar.org/CorpusID:18379946.
321
+ Together. RedPajama: An open source recipe to reproduce LLaMA training dataset, 2023. URL https://github.com/togethercomputer/RedPajama-Data.
322
+
323
+ Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. LLaMA: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023a.
324
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Jasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023b.
325
+ Thuy-Trang Vu, Xuanli He, Gholamreza Haffari, and Ehsan Shareghi. Koala: An index for quantifying overlaps with pre-training corpora. In Conference on Empirical Methods in Natural Language Processing, 2023. URL https://api.semanticscholar.org/CorpusID:257766452.
326
+ Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021.
327
+ Frank D. Wood, C. Archambeau, Jan Gasthaus, Lancelot F. James, and Yee Whye Teh. A stochastic memoizer for sequence data. In International Conference on Machine Learning, 2009. URL https://api.semanticscholar.org/CorpusID:11199892.
328
+ Zexuan Zhong, Tao Lei, and Danqi Chen. Training language models with memory augmentation. In Proceedings of Empirical Methods in Natural Language Processing, 2022.
329
+
330
+ # A Additional Details on the Infini-gram Engine
331
+
332
+ # A.1 What is the size of the $n$ -gram count table implied by an infini-gram index?
333
+
334
+ The size of an $n$ -gram LM is often measured by the number of unique $n$ -grams indexed, each associated with its count in the dataset. This size is easy to obtain when indexing is done as the classical $n$ -gram count tables, but non-trivial to compute for infini-gram.
335
+
336
+ If we consider all possible values of $n$ in the $n$ -gram, then a dataset with $N$ tokens would contain about $\frac{1}{2} N^2$ $n$ -grams, and since most of these $n$ -grams are long and thus very likely to be distinct, there would be about $\frac{1}{2} N^2$ unique $n$ -grams. Since we index $N = 5$ trillion tokens, the size of the $n$ -gram count table implied by our infini-gram index would be approximately $1.2 \times 10^{25}$ , or equivalently, 20 mol (using $N_A = 6.02 \times 10^{23} / mol$ ).
337
+
338
+ However, the document separator is meaningless and should not be part of the $n$ -grams, and we should probably only count $n$ -grams within the document boundaries. There are $N = 5 \times 10^{12}$ tokens and $D = 6 \times 10^{9}$ documents, and on average each document has 857 tokens. Using the Cauchy-Schwarz inequality, we have the total number of $n$ -grams as
339
+
340
+ $$
341
+ \sum_{d} \frac{1}{2} N_{d}^{2} \geq \frac{1}{2} D \cdot \left(\frac{N}{D}\right)^{2} = \frac{N^{2}}{2D} = 2 \times 10^{15}
342
+ $$
343
+
344
+ Therefore, there are at least 2 quadrillion unique $n$ -grams in the count table implied by infini-gram.
345
+
346
+ # A.2 Additional details on the infini-gram index
347
+
348
+ Infini-gram indexes are additive and subtractive. If we have two or more indexes built on disjoint datasets (with the same tokenizer), we can easily combine them into a single index by adding up the $n$ -gram counts from each index. This feature is useful in the sharding
349
+
350
+ ![](images/242a1ab5635f325a7bef035eececf827faaca20c76117fbc2a7b0faee6113de0.jpg)
351
+ Figure 6: $n$ -gram/ $\infty$ -gram queries on a training data are supported by an associated suffix array. Both the training data and the suffix array are stored on-disk as regular files. Contents on the white strips are file data, and addresses above the strips are byte offsets. Querying for a particular $n$ -gram returns a consecutive segment of the suffix array, where each element is a pointer into the training data where the $n$ -gram appears. E.g., in the trillion-token training data, Artificial Intelligence, A Modern appears 42 times, and in all cases the following token is Approach.
352
+
353
+ technique that we discuss in $\S A.3$ and $\S A.4$ . Similarly, if we have built indexes on a big dataset and a subset of it, we can easily obtain an index of their difference by taking the difference of $n$ -gram counts. Compared to having a single index, both operations incur some additional inference operations (which can be parallelized to mitigate latency overhead), but they would spare us from re-indexing the union or difference sets from scratch.
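+
+ Conceptually, the additive/subtractive property amounts to nothing more than adding or subtracting per-query counts; a toy sketch (treating each index as an object with a `count` method, which is an abstraction for illustration and not the on-disk format):
+
+ ```python
+ class CombinedIndex:
+     """Combine several indexes at query time by summing signed n-gram counts.
+
+     `parts` is a list of (index, sign) pairs: sign = +1 adds a dataset's counts
+     (union of disjoint datasets), sign = -1 subtracts them (dataset minus subset).
+     """
+     def __init__(self, parts):
+         self.parts = parts
+
+     def count(self, ngram):
+         return sum(sign * index.count(ngram) for index, sign in self.parts)
+
+ # union      = CombinedIndex([(index_a, +1), (index_b, +1)])
+ # difference = CombinedIndex([(index_full, +1), (index_subset, -1)])
+ ```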
354
+
355
+ Document offsets and metadata. To enable efficient document retrieval, the infini-gram index stores additional data about documents. A document offset file stores the byte offset of each document in the tokenized dataset, and its format is similar to the suffix array. A document metadata file stores a comma-separated string for each document that contains its metadata (e.g., document ID, source, URL), and a document metadata offset file stores the byte offset of each document's metadata in the document metadata file. All the above files are negligible in size compared to the suffix array, because there are far fewer documents than tokens.
356
+
357
+ # A.3 Additional details on building the suffix array
358
+
359
+ Sharding. Building the suffix array requires heavy random access to the byte array, and thus the entire byte array must be kept in RAM so that the building time is reasonable. However, the byte array may be too large to fit into RAM. In such cases, we shard the byte array into multiple shards, and build a suffix array for each shard. Sharding would induce additional inference latency, which we discuss and mitigate below (§A.4).
360
+
361
+ # A.4 Additional details on inference with the infini-gram index
362
+
363
+ Both the first and last occurrence positions can be found with binary search, with time complexity $O(n \cdot \log N)$ and $O(\log N)$ random array accesses. The two binary searches can be parallelized, reducing the latency by roughly 2x. The impact of query length $n$ is negligible, because computers usually fetch memory in pages of 4K bytes, and string comparison is much faster than page fetching. Therefore, when we analyze time complexity below, we refer to the number of random array accesses.
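+
+ A minimal sketch of this counting procedure over an in-memory suffix array (for illustration only; the actual engine performs the same two binary searches over on-disk byte arrays, and uses linear-time suffix array construction (e.g., Kärkkäinen et al., 2006) rather than the naive sort below):
+
+ ```python
+ def build_suffix_array(tokens):
+     # Naive O(N^2 log N) construction, for illustration only.
+     return sorted(range(len(tokens)), key=lambda i: tokens[i:])
+
+ def count_ngram(tokens, sa, query):
+     """Count occurrences of `query` (a list of token ids) with two binary searches:
+     one for the first suffix starting with `query`, one for the position just past
+     the last. The matching slice sa[first:last] also gives, for free, all positions
+     in the training data where the n-gram occurs."""
+     def prefix(i):
+         return tokens[i:i + len(query)]
+
+     lo, hi = 0, len(sa)
+     while lo < hi:                      # lower bound
+         mid = (lo + hi) // 2
+         if prefix(sa[mid]) < query:
+             lo = mid + 1
+         else:
+             hi = mid
+     first = lo
+
+     lo, hi = first, len(sa)
+     while lo < hi:                      # upper bound
+         mid = (lo + hi) // 2
+         if prefix(sa[mid]) <= query:
+             lo = mid + 1
+         else:
+             hi = mid
+     return lo - first
+ ```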
364
+
365
+ Finding occurrence positions and documents. $n$ -gram counting with suffix arrays has a by-product: we also get to know all positions where the $n$ -gram appears in the training data, for free. This position information is implicitly contained in the suffix array segment we obtained during counting, and to retrieve the original documents where the $n$ -gram appears, all we need to do is to follow each pointer within this segment back into the tokenized dataset, and find the starting and ending position of the enclosing document by performing a binary search on the document offset index. (Note that if we don't have the document offset index, the latency of document search cannot be bounded because we would need to expand the pointer in both directions in the tokenized dataset until hitting the document separator. In practice, we see documents as large as 20M tokens.)
366
+
367
+ Impact of sharding. When the suffix arrays are built on sharded byte arrays, we can simply perform counting on each individual shard and accumulate the counts across all shards. The latency is proportional to the number of shards: time complexity would become $O(S \cdot \log N)$ . The processing of different shards can be parallelized, reducing the time complexity back to $O(\log N)$ .
368
+
369
+ Speeding up $n$ -gram computation by re-using previous search results. On the suffix array, the segment for $x_{1} \ldots x_{n}$ must be a sub-segment of that for $x_{1} \ldots x_{n-1}$ . Therefore, when computing the $n$ -gram probability $P_{n}(x_{n} \mid x_{1} \ldots x_{n-1})$ , we can first count $x_{1} \ldots x_{n-1}$ , and then when counting $x_{1} \ldots x_{n}$ , we only need to search for the first and last occurrence positions within the segment of $x_{1} \ldots x_{n-1}$ , which reduces the latency by at most $2\mathrm{x}$ .
370
+
371
+ On-disk search. The byte array and suffix array may be too large to fit into RAM, so in practice, we keep them on disk and read them as memory-mapped files. However, this creates a significant latency as the binary search requires random access to the byte array and suffix array. To mitigate this, we implemented a memory pre-fetching method that informs the system of the array offsets we will likely be reading in the near future. Pre-fetching reduces average latency by roughly 5x.
372
+
373
+ <table><tr><td>Reference Data (→) / Query Type (↓)</td><td>Pile-train (N=0.36T, S=2)</td><td>RPJ (N=1.4T, S=8)</td><td>Time Complexity (measured by number of random disk accesses)</td></tr><tr><td>1. Counting an n-gram (n = 1)</td><td>7 ms</td><td>9 ms</td><td rowspan="6">O(log N)</td></tr><tr><td>... (n = 2)</td><td>13 ms</td><td>20 ms</td></tr><tr><td>... (n = 5)</td><td>14 ms</td><td>19 ms</td></tr><tr><td>... (n = 10)</td><td>13 ms</td><td>18 ms</td></tr><tr><td>... (n = 100)</td><td>13 ms</td><td>19 ms</td></tr><tr><td>... (n = 1000)</td><td>14 ms</td><td>19 ms</td></tr><tr><td>2. Computing a token probability from n-gram LM (n = 5)</td><td>19 ms</td><td>30 ms</td><td>O(log N)</td></tr><tr><td>3. Computing full next-token distribution from n-gram LM (n = 5)</td><td>31 ms</td><td>39 ms</td><td>O(V·log N)</td></tr><tr><td>4. Computing a token probability from ∞-gram LM</td><td>90 ms</td><td>135 ms</td><td>O(log L·log N)</td></tr><tr><td>... on consecutive tokens</td><td>12 ms</td><td>20 ms</td><td>O(log N)</td></tr><tr><td>5. Computing full next-token distribution from ∞-gram LM</td><td>88 ms</td><td>180 ms</td><td>O((log L+V)·log N)</td></tr></table>
374
+
375
+ Table 3: Inference-time latency of infini-gram on different types of queries. Average latency per query is reported. Benchmarked with an inference engine written in C++ (with parallelized shard processing) and running on a single, 8-core CPU node. Notations for time complexity: $N =$ number of tokens in the reference data; $S =$ number of shards for the suffix array; $L =$ number of tokens in the query document; $V =$ vocabulary size.
376
+
377
+ Speeding up $\infty$ -gram computation. To compute the $\infty$ -gram probability, we need to count the occurrence of each suffix $x_{l-n+1} \ldots x_l$ up to the maximum $n$ so that the suffix still meets the sufficient appearance requirement (we denote this maximum $n$ as $L$ ). This means $O(L)$ counting operations, and the time complexity for each $\infty$ -gram computation is $O(L \cdot \log N)$ . However, a simple binary-lifting + binary-search algorithm for searching $L$ can reduce the number of counting operations to $O(\log L)$ , and thus the time complexity for each $\infty$ -gram computation becomes $O(\log L \cdot \log N)$ .
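+
+ A sketch of the binary-lifting + binary-search step, assuming a hypothetical `count(suffix)` helper backed by the index; it relies on counts being non-increasing as the suffix grows longer, and returns $L$ , so the effective $n$ is $L + 1$ :
+
+ ```python
+ def longest_matching_suffix_len(prompt, count):
+     """Largest L such that count(prompt[-L:]) > 0, in O(log L) count queries."""
+     n = len(prompt)
+     if n == 0 or count(prompt[-1:]) == 0:
+         return 0
+     good = 1          # a length known to have a non-zero count
+     bad = None        # a length known to have a zero count
+     while bad is None:
+         trial = min(2 * good, n)
+         if count(prompt[-trial:]) > 0:
+             good = trial
+             if trial == n:
+                 return n          # the entire prompt occurs in the reference data
+         else:
+             bad = trial
+     while bad - good > 1:         # binary search for the boundary in (good, bad)
+         mid = (good + bad) // 2
+         if count(prompt[-mid:]) > 0:
+             good = mid
+         else:
+             bad = mid
+     return good
+ ```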
378
+
379
+ Speeding up dense $\infty$ -gram computation. During evaluation, we need to compute the $\infty$ -gram probability of each token in the test document. We can save computation by observing that the effective $n$ for one token is at most one token longer than that for the previous token. This brings the amortized time complexity for evaluating each token down to $O(\log N)$ .
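+
+ A sketch of the amortized pass over a document (again with a hypothetical `count` helper); the key invariant is that the longest-matching-suffix length at position $i$ is at most one larger than at position $i-1$ , so we can start from the previous length plus one and only shrink:
+
+ ```python
+ def longest_suffix_lengths(tokens, count):
+     """For each position i, the length L of the longest suffix of tokens[:i]
+     with a non-zero count (effective n is L + 1). Each unit of growth (+1 per
+     step) pays for at most one later shrink, giving amortized O(1) count
+     queries per token."""
+     lengths, prev = [], 0
+     for i in range(len(tokens)):
+         L = min(prev + 1, i)
+         while L > 0 and count(tokens[i - L:i]) == 0:
+             L -= 1
+         lengths.append(L)
+         prev = L
+     return lengths
+ ```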
380
+
381
+ # A.5 Supported query types and latency benchmarking
382
+
383
+ Infini-gram supports the following types of $n$ -gram/ $\infty$ -gram queries:
384
+
385
+ 1. Counting an $n$ -gram (COUNT);
386
+ 2. Computing a token probability from $n$ -gram LM (with given $n$ , no backoff) (NGRAMPROB);
387
+ 3. Computing the full next-token distribution from $n$ -gram LM (NGRAMDIST);
388
+ 4. Computing a token probability from $\infty$ -gram LM (INFGRAMPROB);
389
+ 5. Computing the full next-token distribution from $\infty$ -gram LM (INFGRAMDIST);
390
+ 6. Returning documents containing an $n$ -gram, or a CNF logical expression of $n$ -gram terms, connected with AND's and/or OR's (e.g., (natural language processing OR artificial intelligence) AND (deep learning OR machine learning)) (SEARCHDOC).
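+
+ For query type 6, the CNF evaluation itself reduces to set operations over the documents matched by each $n$ -gram term; a toy sketch (with a hypothetical `docs_containing(ngram)` lookup standing in for the index, which in reality works with occurrence positions rather than materialized sets):
+
+ ```python
+ def search_docs_cnf(cnf, docs_containing):
+     """Evaluate a CNF expression over n-gram terms.
+
+     `cnf` is a list of clauses joined by AND; each clause is a list of n-grams
+     joined by OR, e.g.:
+         [["natural language processing", "artificial intelligence"],
+          ["deep learning", "machine learning"]]
+     Returns the set of document ids satisfying the whole expression.
+     """
+     result = None
+     for clause in cnf:
+         clause_docs = set()
+         for ngram in clause:
+             clause_docs |= docs_containing(ngram)     # OR within a clause
+         result = clause_docs if result is None else result & clause_docs  # AND across clauses
+     return result if result is not None else set()
+ ```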
391
+
392
+ We benchmark the latency of infini-gram on different types of $n$ -gram and $\infty$ -gram queries, and show results in Table 3. During inference, the training data and the suffix array are stored on an SSD. For each type of query, the benchmarking is conducted on 1,000 tokens randomly and independently sampled from Pile's validation data (except for the task "computing a token probability from $\infty$ -gram LM on consecutive tokens", where we sampled 10 documents and processed 1,000 consecutive tokens in each document).
393
+
394
+ All types of queries demonstrate sub-second latency on the trillion-token training data. Computing a token probability from the $\infty$ -gram with RedPajama takes merely 135 milliseconds. Furthermore, our implementation supports counting the occurrence of an $n$ -gram
395
+
396
+ with arbitrarily large $n$ , with roughly constant latency at 20 milliseconds (we experimentally validated up to $n = 1000$ ). Decoding requires computing the full next-token distribution and is thus slightly slower: 39 milliseconds per token with $n$ -gram LMs and 180 milliseconds per token with $\infty$ -gram.
397
+
398
+ # B Decontamination of Reference Data
399
+
400
+ To properly evaluate the effectiveness of $\infty$ -gram LM on Pile's evaluation sets, we performed data decontamination on the Pile's training set and RedPajama before using them as reference data for the $\infty$ -gram LM. We run the Big Friendly Filter (BFF) $^{1}$ (Groeneveld, 2023) on Pile's training set and RedPajama, filtering out documents with too much $n$ -gram overlap with Pile's evaluation sets. Table 4 reports the statistics of decontamination.
401
+
402
+ When using BFF, we always remove whole documents, instead of by paragraphs. Following the default settings, we consider $n$ -grams where $n = 13$ , and discard the document if at least $80\%$ of its $n$ -grams are present in the evaluation set. For Pile's training set, we lowercase all documents to capture more potential contaminations.
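+
+ A simplified, illustrative sketch of this document-level rule (not the actual BFF implementation, which is engineered to stream very large corpora):
+
+ ```python
+ def is_contaminated(doc_tokens, eval_ngrams, n=13, threshold=0.8):
+     """True if at least `threshold` of the document's n-grams (n = 13 here)
+     also appear in `eval_ngrams`, the set of n-grams from the evaluation data.
+     Contaminated documents are dropped in their entirety."""
+     ngrams = [tuple(doc_tokens[i:i + n]) for i in range(len(doc_tokens) - n + 1)]
+     if not ngrams:
+         return False
+     overlap = sum(ng in eval_ngrams for ng in ngrams)
+     return overlap / len(ngrams) >= threshold
+ ```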
403
+
404
+ <table><tr><td colspan="4">REDPAJAMA</td><td colspan="3">PILE (TRAIN)</td></tr><tr><td>Subset</td><td>Total docs</td><td>Filtered docs</td><td>Ratio filtered</td><td>Subset</td><td>Total docs</td><td>Filtered docs</td></tr><tr><td>Arxiv</td><td>1558306</td><td>213</td><td>0.01%</td><td>237741</td><td>1089</td><td></td></tr><tr><td>BookCorpus2</td><td>25355</td><td>6</td><td></td><td>277655</td><td>99</td><td></td></tr><tr><td>Books3</td><td>1918535</td><td>0</td><td>0%</td><td>Enron Emails</td><td>926132</td><td>18236</td></tr><tr><td>DM Mathematics</td><td>131723</td><td>21</td><td></td><td>EuroParl</td><td>131723</td><td>21</td></tr><tr><td>FreeLaw</td><td>5069088</td><td>11821</td><td>0.2%</td><td></td><td></td><td></td></tr><tr><td>Gutenberg (PG-19)</td><td>66981</td><td>70</td><td>0.1%</td><td></td><td></td><td></td></tr><tr><td>HackerNews</td><td>1571968</td><td>14</td><td></td><td></td><td></td><td></td></tr><tr><td>NIH ExPorter</td><td>1777926</td><td>3739</td><td>0.2%</td><td></td><td></td><td></td></tr><tr><td>OpenSubtitles</td><td>632485</td><td>5754</td><td>0.9%</td><td></td><td></td><td></td></tr><tr><td>OpenWebText2</td><td>32333654</td><td>136914</td><td>0.4%</td><td></td><td></td><td></td></tr><tr><td>PhilPapers</td><td>63875</td><td>2324</td><td>0.4%</td><td></td><td></td><td></td></tr><tr><td>Pile-CC</td><td>52441354</td><td>19928</td><td></td><td></td><td></td><td></td></tr><tr><td>PubMed Abstracts</td><td>29329202</td><td>2312</td><td></td><td></td><td></td><td></td></tr><tr><td>PubMed Central</td><td>5679903</td><td>4230</td><td>0.1%</td><td></td><td></td><td></td></tr><tr><td>StackExchange</td><td>29529008</td><td>2072</td><td></td><td></td><td></td><td></td></tr><tr><td>USPTO Backgrounds</td><td>11123325</td><td>80088</td><td>0.7%</td><td></td><td></td><td></td></tr><tr><td>Ubuntu IRC</td><td>20067</td><td>10</td><td></td><td></td><td></td><td></td></tr><tr><td>Wikipedia (en)</td><td>16939503</td><td>45052</td><td>0.3%</td><td></td><td></td><td></td></tr><tr><td>YoutubeSubtitles</td><td>328030</td><td>871</td><td>0.3%</td><td></td><td></td><td></td></tr><tr><td>Total</td><td>210607728</td><td>1296376</td><td>0.6%</td><td></td><td></td><td></td></tr></table>
405
+
406
+ Table 4: Statistics of de-contamination in RedPajama (left) and Pile's training set (right).
407
+
408
+ # C Additional Analysis
409
+
410
+ Figure 7 is an extension to the middle plot of Figure 3, and shows a more fine-grained analysis of the token-wise agreement between human-written text and $\infty$ -gram. Figure 8 extends Figure 4 with results on Llama-2-13b/7b. Figure 9 extends Figure 5 with results on GPT-Neo models.
411
+
412
+ # D Additional Experiments on Improving Neural LMs with $\infty$ -gram
413
+
414
+ # D.1 Additional details on experimental setup
415
+
416
+ Evaluation data processing. We split each document in the evaluation data into batches with a maximum sequence length of 1024 and a sliding window of 512, a setup that is standard in prior language modeling literature (Baevski & Auli, 2019; Khandelwal et al., 2020).
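+
+ A sketch of this sliding-window split; the scoring convention below (only the non-overlapping tail of each window after the first is evaluated, so every token is scored exactly once with up to 512 tokens of extra left context) is our assumption of the standard setup:
+
+ ```python
+ def sliding_windows(tokens, max_len=1024, stride=512):
+     """Split one document into overlapping windows of up to `max_len` tokens.
+     Returns (window, score_from) pairs; positions before `score_from` serve
+     only as context and are not scored."""
+     windows, start = [], 0
+     while True:
+         window = tokens[start:start + max_len]
+         score_from = 0 if start == 0 else max_len - stride
+         windows.append((window, score_from))
+         if start + max_len >= len(tokens):
+             break
+         start += stride
+     return windows
+ ```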
417
+
418
+ ![](images/b9e19fb58e1846ce769b010d6b49f992f02496b2c006264bfae2086de0eb5cd7.jpg)
419
+ Figure 7: Token-wise agreement between human-generated text and $\infty$ -gram, broken down by "effective $n$ " and frequency of the corresponding longest suffix in the reference data. The height of each bar represents token count, and the color represents agreement (red is 0.0, green is 1.0).
420
+
421
+ ![](images/9dc28a352e929ef3e73a61ba5f8cc45f5e3000f86109c6d25e5ecd25c0f7d08b.jpg)
422
+
423
+ ![](images/bc7accd8d3db34dbe340a8d4340e6470eec5d1cc5d5929772b36daff5e4d484c.jpg)
424
+
425
+ ![](images/97281bd1b7b5fd9f16f2b86d8e914e13389d2d6a2ab7188b831d927a2ed8cda2.jpg)
426
+ Figure 8: Continuation of Figure 4, with results on Llama-2-13b/7b.
427
+
428
+ ![](images/3f2ffbe1425068449e04e77f0e505481306831078f286dd3ea74a1f009646681.jpg)
429
+
430
+ ![](images/45e6e83e94ca664406baa7dccd082fd06c320e9cc862d76bbe772ff1888939b8.jpg)
431
+ Figure 9: Continuation of Figure 5, with results on GPT-Neo models. Token-wise agreement between machine-generated text and $\infty$ -gram. All tokens are considered.
432
+
433
+ ![](images/d921a06ee94efb19901b4079d71cc6543650861a97173533419c395d089aa357.jpg)
434
+
435
+ ![](images/be98bfc9f0fb0ad9f8184c4f04deed1900c9a235f933c17c56a33edd805b055e.jpg)
436
+
437
+ Neural LMs. Below are additional details about the families of neural LMs we use.
438
+
439
+ <table><tr><td rowspan="2">Eval Data (Wikipedia)</td><td colspan="3">simple interpolation</td><td colspan="3">w/ Random Forest</td></tr><tr><td>Neural</td><td colspan="2">+∞-gram</td><td>Neural</td><td colspan="2">+∞-gram</td></tr><tr><td>April 2023</td><td>5.64</td><td>5.48</td><td>(3%)</td><td>5.86</td><td>4.89</td><td>(20%)</td></tr><tr><td>May 2023</td><td>5.43</td><td>5.27</td><td>(4%)</td><td>6.01</td><td>5.70</td><td>(6%)</td></tr><tr><td>June 2023</td><td>5.49</td><td>5.21</td><td>(6%)</td><td>5.69</td><td>4.87</td><td>(17%)</td></tr><tr><td>July 2023</td><td>4.93</td><td>4.93</td><td>(0%)</td><td>4.91</td><td>4.78</td><td>(3%)</td></tr><tr><td>August 2023</td><td>4.64</td><td>4.46</td><td>(5%)</td><td>4.81</td><td>4.50</td><td>(8%)</td></tr></table>
440
+
441
+ Table 5: Evaluation on time-shifted data. The evaluation data is taken from newly-added Wikipedia articles since April 2023, which is after the creation of both the Pile and RedPajama. The neural model is Llama-2 (13B), and the $\infty$ -gram reference data is Pile + RPJ.
442
+
443
+ - GPT-2 (Radford et al., 2019), one of the earliest autoregressive language models whose sizes range from $117\mathrm{M}$ , $345\mathrm{M}$ , and $774\mathrm{M}$ to $1.6\mathrm{B}$ . Their training data is a diverse set of web text, although it is not public.
444
+ - GPT-Neo (Gao et al., 2020) and GPT-J (Wang & Komatsuzaki, 2021), language models trained on the Pile whose sizes vary from 125M, 1.3B, and 2.7B to 6.7B.
445
+ - Llama-2 (Touvron et al., 2023b), a subsequent version of LLaMA (Touvron et al., 2023a), trained on two trillion tokens and available in sizes of 7B, 13B, and 70B. Llama-2 is one of the most competitive language models whose weights are available at the time of writing. The training data of Llama-2 is unknown, although the preceding version was trained on a large corpus of Common Crawl, Wikipedia and code, which is replicated by RedPajama (Together, 2023).
446
+ - SILO (Min et al., 2023a), 1.3B language models trained on permissively licensed data only. The original paper showed that training on permissively licensed data leads to the challenge of extreme domain generalization because the training data is skewed to highly specific domains like code and government text. We use three different variants, PD, PDSW and PDSWBY, which are trained on different levels of permissivity, leading to varying levels of the domain generalization challenge.
447
+
448
+ # D.2 Evaluating on time-shifted data
449
+
450
+ To further show the effectiveness of $\infty$ -gram and eliminate doubts that our performance gains might be due to insufficient decontamination, we evaluate on time-shifted data: documents that were created after the cutoff time of the $\infty$ -gram reference data. We use new Wikipedia articles created between April and August 2023, which is after the cutoff time of both Pile and RedPajama.
451
+
452
+ Table 5 reports the perplexity of neural LM as well as the combined model. On documents in four out of the five months, interpolating with $\infty$ -gram improves the perplexity of the neural LM. We find that this improvement can be further boosted by applying a Random Forest to decide an instance-wise interpolation hyperparameter, where the features of the Random Forest are the suffix lengths (1 up to the effective $n$ ) as well as the frequency of each suffix in the reference data. When Random Forest is applied, the perplexity improvement ranges from $3\% - 20\%$ .
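+
+ As a sketch of how such an instance-wise weight could be produced (a hypothetical setup using scikit-learn; the exact feature layout, the `MAX_N` cap, and the regression targets below are our assumptions for illustration, not the paper's precise recipe):
+
+ ```python
+ import numpy as np
+ from sklearn.ensemble import RandomForestRegressor
+
+ MAX_N = 32  # assumed cap on how many suffix lengths are encoded as features
+
+ def features(suffix_counts):
+     """Fixed-width feature vector: log(1 + count) of the length-1 .. length-MAX_N
+     suffixes of the prompt in the reference data (zero once a suffix stops occurring)."""
+     x = np.zeros(MAX_N)
+     counts = np.asarray(suffix_counts[:MAX_N], dtype=float)
+     x[:len(counts)] = np.log1p(counts)
+     return x
+
+ # Toy fit on placeholder validation statistics, purely to show the shape of the
+ # setup; in practice the targets would be per-token weights tuned on Pile-val.
+ X = np.stack([features([120, 37, 5, 1]), features([3000, 2]), features([8])])
+ y = np.array([0.6, 0.2, 0.4])  # assumed per-instance interpolation weights
+ rf = RandomForestRegressor(n_estimators=100, random_state=0).fit(X, y)
+
+ lam = float(np.clip(rf.predict(features([50, 12, 4])[None, :])[0], 0.0, 1.0))
+ # p = lam * p_inf + (1.0 - lam) * p_neural
+ ```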
453
+
454
+ # D.3 Ablations
455
+
456
+ Effect of the size of reference data. Figure 10 reports performance of the combined model with respect to the size of the reference data. To create progressively smaller reference data, we repeatedly downsampled the full reference data by $2\mathrm{x}$ (up to $256\mathrm{x}$ , resulting in 9 sizes). We see that the improvement brought by $\infty$ -gram widens as the reference data size grows, and the relationship is roughly log-linear (except for the NIH ExPorter domain, where $\infty$ -gram doesn't help when the reference data is too small).
457
+
458
+ Effect of the domain of reference data. Figure 10 also compares performance of the combined model where the $\infty$ -gram uses either the full reference data or only the in-domain reference data. Using only the in-domain reference data is roughly as powerful as using
459
+
460
+ ![](images/4742df7019f6e0c29ed28a3016a9a9d4430903c34513cbcd5b357bae7c4440d3.jpg)
461
+ Figure 10: Impact of scaling the datastore of the $\infty$ -gram, all using the Llama-2 models (7B, 13B, and 70B) as neural LMs, and the Pile as the reference data. ---: neural LM only (baseline). $\bullet$ : $\infty$ -gram uses the full Pile; $\circ$ : $\infty$ -gram uses only the in-domain portion of the Pile. Gains increase consistently as the datastore scales.
462
+
463
+ the full reference data, which implies that almost all of the improvement we have achieved is thanks to in-domain data (which has been decontaminated). This means it would not hurt to use the full reference data, especially when the test domain is unknown or in-domain reference data is unavailable; however, having in-domain reference data is most helpful.
464
+
465
+ # E Extended Discussion
466
+
467
+ In §4 and §5, we showcased some very preliminary use cases of the infini-gram engine. However, we believe that infini-gram can enable much broader investigations and applications, including but not limited to:
468
+
469
+ Understanding massive text corpora. Text corpora used for pretraining language models have become prohibitively large, and we have relatively limited understanding of their contents (Elazar et al., 2023). Infini-gram can be a useful tool to quickly find out what is in the corpus and what is not, using $n$ -gram lookup (the COUNT query).
470
+
471
+ Data curation. Data engineers often want to remove problematic content in corpora scraped from the Internet, such as toxicity, hate speech, and personally identifiable information (PII). Using infini-gram's SEARCHDOC query (which can be easily modified to return all documents), one can retrieve all documents containing an $n$ -gram term (or a CNF expression with multiple $n$ -gram terms) and remove them from the corpus. Removal can even be done iteratively: infini-gram indexes are additive/subtractive, so we can obtain an index of the corpus after round one of removal by indexing the removed set and taking the difference of the original index and the removal index.
472
+
473
+ Document retrieval. The scale of the datastore is key to the effectiveness of retrieval-augmented LMs (Asai et al., 2024). However, vector-based indexes are difficult to scale up due to compute limits, storage limits, and inference efficiency. Infini-gram's SEARCHDOC function can retrieve documents from datastores as large as the full pretraining corpora, and can potentially boost the performance of retrieval-augmented LMs.
474
+
475
+ Reducing hallucination in factual knowledge. Parametric-only models are prone to generating non-factual statements, which is widely known as the hallucination problem. Infini-gram can potentially be used to mitigate hallucination by reading verbatim from the training data. We have found evidence that the $\infty$ -gram can greatly outperform Llama-2-70B on factual probing benchmarks such as LAMA (Petroni et al., 2019).
476
+
477
+ Detecting data contamination, memorization, and plagiarism. Test set contamination has become a major issue for language model evaluation. $n$ -gram lookup enables us to check whether evaluation queries have sneaked into the training data of neural LMs. It also opens up the possibility of detecting memorization in machine-generated text, or plagiarism in human-written text.
478
+
479
+ Preventing copyright infringement. Recently, generative AIs are facing numerous lawsuits for generating arguably copyrighted materials. Infini-gram may be helpful for pre
480
+
481
+ <table><tr><td>Method</td><td># tokens (↑)</td><td># entries (↑)</td><td>Storage usage (↓)</td><td>max n</td></tr><tr><td colspan="5">Vector-based index</td></tr><tr><td>RETRO (Borgeaud et al., 2022)</td><td>1.8 T</td><td>2.8 × 10<sup>10</sup></td><td>432 TB (16k bytes / entry)</td><td>-</td></tr><tr><td>Atlas (Izacard et al., 2022)</td><td>27 B</td><td>4 × 10<sup>8</sup></td><td>200 GB (8 bytes / entry)</td><td>-</td></tr><tr><td>kNN-LM (Khandelwal et al., 2020)</td><td>3 B</td><td>3 × 10<sup>9</sup></td><td>200 GB (64 bytes / entry)</td><td>-</td></tr><tr><td>NPM (Min et al., 2023b)</td><td>1 B</td><td>1 × 10<sup>9</sup></td><td>1.4 TB (~2k bytes / entry)</td><td>-</td></tr><tr><td colspan="5">n-gram-based index</td></tr><tr><td>Brants et al. (2007)</td><td>2 T</td><td>3 × 10<sup>11</sup></td><td>unreported</td><td>5</td></tr><tr><td>Google's (Franz &amp; Brants, 2006)</td><td>1 T</td><td>3.8 × 10<sup>9</sup></td><td>24 GB</td><td>5</td></tr><tr><td>Google Books Ngram (Aiden &amp; Michel, 2011)</td><td>500 B</td><td>unreported</td><td>unreported</td><td>5</td></tr><tr><td>Stehouwer &amp; van Zaanen (2010)</td><td>90 M</td><td>unreported</td><td>unreported</td><td>∞</td></tr><tr><td>Kennington et al. (2012)</td><td>3 M</td><td>5 × 10<sup>12</sup></td><td>330 MB (110 bytes / token)</td><td>∞</td></tr><tr><td>Shareghi et al. (2015)</td><td>9 B</td><td>8 × 10<sup>18</sup></td><td>63 GB (7 bytes / token)</td><td>∞</td></tr><tr><td>infini-gram (ours)</td><td>5 T</td><td>1 × 10<sup>25</sup></td><td>35 TB (7 bytes / token)</td><td>∞</td></tr></table>
482
+
483
+ Table 6: Comparison with other nonparametric language modeling methods. # tokens: number of tokens in the inference-time reference data. # entries: number of representations (counts) in the index. max n: maximum number of context tokens considered. For infini-gram, we consider the combination of Pile-train and RedPajama as reference data.
484
+
485
+ venting copyright infringement, by diverting neural LMs to alternative (yet still plausible) generation paths when they are about to generate long $n$ -grams that appear in the training data, especially if they mostly appear in documents from copyrighted sources.
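+
+ One possible realization of this idea is to mask candidate tokens whose selection would extend a verbatim copy of the training data past a length threshold (a sketch under assumptions: `corpus_count` stands in for an $n$ -gram count lookup such as COUNT, and the per-step scan over the full vocabulary is kept only for clarity):
+
+ ```python
+ import numpy as np
+
+ def divert_from_verbatim(logits, context_tokens, corpus_count, max_copy=50):
+     """Mask tokens that would turn the last `max_copy` tokens into a verbatim
+     span from the reference corpus; `logits` has one entry per vocabulary item."""
+     suffix = tuple(context_tokens[-(max_copy - 1):])
+     if len(suffix) < max_copy - 1 or corpus_count(suffix) == 0:
+         return logits                      # no long verbatim span in progress
+     masked = logits.copy()
+     for tok in range(len(logits)):
+         if corpus_count(suffix + (tok,)) > 0:
+             masked[tok] = -np.inf          # force an alternative continuation
+     return masked
+ ```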
486
+
487
+ Measuring popularity of entities. Mallen et al. (2022) showed that LMs have better memorization of facts about popular entities than about less popular ones. In that paper, heuristic metrics like Wikipedia pageviews are used as a proxy for popularity. A better proxy may be the number of appearances of the entity's name in a massive text corpus, which can be easily computed by infini-gram's COUNT query.
488
+
489
+ Measuring novelty and creativity of text. Are neural LMs generating genuinely novel text, or are they simply copying from their pretraining data? We need a metric for text novelty and creativity, and this metric can potentially be defined by the $n$ -gram overlap between the generated document and the pretraining corpus.
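+
+ For example, one such metric could be the fraction of length-$n$ spans of a generated document that never occur in the pretraining corpus (a sketch only; the choice of $n$ and the `corpus_count` lookup are assumptions, not a metric defined in this paper):
+
+ ```python
+ def novelty_at_n(generated_tokens, corpus_count, n=8):
+     """Fraction of length-n spans of the generated text that are absent from
+     the reference corpus; higher means more novel."""
+     spans = [tuple(generated_tokens[i:i + n])
+              for i in range(len(generated_tokens) - n + 1)]
+     if not spans:
+         return 1.0
+     return sum(corpus_count(s) == 0 for s in spans) / len(spans)
+ ```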
490
+
491
+ Attribution. When using neural LMs to make predictions, people might want to know which training data most influenced the model's decision. Using $n$ -gram lookup with key phrases, we can trace back to related documents in the training data of neural LMs.
492
+
493
+ Non-parametric speculative decoding. Speculative decoding (Chen et al., 2023) speeds up text generation by employing a fast and a slow decoder, where the fast decoder is a smaller model that does the autoregressive token generation, and the slow decoder checks the fast decoder's proposals by parallelizing the forward passes of multiple tokens. Given the low latency of infini-gram, we can potentially use $\infty$ -gram as the fast decoder, similar to He et al. (2023).
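+
+ The draft-and-verify structure could look roughly like the following (a sketch only; `draft_next` would be the $\infty$ -gram continuation of the longest matching suffix and `verify` the batched check by the slow neural LM, both stand-ins here rather than released code):
+
+ ```python
+ def speculative_decode(draft_next, verify, prompt, k=4, max_new=64):
+     """Fast drafter proposes k tokens per round; the slow verifier checks them in
+     one batched forward pass and supplies its own token at the first mismatch."""
+     tokens = list(prompt)
+     while len(tokens) - len(prompt) < max_new:
+         proposed = []
+         for _ in range(k):                          # cheap autoregressive drafting
+             proposed.append(draft_next(tokens + proposed))
+         n_accepted, fix = verify(tokens, proposed)  # 0 <= n_accepted <= k
+         tokens.extend(proposed[:n_accepted] + [fix])  # at least one token per round
+     return tokens
+ ```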
494
+
495
+ We welcome the community to collaboratively build toward the aforementioned directions, by leveraging our publicly released web interface and API endpoint.
496
+
497
+ # F Extended Related Work
498
+
499
+ $n$ -grams beyond $n = 5$ . Mochihashi & Sumita (2007) and Wood et al. (2009) consider infinitely-ordered Markov models, of which the $\infty$ -gram LM is a special case. Aside from the $\infty$ -gram LM, some other work has found value in scaling up $n$ in $n$ -grams. For example, KiloGrams (Raff et al., 2019) uses 1024-grams as features for malware detection.
500
+
501
+ Other data structures for text indexing. Besides suffix arrays and suffix trees, other data structures have been used to index text corpora, each satisfying different trade-offs. Koala (Vu et al., 2023) builds a compressed suffix array to analyze overlap between text corpora. The ROOTS Search Tool (Piktus et al., 2023) builds a BM25 index on the ROOTS corpus, and
502
+
503
+ supports document searching via both exact match and fuzzy match of $n$ -grams. Data Portraits (Marone & Van Durme, 2023) proposes a lightweight index based on Bloom filters, and is tailored for probabilistic membership inference (exact match of $n$ -grams of 50 characters, where $n \approx 8$ ) against the Pile and Stacks. ElasticSearch is a proprietary search engine based on the Lucene index, and it has been used by Dodge et al. (2021) to search documents in C4, and also by Elazar et al. (2023) to count $n$ -grams and list the most frequent $n$ -grams in various corpora of up to 480B tokens.
504
+
505
+ Nonparametric language models. A nonparametric LM refers to an LM whose complexity is not bounded a priori; instead, its complexity can grow or change according to the data available at inference time. Prior work is broadly divided into two categories: a token retrieval approach that represents each token as one vector and uses a nonparametric prediction function (Khandelwal et al., 2020; Zhong et al., 2022; Lan et al., 2023; Min et al., 2023b;a; Shi et al., 2023), and a chunk retrieval approach that represents each chunk of text as a vector and incorporates nearest chunks into the neural language model (Guu et al., 2020; Izacard et al., 2022; Borgeaud et al., 2022). Scaling the reference data in nonparametric LMs is very expensive, as it requires storing a vector for every unit (either token or chunk). To the best of our knowledge, the prior work with the largest reference data is RETRO (Borgeaud et al., 2022), which uses a 7B-parameter LM and reference data consisting of 1.8 trillion tokens. It stores and searches over 28 billion vectors, estimated to consume 432 TB of disk space. $^{2}$ (Detailed comparisons in Table 6.)
506
+
507
+ # G Example Queries and Web Interface
508
+
509
+ Figures 11 to 16 show one example for each of the six query types supported by infini-gram. We query the Dolma corpus in these examples. Screenshots are taken from our web interface.
510
+
511
+ ![](images/95e4437bc0ea0edceab81cbd3e4815d7a83c29e9be780f4f45b55b988a1d365b.jpg)
512
+ Figure 11: Example for query type 1 (COUNT): Counting an $n$ -gram.
513
+
514
+ ![](images/95f16ff583c6ccff3183593ce046fb4fce38df26b52bec904b580dc96964e16e.jpg)
515
+ Figure 12: Example for query type 2 (NGRAMPROB): Computing a token probability from $n$ -gram LM (with given $n$ , no backoff).
516
+
517
+ ![](images/223600fc68260d8393d19b8c70d4e8d3c56f20fa4ac96d07e767d049dd68c8d1.jpg)
518
+ Figure 13: Example for query type 3 (NGRAMDIST): Computing the full next-token distribution from $n$ -gram LM. Due to space limits, only top-10 tokens are shown.
519
+
520
+ ![](images/029ba304773e841e1f2802cc4f8f637b9244b90eedd158e09b8296efa617d41b.jpg)
521
+ Figure 14: Example for query type 4 (INFGRAMPROB): Computing a token probability from $\infty$ -gram LM.
522
+
523
+ ![](images/1ceb34f8f13a01d1121531b117da22ca871f9be3480b54d260d09cc8b611927a.jpg)
524
+ Figure 15: Example for query type 5 (INFGRAMDIST): Computing the full next-token distribution from $\infty$ -gram LM. Due to space limits, only top-10 tokens are shown.
525
+
526
+ ![](images/61321f0bdc0e158359c46598efe94f6490f2d09b3e6c2bfcb60aa9ccc755ee04.jpg)
527
+ Figure 16: Example for query type 6 (SEARCHDOC): Returning documents containing an $n$ -gram, or a CNF logical expression of $n$ -gram terms, connected with AND's and/or OR's.
2401.17xxx/2401.17377/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:983fc64e579f0460b7f6d34e790c75589020bfda48528e9da9813df661e7f6ce
3
+ size 1230060
2401.17xxx/2401.17377/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17409/f7f0b765-9c37-492d-8e8c-570cc614171e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45ad8a9542e4e72289df150ae9e048d31702cdff25edae8f46cdf4f25ac06796
3
+ size 14263422
2401.17xxx/2401.17409/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17409/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29d79c1a1208520e7e521bb196d45d2ea75a101c44ea0f402f450f9137c2e887
3
+ size 1091132
2401.17xxx/2401.17409/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_content_list.json ADDED
@@ -0,0 +1,2205 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Synthetic Dialogue Dataset Generation using LLM Agents",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 196,
8
+ 83,
9
+ 801,
10
+ 104
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Yelaman Abdullin and Diego Molla-Aliod",
17
+ "bbox": [
18
+ 317,
19
+ 114,
20
+ 682,
21
+ 130
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Macquarie University",
28
+ "bbox": [
29
+ 411,
30
+ 131,
31
+ 589,
32
+ 147
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "yelaman.abdullin@hdr.mq.edu.au, diego.molla-aliod@mq.edu.au",
39
+ "bbox": [
40
+ 206,
41
+ 148,
42
+ 794,
43
+ 164
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Bahadorreza Ofoghi and John Yearwood",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 149,
53
+ 180,
54
+ 512,
55
+ 196
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Deakin University",
62
+ "bbox": [
63
+ 255,
64
+ 197,
65
+ 406,
66
+ 212
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "{b.ofoghi,john.yearwood}@deakin.edu.au",
73
+ "bbox": [
74
+ 137,
75
+ 214,
76
+ 524,
77
+ 230
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Qingyang Li",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 608,
87
+ 180,
88
+ 722,
89
+ 197
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "The University of Melbourne",
96
+ "bbox": [
97
+ 544,
98
+ 198,
99
+ 784,
100
+ 212
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "q15@student.unimelb.edu.au",
107
+ "bbox": [
108
+ 534,
109
+ 214,
110
+ 798,
111
+ 229
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "Abstract",
118
+ "text_level": 1,
119
+ "bbox": [
120
+ 260,
121
+ 252,
122
+ 339,
123
+ 267
124
+ ],
125
+ "page_idx": 0
126
+ },
127
+ {
128
+ "type": "text",
129
+ "text": "Linear programming (LP) problems are pervasive in real-life applications. However, despite their apparent simplicity, an untrained user may find it difficult to determine the linear model of their specific problem. We envisage the creation of a goal-oriented conversational agent that will engage in conversation with the user to elicit all information required so that a subsequent agent can generate the linear model. In this paper, we present an approach for the generation of sample dialogues that can be used to develop and train such a conversational agent. Using prompt engineering, we develop two agents that \"talk\" to each other, one acting as the conversational agent, and the other acting as the user. Using a set of text descriptions of linear problems from NL4Opt available to the user only, the agent and the user engage in conversation until the agent has retrieved all key information from the original problem description. We also propose an extrinsic evaluation of the dialogues by assessing how well the summaries generated by the dialogues match the original problem descriptions. We conduct human and automatic evaluations, including an evaluation approach that uses GPT-4 to mimic the human evaluation metrics. The evaluation results show an overall good quality of the dialogues, though research is still needed to improve the quality of the GPT-4 evaluation metrics. The resulting dialogues, including the human annotations of a subset, are available to the research community. The conversational agent used for the generation of the dialogues can be used as a baseline.",
130
+ "bbox": [
131
+ 142,
132
+ 282,
133
+ 460,
134
+ 778
135
+ ],
136
+ "page_idx": 0
137
+ },
138
+ {
139
+ "type": "text",
140
+ "text": "1 Introduction",
141
+ "text_level": 1,
142
+ "bbox": [
143
+ 114,
144
+ 795,
145
+ 258,
146
+ 810
147
+ ],
148
+ "page_idx": 0
149
+ },
150
+ {
151
+ "type": "text",
152
+ "text": "Linear programming (LP) is a mathematical optimization technique widely employed to address a multitude of real-world challenges, ranging from resource allocation in supply chain management to portfolio optimization in finance. Despite the ubiquity of LP problems and their practical importance,",
153
+ "bbox": [
154
+ 112,
155
+ 822,
156
+ 489,
157
+ 919
158
+ ],
159
+ "page_idx": 0
160
+ },
161
+ {
162
+ "type": "text",
163
+ "text": "many individuals, particularly those without specialized mathematical backgrounds, often struggle to formulate the appropriate linear models for their specific problem instances. This barrier hinders the broader utilization of LP techniques, especially among non-experts.",
164
+ "bbox": [
165
+ 507,
166
+ 253,
167
+ 884,
168
+ 348
169
+ ],
170
+ "page_idx": 0
171
+ },
172
+ {
173
+ "type": "text",
174
+ "text": "To mitigate this challenge, we propose the development of a goal-oriented conversational agent capable of assisting users in constructing accurate linear models for their unique problem scenarios. This conversational agent would engage users in a dialogue, eliciting relevant information pertaining to the problem, and subsequently generate the corresponding linear model. This paper focuses on an essential aspect of creating such an agent — the generation of synthetic dialogues that can be employed to train and evaluate the conversational agent's performance.",
175
+ "bbox": [
176
+ 507,
177
+ 351,
178
+ 884,
179
+ 543
180
+ ],
181
+ "page_idx": 0
182
+ },
183
+ {
184
+ "type": "text",
185
+ "text": "Our methodology leverages prompt engineering to construct two distinct agents: one simulating the conversational agent's behavior, and the other emulating the user's responses during problem-solving interactions. The agents are designed to engage in purposeful dialogues aimed at extracting the necessary information from the user to construct a valid linear model. To facilitate this process, we utilize a set of text descriptions of linear problems, accessible only to the user agent, sourced from the NL4Opt dataset (Ramamonjison et al., 2022, 2023). These text descriptions serve as the basis for the dialogues and enable the conversational agent to iteratively gather the critical information required for problem formulation.",
186
+ "bbox": [
187
+ 507,
188
+ 546,
189
+ 882,
190
+ 787
191
+ ],
192
+ "page_idx": 0
193
+ },
194
+ {
195
+ "type": "text",
196
+ "text": "In addition to the generation process, we propose an extrinsic evaluation approach for assessing the quality and effectiveness of the generated dialogues. Specifically, we evaluate how well the summaries generated by the dialogues align with the original problem descriptions from NL4Opt. This evaluation encompasses both human assessments, where human judges evaluate the quality",
197
+ "bbox": [
198
+ 507,
199
+ 790,
200
+ 884,
201
+ 919
202
+ ],
203
+ "page_idx": 0
204
+ },
205
+ {
206
+ "type": "aside_text",
207
+ "text": "arXiv:2401.17461v1 [cs.CL] 30 Jan 2024",
208
+ "bbox": [
209
+ 21,
210
+ 312,
211
+ 60,
212
+ 725
213
+ ],
214
+ "page_idx": 0
215
+ },
216
+ {
217
+ "type": "text",
218
+ "text": "of dialogue summaries, and automated metrics to quantitatively measure the informativeness of the generated summaries.",
219
+ "bbox": [
220
+ 112,
221
+ 84,
222
+ 487,
223
+ 131
224
+ ],
225
+ "page_idx": 1
226
+ },
227
+ {
228
+ "type": "text",
229
+ "text": "Our preliminary results from human and automatic evaluations indicate that the generated dialogues exhibit a high degree of fidelity to the original problem descriptions, thereby demonstrating the quality of the synthetic dialogues generated.",
230
+ "bbox": [
231
+ 112,
232
+ 133,
233
+ 489,
234
+ 212
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "text",
240
+ "text": "The contributions of this paper are:",
241
+ "bbox": [
242
+ 131,
243
+ 214,
244
+ 394,
245
+ 229
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "list",
251
+ "sub_type": "text",
252
+ "list_items": [
253
+ "1. An approach for the generation of dialogues for the development of goal-oriented conversational agents. In this paper, the goal consists of eliciting information from the user in order to generate a linear programming model, noting that the techniques presented here can be adapted to other goals.",
254
+ "2. A dataset of 476 dialogues for the development of such a conversational agent, of which 28 have been annotated manually. $^{1}$ Even though these 476 dialogues are generated automatically, since the generation process is non-deterministic, separate runs of the same program will generate different dialogues. For this reason, we consider that these dialogues form a useful dataset for the research community to facilitate reproducibility.",
255
+ "3. An extrinsic evaluation approach based on comparing the summaries generated by the dialogue, with the original problem description.",
256
+ "4. An automatic evaluation approach using GPT-4 that mimics the behavior of human evaluation."
257
+ ],
258
+ "bbox": [
259
+ 127,
260
+ 241,
261
+ 489,
262
+ 659
263
+ ],
264
+ "page_idx": 1
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "2 Background and Related Work",
269
+ "text_level": 1,
270
+ "bbox": [
271
+ 112,
272
+ 674,
273
+ 416,
274
+ 690
275
+ ],
276
+ "page_idx": 1
277
+ },
278
+ {
279
+ "type": "text",
280
+ "text": "2.1 Linear Programming and NL4Opt",
281
+ "text_level": 1,
282
+ "bbox": [
283
+ 112,
284
+ 701,
285
+ 431,
286
+ 717
287
+ ],
288
+ "page_idx": 1
289
+ },
290
+ {
291
+ "type": "text",
292
+ "text": "LP problems are pervasive in real-life applications. They are commonly utilized for resource allocation, planning, scheduling, transportation optimization, portfolio management, and numerous other areas. For instance, in production planning, LP can help determine how to use limited human, material, and financial resources to achieve maximum economic benefits.",
293
+ "bbox": [
294
+ 112,
295
+ 722,
296
+ 487,
297
+ 848
298
+ ],
299
+ "page_idx": 1
300
+ },
301
+ {
302
+ "type": "text",
303
+ "text": "LP problems are a class of mathematical optimization problems where the goal is to find a set of",
304
+ "bbox": [
305
+ 112,
306
+ 851,
307
+ 487,
308
+ 883
309
+ ],
310
+ "page_idx": 1
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "values for the decision variables that satisfies a set of linear constraints and maximizes or minimizes the value of a linear objective function (Chen et al., 2011). The general form of an LP problem can be formulated as follows,",
315
+ "bbox": [
316
+ 507,
317
+ 84,
318
+ 882,
319
+ 162
320
+ ],
321
+ "page_idx": 1
322
+ },
323
+ {
324
+ "type": "text",
325
+ "text": "Maximize $\\mathbf{c}^T\\mathbf{x}$",
326
+ "bbox": [
327
+ 603,
328
+ 170,
329
+ 734,
330
+ 186
331
+ ],
332
+ "page_idx": 1
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "subject to $\\mathbf{A}\\mathbf{x}\\leq \\mathbf{b}$",
337
+ "bbox": [
338
+ 606,
339
+ 189,
340
+ 769,
341
+ 204
342
+ ],
343
+ "page_idx": 1
344
+ },
345
+ {
346
+ "type": "equation",
347
+ "text": "\n$$\n\\mathbf {x} \\geq \\mathbf {0}\n$$\n",
348
+ "text_format": "latex",
349
+ "bbox": [
350
+ 717,
351
+ 205,
352
+ 768,
353
+ 219
354
+ ],
355
+ "page_idx": 1
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "where $\\mathbf{x}$ is a vector of decision variables $\\in \\mathbb{R}^n$ , $\\mathbf{c}$ and $\\mathbf{b}$ are given vectors of constants, and $\\mathbf{A}$ is a given matrix of constants. Linear programming is widely applicable in various domains due to its ability to model real-world optimization challenges and the availability of efficient solution algorithms. Once an LP model has been formulated for a problem, powerful solvers driven by efficient algorithms can help one to solve it, even for surprisingly complicated and large-scale problems.",
360
+ "bbox": [
361
+ 507,
362
+ 229,
363
+ 884,
364
+ 388
365
+ ],
366
+ "page_idx": 1
367
+ },
368
+ {
369
+ "type": "text",
370
+ "text": "In 2022, a competition to extract linear programming formulations from natural language (NL4Opt) developed the first dataset of linear programming word problems (Ramamonjison et al., 2022, 2023). It contains 1101 instances from various domains. These LP problems focus on a variety of common contexts such as production, resource allocation, investment allocation, agriculture, transportation, health sciences, sales, etc. Each instance has an unstructured natural language description of the LP problem involving decision variables, one objective function, and several constraints.",
371
+ "bbox": [
372
+ 507,
373
+ 390,
374
+ 882,
375
+ 580
376
+ ],
377
+ "page_idx": 1
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "The NL4Opt dataset provides valuable examples of real-world natural language descriptions for LP problems, showcasing a range of constraint types. We deconstruct each problem description in the development subset into an objective function description and several constraint descriptions. By analyzing these constraints, we find that they can be categorized into nine refined classes in Table 1. Different instances of the dataset consist of different combinations of these constraint types. Constraint types 1 to 3 and 5 to 7 are frequently used to represent capacity limits, budget constraints, or resource availability. Types 4, 8, and 9 impose ratio control and balancing between different quantities. These nine types of constraints are often encountered in real-world problems and can encompass a substantial portion of common constraints.",
382
+ "bbox": [
383
+ 507,
384
+ 583,
385
+ 882,
386
+ 854
387
+ ],
388
+ "page_idx": 1
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "2.2 Evaluation Methods",
393
+ "text_level": 1,
394
+ "bbox": [
395
+ 507,
396
+ 866,
397
+ 714,
398
+ 879
399
+ ],
400
+ "page_idx": 1
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "Evaluation of dialogue systems is a complex endeavor, typically involving a blend of automated",
405
+ "bbox": [
406
+ 507,
407
+ 887,
408
+ 882,
409
+ 917
410
+ ],
411
+ "page_idx": 1
412
+ },
413
+ {
414
+ "type": "page_footnote",
415
+ "text": "1https://github.com/eabdullin/ optimouse-quest/",
416
+ "bbox": [
417
+ 112,
418
+ 891,
419
+ 364,
420
+ 917
421
+ ],
422
+ "page_idx": 1
423
+ },
424
+ {
425
+ "type": "table",
426
+ "img_path": "images/b6a7b10541b224982d64f4fea34db1a123d2fc2a7d0a69e2fb624771f45904c7.jpg",
427
+ "table_caption": [],
428
+ "table_footnote": [],
429
+ "table_body": "<table><tr><td></td><td>Constraint type</td><td>Math inequality</td></tr><tr><td>1</td><td>Upper bound on single variable</td><td>xi≤b</td></tr><tr><td>2</td><td>Upper bound on sum of vari-ables</td><td>∑ixi≤b</td></tr><tr><td>3</td><td>Upper bound on weighted sum of variables</td><td>∑iaixi≤b</td></tr><tr><td>4</td><td>Upper bound on proportion</td><td>xj≤c∑ixi</td></tr><tr><td>5</td><td>Lower bound on single variable</td><td>xi≥b</td></tr><tr><td>6</td><td>Lower bound on sum of vari-ables</td><td>∑ixi≥b</td></tr><tr><td>7</td><td>Lower bound on weighted sum of variables</td><td>∑iaixi≥b</td></tr><tr><td>8</td><td>Lower bound on proportion</td><td>xj≥c∑ixi</td></tr><tr><td>9</td><td>Comparison constraints</td><td>dxi≤xj</td></tr></table>",
430
+ "bbox": [
431
+ 115,
432
+ 80,
433
+ 468,
434
+ 258
435
+ ],
436
+ "page_idx": 2
437
+ },
438
+ {
439
+ "type": "text",
440
+ "text": "Table 1: Classification of constraints. Suppose an LP problem has $n$ decision variables, ${x}_{i}$ and ${x}_{j}$ are decision variables, ${a}_{i}$ and $b$ are nonnegative constants, $d$ is a positive constant,and $c$ is a constant $\\in \\left( {0,1}\\right\\rbrack ,i,j \\in I =$ $\\{ 1,2,\\ldots ,n\\}$ .",
441
+ "bbox": [
442
+ 112,
443
+ 266,
444
+ 489,
445
+ 338
446
+ ],
447
+ "page_idx": 2
448
+ },
449
+ {
450
+ "type": "text",
451
+ "text": "metrics and human assessments. A traditional automated metric such as ROUGE (Lin, 2004) is frequently employed for measuring textual similarity and evaluating information overlap. However, this metric could be improved in its ability to evaluate semantic coherence and the effective fulfillment of dialogue goals (Liu et al., 2016). In this work, we calculated ROUGE-1, ROUGE-2, and ROUGE-L scores to cover unigram, bigram, and longest common subsequence overlaps, respectively.",
452
+ "bbox": [
453
+ 112,
454
+ 363,
455
+ 489,
456
+ 524
457
+ ],
458
+ "page_idx": 2
459
+ },
460
+ {
461
+ "type": "text",
462
+ "text": "Recent advancements have directed more sophisticated evaluation methods, including BERTScore (Zhang et al., 2020), which leverages contextual embeddings from pre-trained BERT models to assess semantic similarity between generated and reference text. BERTScore complements ROUGE by adding a more nuanced semantic layer to the evaluation, capturing aspects that traditional metrics may miss.",
463
+ "bbox": [
464
+ 112,
465
+ 525,
466
+ 489,
467
+ 669
468
+ ],
469
+ "page_idx": 2
470
+ },
471
+ {
472
+ "type": "text",
473
+ "text": "Moreover, the rise of Large Language Models (LLMs) has further enriched the toolkit for text generation evaluation (Fu et al., 2023; Liu et al., 2023). LLMs, with their ability for nuanced analysis, offer possibilities beyond syntactic and surface-level metrics. They can potentially capture deeper aspects of dialogue semantics and goal alignment.",
474
+ "bbox": [
475
+ 112,
476
+ 670,
477
+ 489,
478
+ 783
479
+ ],
480
+ "page_idx": 2
481
+ },
482
+ {
483
+ "type": "text",
484
+ "text": "3 Methodology",
485
+ "text_level": 1,
486
+ "bbox": [
487
+ 112,
488
+ 796,
489
+ 263,
490
+ 813
491
+ ],
492
+ "page_idx": 2
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "This section outlines the methods employed to accomplish two objectives of this study: automating the generation of dialogue datasets, and evaluating the quality of the generated dialogues. We utilize a dual-agent setup leveraging LLM, in our case, OpenAI's GPT-4 (OpenAI, 2023), to simulate a",
497
+ "bbox": [
498
+ 112,
499
+ 822,
500
+ 489,
501
+ 919
502
+ ],
503
+ "page_idx": 2
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "conversation between a user and an assistant focusing on linear programming problems.",
508
+ "bbox": [
509
+ 507,
510
+ 84,
511
+ 884,
512
+ 116
513
+ ],
514
+ "page_idx": 2
515
+ },
516
+ {
517
+ "type": "text",
518
+ "text": "3.1 Dual-Agent LLM Setup",
519
+ "text_level": 1,
520
+ "bbox": [
521
+ 507,
522
+ 127,
523
+ 742,
524
+ 143
525
+ ],
526
+ "page_idx": 2
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "The dual-agent setup aims to model a conversation between an automatic conversational agent who asks questions to a user, with the aim to identify all key information of the linear problem to model. Specifically, the setup comprises two distinct LLM agents: a Question Generation Agent and a Question Answering Agent who \"talk\" to each other.",
531
+ "bbox": [
532
+ 507,
533
+ 147,
534
+ 884,
535
+ 260
536
+ ],
537
+ "page_idx": 2
538
+ },
539
+ {
540
+ "type": "list",
541
+ "sub_type": "text",
542
+ "list_items": [
543
+ "- Question Generation (QG) Agent: The primary objective of this agent is to ask questions that will enable it to elicit and summarize the key information of the original problem statement. Importantly, this agent does not have access to the original problem statement and must rely solely on its interactions with the QA Agent to gather all necessary information.",
544
+ "- Question Answering (QA) Agent: This agent is designed to answer questions based on a pre-defined problem statement from NL4Opt, which serves as the simulated knowledge base for the assistant. To make the dialogue more natural and contextually grounded, the agent is configured to impersonate the individual mentioned in the original problem statement. This design choice enhances the genuineness of the interaction, creating a more realistic user-agent dialogue experience."
545
+ ],
546
+ "bbox": [
547
+ 531,
548
+ 271,
549
+ 884,
550
+ 581
551
+ ],
552
+ "page_idx": 2
553
+ },
554
+ {
555
+ "type": "text",
556
+ "text": "3.2 Implementation Details",
557
+ "text_level": 1,
558
+ "bbox": [
559
+ 507,
560
+ 593,
561
+ 741,
562
+ 608
563
+ ],
564
+ "page_idx": 2
565
+ },
566
+ {
567
+ "type": "text",
568
+ "text": "An essential part of the QA Agent is a component (which also leverages LLM) that compares a summary with an original problem statement. This component's role is twofold: to provide feedback when discrepancies are found between the provided summary and the original problem statement, and to signal the termination of dialogue generation if not. To detect that the QG has generated a summary in the latest dialogue turn, we employed a straightforward approach using regular expressions and predefined keywords.",
569
+ "bbox": [
570
+ 505,
571
+ 613,
572
+ 882,
573
+ 789
574
+ ],
575
+ "page_idx": 2
576
+ },
577
+ {
578
+ "type": "text",
579
+ "text": "In order to maintain consistency and guide the LLM in its responses, prompts were injected throughout the dialogue. Aside from the initial system prompt, which always begins the conversation, subsequent prompts were introduced at each turn and retained in the dialogue's history. For the QG Agent, a succinct instruction, A NEXTMESSAGE/QUESTION was provided. For the QA",
580
+ "bbox": [
581
+ 507,
582
+ 791,
583
+ 884,
584
+ 919
585
+ ],
586
+ "page_idx": 2
587
+ },
588
+ {
589
+ "type": "text",
590
+ "text": "Agent, the prompts varied based on the dialogue stage. Mostly, the instruction ANSWER SHORTLY. USE MAXIMUM 30 WORDS. was used. However, when the QG agent provided a summary, the prompt adjusted accordingly: either feedback on summary², or THE SUMMARY ACCEPTED. IT'S TIME TO FINISH DIALOG AND SAY GOODBYE, signaling the end of the conversation.",
591
+ "bbox": [
592
+ 112,
593
+ 84,
594
+ 489,
595
+ 212
596
+ ],
597
+ "page_idx": 3
598
+ },
599
+ {
600
+ "type": "text",
601
+ "text": "We utilized the dataset of linear programming problems from the NL4Opt competition (Ramamonjison et al., 2023), specifically from subtask 2. The task is designed to take a natural language problem description, labeled semantic entities, and an ordered mapping of variable mentions, and then formulate a precise meaning representation. These problem descriptions served as the basis for the simulated conversations between the two LLM agents in this work.",
602
+ "bbox": [
603
+ 112,
604
+ 212,
605
+ 489,
606
+ 370
607
+ ],
608
+ "page_idx": 3
609
+ },
610
+ {
611
+ "type": "list",
612
+ "sub_type": "text",
613
+ "list_items": [
614
+ "- Development Subset: For this subset, consisting of 98 problems. Two to four separate dialogues were generated per problem.",
615
+ "- Training Subset: For the training subset, a single dialogue was generated for the first 241 problems."
616
+ ],
617
+ "bbox": [
618
+ 136,
619
+ 385,
620
+ 487,
621
+ 486
622
+ ],
623
+ "page_idx": 3
624
+ },
625
+ {
626
+ "type": "text",
627
+ "text": "The dual-agent system was implemented using OpenAI's Chat Completion API<sup>3</sup>. We generated the dialogues using different temperature settings for the development subset to ensure a variety of dialogues. The aim was to generate multiple dialogue instances for each problem so that it is possible to robustly evaluate the model's performance and assess the diversity in the generated dataset in subsequent research.",
628
+ "bbox": [
629
+ 112,
630
+ 497,
631
+ 489,
632
+ 640
633
+ ],
634
+ "page_idx": 3
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "Figure 1 shows part of a sample dialogue, and figure 2 shows the associated problem statement. Appendix B shows the actual prompts used.",
639
+ "bbox": [
640
+ 112,
641
+ 642,
642
+ 489,
643
+ 690
644
+ ],
645
+ "page_idx": 3
646
+ },
647
+ {
648
+ "type": "text",
649
+ "text": "4 Evaluation Strategy",
650
+ "text_level": 1,
651
+ "bbox": [
652
+ 112,
653
+ 701,
654
+ 319,
655
+ 719
656
+ ],
657
+ "page_idx": 3
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "To evaluate the quality of the generated dialogues, we conduct an extrinsic evaluation. The evaluation process focuses on an underlying hypothesis that serves as the core of our study: a correct summary generated by the QG Agent would imply that the agent has posed the appropriate and necessary questions to restore the original problem statement effectively. The hypothesis is grounded in several key assumptions and logical deductions:",
662
+ "bbox": [
663
+ 112,
664
+ 727,
665
+ 489,
666
+ 872
667
+ ],
668
+ "page_idx": 3
669
+ },
670
+ {
671
+ "type": "list",
672
+ "sub_type": "text",
673
+ "list_items": [
674
+ "QG Agent: Hello there! I'm OptiMouse, your friendly helper. I'm here to assist you in making the best decision for your situation. Could you please tell me what you're trying to achieve or improve?",
675
+ "QA Agent: Hello! I'm looking to maximize profit for my furniture store while considering space, capital, and product ratio constraints.",
676
+ "QG Agent: That sounds like a great goal! To help you better, could you please tell me what types of furniture you sell in your store?",
677
+ "QA Agent: We sell dining tables and chairs in our store.",
678
+ "QG Agent: That's great! Now, could you please tell me how much profit you make from selling a dining table and how much from selling a chair?",
679
+ "QA Agent: We make a profit of $350 from selling a dining table and$ 75 from selling a chair.",
680
+ "QG Agent: That's helpful to know. Now, could you please tell me how much space a dining table and a chair take up in your store?",
681
+ "QA Agent: A dining table requires 8 sq ft of floor space while a chair requires 2 sq ft."
682
+ ],
683
+ "bbox": [
684
+ 534,
685
+ 85,
686
+ 855,
687
+ 432
688
+ ],
689
+ "page_idx": 3
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "1 1 \n1 1",
694
+ "bbox": [
695
+ 537,
696
+ 437,
697
+ 645,
698
+ 455
699
+ ],
700
+ "page_idx": 3
701
+ },
702
+ {
703
+ "type": "text",
704
+ "text": "QG Agent: That's a very important detail. Now, could you please tell me if there are any other constraints or requirements that we need to consider for your furniture store?",
705
+ "bbox": [
706
+ 537,
707
+ 455,
708
+ 855,
709
+ 514
710
+ ],
711
+ "page_idx": 3
712
+ },
713
+ {
714
+ "type": "list",
715
+ "sub_type": "text",
716
+ "list_items": [
717
+ "QA Agent: No, there are no other constraints or requirements for my furniture store.",
718
+ "QG Agent: Great! Let me summarize the information:"
719
+ ],
720
+ "bbox": [
721
+ 537,
722
+ 514,
723
+ 855,
724
+ 562
725
+ ],
726
+ "page_idx": 3
727
+ },
728
+ {
729
+ "type": "list",
730
+ "sub_type": "text",
731
+ "list_items": [
732
+ "- You sell dining tables and chairs in your furniture store",
733
+ "- You make a profit of $350 from selling a dining table and$ 75 from selling a chair.",
734
+ "- A dining table requires 8 sq ft of floor space while a chair requires 2 sq ft.",
735
+ "- You have 500 sq ft of space available in your store.",
736
+ "- You have a maximum of $20000 available to purchase the dining tables and chairs.",
737
+ "- A dining table costs you $1000 to purchase and a chair costs $150.",
738
+ "- At least $70\\%$ of all furniture in the store must be chairs."
739
+ ],
740
+ "bbox": [
741
+ 626,
742
+ 573,
743
+ 853,
744
+ 774
745
+ ],
746
+ "page_idx": 3
747
+ },
748
+ {
749
+ "type": "text",
750
+ "text": "Is there anything else you would like to add or modify in this summary?",
751
+ "bbox": [
752
+ 626,
753
+ 785,
754
+ 853,
755
+ 812
756
+ ],
757
+ "page_idx": 3
758
+ },
759
+ {
760
+ "type": "text",
761
+ "text": "Figure 1: A sample dialogue between a QG Agent and a QA Agent about maximizing furniture store profits.",
762
+ "bbox": [
763
+ 507,
764
+ 846,
765
+ 882,
766
+ 876
767
+ ],
768
+ "page_idx": 3
769
+ },
770
+ {
771
+ "type": "page_footnote",
772
+ "text": "Full prompts are provided in Appendix B.",
773
+ "bbox": [
774
+ 136,
775
+ 879,
776
+ 396,
777
+ 892
778
+ ],
779
+ "page_idx": 3
780
+ },
781
+ {
782
+ "type": "page_footnote",
783
+ "text": "<sup>3</sup>https://platform.openai.com/docs/",
784
+ "bbox": [
785
+ 136,
786
+ 892,
787
+ 394,
788
+ 904
789
+ ],
790
+ "page_idx": 3
791
+ },
792
+ {
793
+ "type": "page_footnote",
794
+ "text": "api-reference/chat",
795
+ "bbox": [
796
+ 115,
797
+ 906,
798
+ 255,
799
+ 917
800
+ ],
801
+ "page_idx": 3
802
+ },
803
+ {
804
+ "type": "text",
805
+ "text": " A furniture store only stocks and sells dining tables and chairs. The profit per dining table is $350 and the profit per chair is $75. There is 500 sq ft of space available and a dining table requires 8 sq ft of floor space while a chair requires 2 sq ft. Because chairs sell in larger quantities, at least 70% of all furniture in the store must be chairs. In terms of capital, a dining table ties up $1000 in capital and a chair ties up $150 in capital. The company wants a maximum of $20000 worth of capital tied up at any time. Formulate an LP to maximize profit.",
806
+ "bbox": [
807
+ 131,
808
+ 86,
809
+ 473,
810
+ 209
811
+ ],
812
+ "page_idx": 4
813
+ },
814
+ {
815
+ "type": "text",
816
+ "text": "Figure 2: A sample problem statement from the NL4Opt dataset.",
817
+ "bbox": [
818
+ 112,
819
+ 228,
820
+ 487,
821
+ 258
822
+ ],
823
+ "page_idx": 4
824
+ },
825
+ {
826
+ "type": "text",
827
+ "text": "Extrinsic evaluation. In dialogue systems, particularly goal-oriented systems, the dialogue often progresses through a series of questions and answers to reach a particular end state (Qi et al., 2020; Majumder et al., 2021). Therefore, the questions asked directly impact the quality and accuracy of the final output — here, the summary of the original problem statement. We, therefore, conduct an extrinsic evaluation of the dialogues.",
828
+ "bbox": [
829
+ 112,
830
+ 282,
831
+ 489,
832
+ 429
833
+ ],
834
+ "page_idx": 4
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "Incomplete Initial Information. The QG Agent starts with incomplete information about the problem at hand. Therefore, asking the right questions is crucial for the agent to gather enough information for an accurate summary. A poor or incomplete summary would suggest that the agent has asked insufficient or incorrect questions.",
839
+ "bbox": [
840
+ 112,
841
+ 436,
842
+ 489,
843
+ 549
844
+ ],
845
+ "page_idx": 4
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "Semantic Understanding. An accurate summary involves not just factual correctness but also a semantic understanding of the problem's constraints and objectives. Therefore, correct summarization can be viewed as an implicit validation of the agent's ability to grasp the problem's complexities through its questions.",
850
+ "bbox": [
851
+ 112,
852
+ 556,
853
+ 489,
854
+ 670
855
+ ],
856
+ "page_idx": 4
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "To validate these assumptions, we use an approach involving human evaluation as well as automated evaluation through a third LLM agent of generated summaries. By evaluating the correctness of the generated summaries, we thus indirectly assess the QG Agent's ability to pose appropriate and informative questions that lead to a comprehensive understanding of the problem.",
861
+ "bbox": [
862
+ 112,
863
+ 676,
864
+ 490,
865
+ 806
866
+ ],
867
+ "page_idx": 4
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "4.1 Automatic Evaluation",
872
+ "text_level": 1,
873
+ "bbox": [
874
+ 112,
875
+ 816,
876
+ 332,
877
+ 832
878
+ ],
879
+ "page_idx": 4
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "For the automated evaluation, in addition to the well-known ROUGE and BERTScore, we designed a metric that employed a third LLM (GPT-4) agent to compare the original problem statement with the generated summary. The evaluation was conducted",
884
+ "bbox": [
885
+ 112,
886
+ 839,
887
+ 489,
888
+ 920
889
+ ],
890
+ "page_idx": 4
891
+ },
892
+ {
893
+ "type": "table",
894
+ "img_path": "images/4a076aec146325ae0605f25301a74393db139f42226f6b90b0c13697b876c375.jpg",
895
+ "table_caption": [],
896
+ "table_footnote": [],
897
+ "table_body": "<table><tr><td>Metric</td><td>Value</td></tr><tr><td>ROUGE-1 P</td><td>0.54</td></tr><tr><td>ROUGE-1 R</td><td>0.62</td></tr><tr><td>ROUGE-1 F1</td><td>0.57</td></tr><tr><td>ROUGE-2 P</td><td>0.33</td></tr><tr><td>ROUGE-2 R</td><td>0.39</td></tr><tr><td>ROUGE-2 F1</td><td>0.35</td></tr><tr><td>ROUGE-L P</td><td>0.38</td></tr><tr><td>ROUGE-L R</td><td>0.43</td></tr><tr><td>ROUGE-L F1</td><td>0.40</td></tr><tr><td>BERTscore P</td><td>0.88</td></tr><tr><td>BERTscore R</td><td>0.91</td></tr><tr><td>BERTscore F1</td><td>0.90</td></tr><tr><td>GPT-4 R</td><td>4.60</td></tr><tr><td>GPT-4 P</td><td>4.62</td></tr></table>",
898
+ "bbox": [
899
+ 598,
900
+ 80,
901
+ 796,
902
+ 357
903
+ ],
904
+ "page_idx": 4
905
+ },
906
+ {
907
+ "type": "text",
908
+ "text": "Table 2: Average values of metrics per entire dataset. All values range from 0 to 1 except GPT-4, which ranges from 1 to 5.",
909
+ "bbox": [
910
+ 507,
911
+ 369,
912
+ 884,
913
+ 413
914
+ ],
915
+ "page_idx": 4
916
+ },
917
+ {
918
+ "type": "text",
919
+ "text": "using a \"chain-of-thought\" prompting (Wei et al., 2022). Our agent employs three criteria (\"chains\") to evaluate each summary:",
920
+ "bbox": [
921
+ 507,
922
+ 437,
923
+ 884,
924
+ 487
925
+ ],
926
+ "page_idx": 4
927
+ },
928
+ {
929
+ "type": "list",
930
+ "sub_type": "text",
931
+ "list_items": [
932
+ "- Correct Information: Assessing if the summary accurately represents the facts in the original problem statement.",
933
+ "- Incorrect Information: Identifying any factual inaccuracies or misleading statements in the summary.",
934
+ "- Missing Information: Determining if any crucial elements from the original problem statement are bypassed in the summary."
935
+ ],
936
+ "bbox": [
937
+ 531,
938
+ 498,
939
+ 884,
940
+ 655
941
+ ],
942
+ "page_idx": 4
943
+ },
944
+ {
945
+ "type": "text",
946
+ "text": "Considering the three criteria above, the agent provides an \"Information Recall Score\", \"Information Precision Score\", \"Information Repetition Score\", and \"Readability Score\" to judge if the summary forms a coherent and accurate representation of the original problem. These evaluation metrics are the same as the human evaluation conducted in Section 4.2. Appendix B shows the prompts used.",
947
+ "bbox": [
948
+ 507,
949
+ 665,
950
+ 884,
951
+ 796
952
+ ],
953
+ "page_idx": 4
954
+ },
955
+ {
956
+ "type": "text",
957
+ "text": "4.1.1 Results of the Automatic Evaluation",
958
+ "text_level": 1,
959
+ "bbox": [
960
+ 507,
961
+ 803,
962
+ 853,
963
+ 818
964
+ ],
965
+ "page_idx": 4
966
+ },
967
+ {
968
+ "type": "text",
969
+ "text": "Table 2 summarizes the average metric values across the entire dataset. As we can observe, the absolute values of ROUGE are not very high. However, as we will see below, human evaluation of a subset of the data reveals that the dialogues have generated good summaries in general.",
970
+ "bbox": [
971
+ 507,
972
+ 822,
973
+ 882,
974
+ 919
975
+ ],
976
+ "page_idx": 4
977
+ },
978
+ {
979
+ "type": "table",
980
+ "img_path": "images/05e7a1fc64ff6a94c1e1df18f615f28f9c4ad78210379ef2874dbf4d1e793a61.jpg",
981
+ "table_caption": [],
982
+ "table_footnote": [],
983
+ "table_body": "<table><tr><td>Name</td><td>Value</td></tr><tr><td>Total number of dialogues</td><td>476</td></tr><tr><td>Dialogues with temperature 0</td><td>315</td></tr><tr><td>Dialogues with temperature 1</td><td>149</td></tr><tr><td>Dialogues with summary</td><td>97%</td></tr><tr><td>Average dialogue length (characters)</td><td>3658</td></tr><tr><td>Total number of turns</td><td>9480</td></tr><tr><td>Average number of turns per dialogue</td><td>20</td></tr><tr><td>Average turn length (characters)</td><td>184</td></tr></table>",
984
+ "bbox": [
985
+ 122,
986
+ 80,
987
+ 480,
988
+ 236
989
+ ],
990
+ "page_idx": 5
991
+ },
992
+ {
993
+ "type": "text",
994
+ "text": "To provide a more nuanced understanding of our generated dialogues, we have collected some summary statistics. These are presented in Table 3. The dialogues were generated with varying temperature settings to control the randomness of the text generated by the LLMs. In a small number of cases (3%), the dialogue was not able to generate a summary before the cut-off of 40 turns (20 turns for each agent). The high percentage of dialogues with a summary (97%) indicates the Question Generation Agent's effectiveness in concluding the dialogues with a summary, which is crucial for our evaluation. Dialogue length and number of turns give an indication of the depth and extent of the conversations between the agents.",
995
+ "bbox": [
996
+ 112,
997
+ 287,
998
+ 489,
999
+ 527
1000
+ ],
1001
+ "page_idx": 5
1002
+ },
1003
+ {
1004
+ "type": "text",
1005
+ "text": "4.2 Human Evaluation",
1006
+ "text_level": 1,
1007
+ "bbox": [
1008
+ 112,
1009
+ 541,
1010
+ 310,
1011
+ 555
1012
+ ],
1013
+ "page_idx": 5
1014
+ },
1015
+ {
1016
+ "type": "text",
1017
+ "text": "For the human evaluation component, we carefully curated a subset of 28 problem statements from the development subset. This subset was selected to cover all constraint types represented in the development data (Table 1), ensuring a comprehensive evaluation across diverse problem scenarios.",
1018
+ "bbox": [
1019
+ 112,
1020
+ 563,
1021
+ 487,
1022
+ 658
1023
+ ],
1024
+ "page_idx": 5
1025
+ },
1026
+ {
1027
+ "type": "text",
1028
+ "text": "Given the small base of problems (98), the selection of 28 problems was simple: first, apply a greedy approach that satisfies the quantity requirements of the types with fewer counts, and then manually swap selected candidates with other candidates from the development set as appropriate, until we reached an acceptable distribution of constraint types in the selection. Table 4 shows the distribution of constraint types in the development set and the selection used for human evaluation.",
1029
+ "bbox": [
1030
+ 112,
1031
+ 659,
1032
+ 487,
1033
+ 819
1034
+ ],
1035
+ "page_idx": 5
1036
+ },
1037
+ {
1038
+ "type": "text",
1039
+ "text": "To add an element of variability in dialogue generation, each problem statement in this selection was subjected to a single dialogue generation run. The temperature setting for this run was randomly selected to be either 0 or 1.",
1040
+ "bbox": [
1041
+ 112,
1042
+ 821,
1043
+ 487,
1044
+ 900
1045
+ ],
1046
+ "page_idx": 5
1047
+ },
1048
+ {
1049
+ "type": "text",
1050
+ "text": "Four evaluators (details in the Acknowledge-",
1051
+ "bbox": [
1052
+ 131,
1053
+ 903,
1054
+ 489,
1055
+ 917
1056
+ ],
1057
+ "page_idx": 5
1058
+ },
1059
+ {
1060
+ "type": "table",
1061
+ "img_path": "images/e22dae69ce603c16e32c607bc5c58efe4b1c64a8c34880650574dbfdc34a01b2.jpg",
1062
+ "table_caption": [
1063
+ "Table 3: Summary statistics of generated dialogues."
1064
+ ],
1065
+ "table_footnote": [],
1066
+ "table_body": "<table><tr><td></td><td>Constraint Type</td><td>Dev</td><td>Sel</td></tr><tr><td>1</td><td>Upper bound on single variable</td><td>20</td><td>6</td></tr><tr><td>2</td><td>Upper bound on sum of variables</td><td>12</td><td>4</td></tr><tr><td>3</td><td>Upper bound on weighted sum of \nvariables</td><td>93</td><td>28</td></tr><tr><td>4</td><td>Upper bound on proportion</td><td>8</td><td>2</td></tr><tr><td>5</td><td>Lower bound on single variable</td><td>35</td><td>11</td></tr><tr><td>6</td><td>Lower bound on sum of variables</td><td>7</td><td>2</td></tr><tr><td>7</td><td>Lower bound on weighted sum of \nvariables</td><td>59</td><td>18</td></tr><tr><td>8</td><td>Lower bound on proportion</td><td>15</td><td>5</td></tr><tr><td>9</td><td>Comparison</td><td>43</td><td>13</td></tr></table>",
1067
+ "bbox": [
1068
+ 517,
1069
+ 80,
1070
+ 873,
1071
+ 233
1072
+ ],
1073
+ "page_idx": 5
1074
+ },
1075
+ {
1076
+ "type": "table",
1077
+ "img_path": "images/80a07f3c7055fefc3333b3f9a70e8dd41a1dd0eca6fbcf20c1c53ef4080d9c57.jpg",
1078
+ "table_caption": [
1079
+ "Table 4: Counts of types of linear optimization constraints in the development set of 98 problems (\"Dev\" column), and the selection of 28 problems (\"Sel\" column). The sum is larger than the number of problems since a problem may have multiple constraints."
1080
+ ],
1081
+ "table_footnote": [],
1082
+ "table_body": "<table><tr><td>Metric</td><td>Fleiss&#x27; Kappa</td></tr><tr><td>Information Recall</td><td>0.205</td></tr><tr><td>Information Precision</td><td>0.387</td></tr><tr><td>Information Repetition</td><td>-0.009</td></tr><tr><td>Readability</td><td>0.235</td></tr></table>",
1083
+ "bbox": [
1084
+ 539,
1085
+ 326,
1086
+ 853,
1087
+ 416
1088
+ ],
1089
+ "page_idx": 5
1090
+ },
1091
+ {
1092
+ "type": "text",
1093
+ "text": "Table 5: Inter-annotator agreement of each of the 4 human evaluation metrics.",
1094
+ "bbox": [
1095
+ 507,
1096
+ 426,
1097
+ 882,
1098
+ 455
1099
+ ],
1100
+ "page_idx": 5
1101
+ },
1102
+ {
1103
+ "type": "text",
1104
+ "text": "ments section) then scored how well the summary generated at the end of the dialogue matches the problem statement. For every pair of a problem statement and a generated summary, each evaluator produced the following 4 evaluation metrics. These metrics have been adopted from the human evaluation performed by Tsatsaronis et al. (2015):",
1105
+ "bbox": [
1106
+ 507,
1107
+ 480,
1108
+ 882,
1109
+ 592
1110
+ ],
1111
+ "page_idx": 5
1112
+ },
1113
+ {
1114
+ "type": "list",
1115
+ "sub_type": "text",
1116
+ "list_items": [
1117
+ "- Information recall (IR) (1-5) - All the necessary information is in the generated summary.",
1118
+ "- Information precision (IP) (1-5) - No irrelevant information is generated.",
1119
+ "- Information repetition (IRep) (1-5) - The generated summary does not repeat the same information multiple times.",
1120
+ "- Readability (Read) (1-5) - The generated summary is easily readable and fluent."
1121
+ ],
1122
+ "bbox": [
1123
+ 531,
1124
+ 602,
1125
+ 884,
1126
+ 776
1127
+ ],
1128
+ "page_idx": 5
1129
+ },
1130
+ {
1131
+ "type": "text",
1132
+ "text": "4.2.1 Results of the Human Evaluation",
1133
+ "text_level": 1,
1134
+ "bbox": [
1135
+ 507,
1136
+ 788,
1137
+ 830,
1138
+ 802
1139
+ ],
1140
+ "page_idx": 5
1141
+ },
1142
+ {
1143
+ "type": "text",
1144
+ "text": "Inter-annotator agreement of each of the 4 human evaluation metrics, as computed by Fleiss' Kappa, is shown in Table 5.",
1145
+ "bbox": [
1146
+ 507,
1147
+ 806,
1148
+ 882,
1149
+ 853
1150
+ ],
1151
+ "page_idx": 5
1152
+ },
1153
+ {
1154
+ "type": "text",
1155
+ "text": "We observe virtually no agreement in Information Repetition, slight agreement in Information Recall and Readability, and Fair agreement in Information Precision.",
1156
+ "bbox": [
1157
+ 507,
1158
+ 854,
1159
+ 882,
1160
+ 917
1161
+ ],
1162
+ "page_idx": 5
1163
+ },
1164
+ {
1165
+ "type": "table",
1166
+ "img_path": "images/1fe8f0a13f5a708259c31554bf806f3904da893b23ffe3266ea032811292e970.jpg",
1167
+ "table_caption": [],
1168
+ "table_footnote": [],
1169
+ "table_body": "<table><tr><td>Annotator</td><td>IR</td><td>IP</td><td>IRep</td><td>Read</td></tr><tr><td>1</td><td>4.25</td><td>4.25</td><td>4.89</td><td>4.96</td></tr><tr><td>2</td><td>4.18</td><td>4.54</td><td>4.93</td><td>4.96</td></tr><tr><td>3</td><td>4.68</td><td>4.39</td><td>4.93</td><td>4.86</td></tr><tr><td>4</td><td>4.03</td><td>4.36</td><td>4.82</td><td>4.89</td></tr><tr><td>All</td><td>4.29</td><td>4.38</td><td>4.89</td><td>4.92</td></tr></table>",
1170
+ "bbox": [
1171
+ 142,
1172
+ 80,
1173
+ 458,
1174
+ 191
1175
+ ],
1176
+ "page_idx": 6
1177
+ },
1178
+ {
1179
+ "type": "text",
1180
+ "text": "Table 6 shows the average values of the human evaluation scores for the selection of 28 problems. Overall, the human evaluation showed high values, and very high values for Information Repetition and Readability. This suggests that the dialogues generated by the pair of agents are of good quality.",
1181
+ "bbox": [
1182
+ 112,
1183
+ 288,
1184
+ 487,
1185
+ 385
1186
+ ],
1187
+ "page_idx": 6
1188
+ },
1189
+ {
1190
+ "type": "text",
1191
+ "text": "The human annotators observed the following most common mistakes in the generated summaries. See Appendix A for examples of each.",
1192
+ "bbox": [
1193
+ 112,
1194
+ 386,
1195
+ 487,
1196
+ 434
1197
+ ],
1198
+ "page_idx": 6
1199
+ },
1200
+ {
1201
+ "type": "list",
1202
+ "sub_type": "text",
1203
+ "list_items": [
1204
+ "- Missing objective function or decision variables.",
1205
+ "- Inclusion of additional information that seems to be from answers to the agent asking clarifying questions, such as \"otherwise the order does not matter\", or \"there is no upper limit on costs\", etc."
1206
+ ],
1207
+ "bbox": [
1208
+ 136,
1209
+ 449,
1210
+ 487,
1211
+ 574
1212
+ ],
1213
+ "page_idx": 6
1214
+ },
1215
+ {
1216
+ "type": "text",
1217
+ "text": "4.3 Correlation Analysis of Automatic and Human Evaluations",
1218
+ "text_level": 1,
1219
+ "bbox": [
1220
+ 112,
1221
+ 590,
1222
+ 463,
1223
+ 621
1224
+ ],
1225
+ "page_idx": 6
1226
+ },
1227
+ {
1228
+ "type": "text",
1229
+ "text": "Table 7 shows Spearman's rank correlation coefficient $\\rho$ between the automatic and human evaluations. The table compares recall values of the automatic metrics against Information Recall, precision values against Information Precision, and F1 values against the harmonic mean of Information Recall and Information Precision. The last column of the table shows the correlation between the F1 values of the automatic metrics and the average of Information Recall, Information Precision, Information Repetition, and Readability. We can observe nearly identical values to the harmonic mean of Information Recall and Information Precision. The reason for this may be that the annotations for Information Repetition and Readability are nearly always 5, so their contribution is almost a constant value that does not change the rankings, so they do not affect the values of $\\rho$ . This may be a consequence of",
1230
+ "bbox": [
1231
+ 112,
1232
+ 629,
1233
+ 489,
1234
+ 919
1235
+ ],
1236
+ "page_idx": 6
1237
+ },
1238
+ {
1239
+ "type": "table",
1240
+ "img_path": "images/32300c5c70184cf679e9ebcd77df13a8e0f51f722840360a8cfba727db1ec8d8.jpg",
1241
+ "table_caption": [
1242
+ "Table 6: Average human evaluation scores for the sample of 28 documents. IR = Information Recall. IP = Information Precision. IRep = Information Repetition. Read = Readability."
1243
+ ],
1244
+ "table_footnote": [],
1245
+ "table_body": "<table><tr><td>Metric</td><td>IR</td><td>IP</td><td>IF1</td><td>IAvg</td></tr><tr><td>ROUGE-1 R</td><td>0.43</td><td></td><td></td><td></td></tr><tr><td>ROUGE-1 P</td><td></td><td>0.58</td><td></td><td></td></tr><tr><td>ROUGE-1 F1</td><td></td><td></td><td>0.62</td><td>0.60</td></tr><tr><td>ROUGE-2 R</td><td>0.48</td><td></td><td></td><td></td></tr><tr><td>ROUGE-2 P</td><td></td><td>0.58</td><td></td><td></td></tr><tr><td>ROUGE-2 F1</td><td></td><td></td><td>0.56</td><td>0.57</td></tr><tr><td>ROUGE-L R</td><td>0.47</td><td></td><td></td><td></td></tr><tr><td>ROUGE-L P</td><td></td><td>0.74</td><td></td><td></td></tr><tr><td>ROUGE-L F1</td><td></td><td></td><td>0.71</td><td>0.69</td></tr><tr><td>BERTScore R</td><td>0.53</td><td></td><td></td><td></td></tr><tr><td>BERTScore P</td><td></td><td>0.74</td><td></td><td></td></tr><tr><td>BERTScore F1</td><td></td><td></td><td>0.65</td><td>0.65</td></tr><tr><td>GPT-4 R</td><td>0.42</td><td></td><td></td><td></td></tr><tr><td>GPT-4 P</td><td></td><td>0.67</td><td></td><td></td></tr><tr><td>GPT-4 F1</td><td></td><td></td><td>0.59</td><td>0.58</td></tr></table>",
1246
+ "bbox": [
1247
+ 521,
1248
+ 80,
1249
+ 870,
1250
+ 373
1251
+ ],
1252
+ "page_idx": 6
1253
+ },
1254
+ {
1255
+ "type": "text",
1256
+ "text": "Table 7: Spearman's rank correlation coefficient $\\rho$ between the automatic and human evaluations. $\\mathrm{IP} =$ correlation with Information Recall; $\\mathrm{IR} =$ correlation with Information Precision; $\\mathrm{IF1} =$ Correlation with the Harmonic mean of IR and IP; $\\mathrm{IAvg} =$ Correlation with the average of Information Recall, Information Precision, Repetition, and Readability.",
1257
+ "bbox": [
1258
+ 507,
1259
+ 385,
1260
+ 884,
1261
+ 486
1262
+ ],
1263
+ "page_idx": 6
1264
+ },
1265
+ {
1266
+ "type": "text",
1267
+ "text": "using GPT-4 and the carefully designed prompts, which instruct the system to be clear and concise.",
1268
+ "bbox": [
1269
+ 507,
1270
+ 511,
1271
+ 882,
1272
+ 542
1273
+ ],
1274
+ "page_idx": 6
1275
+ },
1276
+ {
1277
+ "type": "text",
1278
+ "text": "Among all automatic metrics, Table 7 shows that the best correlation values are for ROUGE-L. GPT-4 achieved competitive results but did not outperform the other metrics. Further work is needed to improve the use of GPT-4. In particular, GPT-4 usually was more generous and would give higher ratings than the human evaluators would. The inclusion of few-shot samples, and more sophisticated prompts, might help the system align with the human annotators.",
1279
+ "bbox": [
1280
+ 507,
1281
+ 543,
1282
+ 882,
1283
+ 702
1284
+ ],
1285
+ "page_idx": 6
1286
+ },
1287
+ {
1288
+ "type": "text",
1289
+ "text": "5 Summary and Conclusion",
1290
+ "text_level": 1,
1291
+ "bbox": [
1292
+ 507,
1293
+ 715,
1294
+ 769,
1295
+ 732
1296
+ ],
1297
+ "page_idx": 6
1298
+ },
1299
+ {
1300
+ "type": "text",
1301
+ "text": "This paper presents a dataset for the task of eliciting information from the user through a dialogue with a conversation agent. The specific use of the information elicited is for automatic modeling of linear optimization problems. This is per se a very useful task with broad potential applications, but the methods for data generation and evaluation proposed here can be adopted easily for other possible tasks. The data and human evaluations are available to the research community. $^1$",
1302
+ "bbox": [
1303
+ 507,
1304
+ 741,
1305
+ 882,
1306
+ 902
1307
+ ],
1308
+ "page_idx": 6
1309
+ },
1310
+ {
1311
+ "type": "text",
1312
+ "text": "The dialogue was generated in a dual-agent LLM",
1313
+ "bbox": [
1314
+ 526,
1315
+ 903,
1316
+ 880,
1317
+ 919
1318
+ ],
1319
+ "page_idx": 6
1320
+ },
1321
+ {
1322
+ "type": "text",
1323
+ "text": "setup where a question generation agent acted as the machine agent who elicited information, and a question answering agent acted as the human who had the information about the problem to model. The question generation agent can be used as a baseline agent. The human evaluation results indicate that this baseline may be effective for the task.",
1324
+ "bbox": [
1325
+ 112,
1326
+ 84,
1327
+ 489,
1328
+ 211
1329
+ ],
1330
+ "page_idx": 7
1331
+ },
1332
+ {
1333
+ "type": "text",
1334
+ "text": "The dialogues for a subset of 28 LP problems were evaluated using an extrinsic evaluation that judged whether summaries generated by the dialogues matched the key information from the original problem descriptions. The evaluation was conducted by human evaluators and automatically. Among the automatic evaluation metrics, besides well-known automatic metrics ROUGE and BERTScore, we designed another GPT-4 agent that mimicked the human evaluators. The results indicate a reasonable correlation between ROUGE L, BERTScore P, and the average human information precision scores, and this is slightly better than the correlation between the GPT4 agent and the human IP scores.",
1335
+ "bbox": [
1336
+ 115,
1337
+ 214,
1338
+ 489,
1339
+ 453
1340
+ ],
1341
+ "page_idx": 7
1342
+ },
1343
+ {
1344
+ "type": "text",
1345
+ "text": "As further work, we intend to refine the prompts used for the evaluation approach with GPT-4. In addition, we will conduct more exhaustive types of evaluation on the data set that might be more suitable to the specific domain of linear programming modeling. In particular, we plan to analyse the generated dialogues at the level of the dialogue turns.",
1346
+ "bbox": [
1347
+ 112,
1348
+ 456,
1349
+ 489,
1350
+ 583
1351
+ ],
1352
+ "page_idx": 7
1353
+ },
1354
+ {
1355
+ "type": "text",
1356
+ "text": "Acknowledgements",
1357
+ "text_level": 1,
1358
+ "bbox": [
1359
+ 114,
1360
+ 596,
1361
+ 285,
1362
+ 614
1363
+ ],
1364
+ "page_idx": 7
1365
+ },
1366
+ {
1367
+ "type": "text",
1368
+ "text": "This work was partially funded by the Australian Research Council, Australia through the Discovery Project 2022 (grant number DP220101925). In addition to two authors of this paper who have annotated the data (Diego Molla-Aliod and John Yearwood), we acknowledge the annotations provided by Vicky Mak-Hau and Thuseethan Selvarajah.",
1369
+ "bbox": [
1370
+ 112,
1371
+ 623,
1372
+ 489,
1373
+ 737
1374
+ ],
1375
+ "page_idx": 7
1376
+ },
1377
+ {
1378
+ "type": "text",
1379
+ "text": "References",
1380
+ "text_level": 1,
1381
+ "bbox": [
1382
+ 114,
1383
+ 764,
1384
+ 213,
1385
+ 778
1386
+ ],
1387
+ "page_idx": 7
1388
+ },
1389
+ {
1390
+ "type": "list",
1391
+ "sub_type": "ref_text",
1392
+ "list_items": [
1393
+ "Der-San Chen, Robert Batson, and Yu Dang. 2011. Applied Integer Programming: Modeling and Solution.",
1394
+ "Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. 2023. GPTScore: Evaluate as you desire.",
1395
+ "Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics."
1396
+ ],
1397
+ "bbox": [
1398
+ 114,
1399
+ 788,
1400
+ 489,
1401
+ 917
1402
+ ],
1403
+ "page_idx": 7
1404
+ },
1405
+ {
1406
+ "type": "list",
1407
+ "sub_type": "ref_text",
1408
+ "list_items": [
1409
+ "Chia-Wei Liu, Ryan Lowe, Iulian Serban, Mike Noseworthy, Laurent Charlin, and Joelle Pineau. 2016. How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2122-2132, Austin, Texas. Association for Computational Linguistics.",
1410
+ "Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-Eval: NLG evaluation using GPT-4 with better human alignment.",
1411
+ "Bodhisattwa Prasad Majumder, Sudha Rao, Michel Galley, and Julian McAuley. 2021. Ask what's missing and what's useful: Improving clarification question generation using global knowledge. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4300-4312, Online. Association for Computational Linguistics.",
1412
+ "OpenAI. 2023. GPT-4 technical report. ArXiv, abs/2303.08774.",
1413
+ "Peng Qi, Yuhao Zhang, and Christopher D. Manning. 2020. Stay hungry, stay focused: Generating informative and specific questions in information-seeking conversations. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 25-40, Online. Association for Computational Linguistics.",
1414
+ "Rindra Ramamonjison, Haley Li, Timothy Yu, Shiqi He, Vishnu Rengan, Amin Banitalebi-dehkordi, Zirui Zhou, and Yong Zhang. 2022. Augmenting operations research with auto-formulation of optimization models from problem descriptions. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 29-62, Abu Dhabi, UAE. Association for Computational Linguistics.",
1415
+ "Rindranirina Ramamonjison, Timothy T. Yu, Raymond Li, Haley Li, Giuseppe Carenini, Bissan Ghaddar, Shiqi He, Mahdi Mostajabdaveh, Amin Banitalebi-Dehkordi, Zirui Zhou, and Yong Zhang. 2023. NL4Opt competition: Formulating optimization problems based on their natural language descriptions.",
1416
+ "George Tsatsaronis, Georgios Balikas, Prodromos Malakasiotis, Ioannis Partalas, Matthias Zschunke, Michael R Alvers, Dirk Weissenborn, Anastasia Krithara, Sergios Petridis, Dimitris Polychronopoulos, Yannis Almirantis, John Pavlopoulos, Nicolas Baskiotis, Patrick Gallinari, Thierry Artieres, Axel-Cyrille Ngonga Ngomo, Norman Heino, Eric Gaussier, Liliana Barrio-Alvers, Michael Schroeder, Ion Androutsopoulos, and Georgios Paliouras. 2015. An overview of the BIOASQ large-scale biomedical semantic indexing and question answering competition. BMC Bioinformatics, 16(1):138."
1417
+ ],
1418
+ "bbox": [
1419
+ 509,
1420
+ 85,
1421
+ 885,
1422
+ 917
1423
+ ],
1424
+ "page_idx": 7
1425
+ },
1426
+ {
1427
+ "type": "text",
1428
+ "text": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc.",
1429
+ "bbox": [
1430
+ 114,
1431
+ 85,
1432
+ 489,
1433
+ 177
1434
+ ],
1435
+ "page_idx": 8
1436
+ },
1437
+ {
1438
+ "type": "text",
1439
+ "text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating text generation with BERT.",
1440
+ "bbox": [
1441
+ 114,
1442
+ 184,
1443
+ 489,
1444
+ 225
1445
+ ],
1446
+ "page_idx": 8
1447
+ },
1448
+ {
1449
+ "type": "text",
1450
+ "text": "A Sample Generated Summaries with Mistakes",
1451
+ "text_level": 1,
1452
+ "bbox": [
1453
+ 114,
1454
+ 231,
1455
+ 458,
1456
+ 263
1457
+ ],
1458
+ "page_idx": 8
1459
+ },
1460
+ {
1461
+ "type": "text",
1462
+ "text": "The following generated summary did not include the objective function:",
1463
+ "bbox": [
1464
+ 112,
1465
+ 272,
1466
+ 485,
1467
+ 305
1468
+ ],
1469
+ "page_idx": 8
1470
+ },
1471
+ {
1472
+ "type": "text",
1473
+ "text": "Problem statement:",
1474
+ "text_level": 1,
1475
+ "bbox": [
1476
+ 149,
1477
+ 311,
1478
+ 302,
1479
+ 324
1480
+ ],
1481
+ "page_idx": 8
1482
+ },
1483
+ {
1484
+ "type": "text",
1485
+ "text": "A suit store makes 2-piece and 3-piece suits. To make a 2-piece suit requires 1 hour of measuring and 4 hours of stitching. To make a 3-piece suit requires 2 hours of measuring and 5 hours of stitching. The store has 150 hours of measuring time and 300 hours of stitching time available. In order to stay in business, at least 40 2-piece suites should be made. The profit per 2-piece suit is $600 and the profit per 3-piece suit is$ 800. Formulate a LP to maximize profit?",
1486
+ "bbox": [
1487
+ 149,
1488
+ 328,
1489
+ 452,
1490
+ 520
1491
+ ],
1492
+ "page_idx": 8
1493
+ },
1494
+ {
1495
+ "type": "text",
1496
+ "text": "Generated summary:",
1497
+ "text_level": 1,
1498
+ "bbox": [
1499
+ 149,
1500
+ 525,
1501
+ 314,
1502
+ 539
1503
+ ],
1504
+ "page_idx": 8
1505
+ },
1506
+ {
1507
+ "type": "text",
1508
+ "text": "Alright, thanks for the information! To make sure I've got everything, here's a summary of your situation:",
1509
+ "bbox": [
1510
+ 149,
1511
+ 541,
1512
+ 450,
1513
+ 588
1514
+ ],
1515
+ "page_idx": 8
1516
+ },
1517
+ {
1518
+ "type": "list",
1519
+ "sub_type": "text",
1520
+ "list_items": [
1521
+ "- You manufacture two types of suits: 2-piece suits and 3-piece suits.",
1522
+ "- A 2-piece suit makes a profit of $600, requires 1 hour of measuring time and 4 hours of stitching time.",
1523
+ "- A 3-piece suit makes a profit of $800, requires 2 hours of measuring time and 5 hours of stitching time.",
1524
+ "- You have 150 hours of measuring time and 300 hours of stitching time available.",
1525
+ "- At least 40 2-piece suits must be produced, but there is no minimum requirement for 3-piece suits.",
1526
+ "- There is no limit on how many suits you can sell."
1527
+ ],
1528
+ "bbox": [
1529
+ 147,
1530
+ 594,
1531
+ 450,
1532
+ 848
1533
+ ],
1534
+ "page_idx": 8
1535
+ },
1536
+ {
1537
+ "type": "text",
1538
+ "text": "Have I missed out on anything or is there any other specific requirement or constraint that I should know about?",
1539
+ "bbox": [
1540
+ 149,
1541
+ 871,
1542
+ 450,
1543
+ 917
1544
+ ],
1545
+ "page_idx": 8
1546
+ },
1547
+ {
1548
+ "type": "text",
1549
+ "text": "The following generated summary did not mention decision variables:",
1550
+ "bbox": [
1551
+ 509,
1552
+ 84,
1553
+ 882,
1554
+ 115
1555
+ ],
1556
+ "page_idx": 8
1557
+ },
1558
+ {
1559
+ "type": "text",
1560
+ "text": "Problem statement:",
1561
+ "text_level": 1,
1562
+ "bbox": [
1563
+ 544,
1564
+ 127,
1565
+ 695,
1566
+ 141
1567
+ ],
1568
+ "page_idx": 8
1569
+ },
1570
+ {
1571
+ "type": "text",
1572
+ "text": "A chemical company makes an acidic liquid and a basic liquid using two types of techniques, reaction $X$ and reaction $Y$ . Reaction $X$ can make 35 au of the acidic liquid and 75 au of the basic liquid per minute. Reaction $Y$ can make 42 au of the acidic liquid and 35 au of the basic liquid per minute. Furthermore, reaction $X$ requires 18 units of heat every minute while reaction $Y$ requires 12 units of heat every minute. The chemical company has the capacity to generate at most 1400 units of heat every minute. If the chemical company needs to make at least 900 au of the acidic liquid and 1200 au of the basic liquid per minute, how many minutes should each reaction be run for to minimize the total time needed?",
1573
+ "bbox": [
1574
+ 544,
1575
+ 143,
1576
+ 847,
1577
+ 431
1578
+ ],
1579
+ "page_idx": 8
1580
+ },
1581
+ {
1582
+ "type": "text",
1583
+ "text": "Generated summary:",
1584
+ "text_level": 1,
1585
+ "bbox": [
1586
+ 544,
1587
+ 438,
1588
+ 709,
1589
+ 453
1590
+ ],
1591
+ "page_idx": 8
1592
+ },
1593
+ {
1594
+ "type": "text",
1595
+ "text": "Great! Let's summarize the information:",
1596
+ "bbox": [
1597
+ 544,
1598
+ 454,
1599
+ 845,
1600
+ 469
1601
+ ],
1602
+ "page_idx": 8
1603
+ },
1604
+ {
1605
+ "type": "list",
1606
+ "sub_type": "text",
1607
+ "list_items": [
1608
+ "- Your company has two types of reactions, $X$ and $Y$ , that produce acidic and basic liquids.",
1609
+ "- Reaction $X$ requires 18 units of heat per minute and produces 35 au of acidic and 75 au of basic liquid.",
1610
+ "- Reaction $Y$ requires 12 units of heat per minute and produces 42 au of acidic and 35 au of basic liquid.",
1611
+ "- Your company can generate at most 1400 units of heat every minute.",
1612
+ "- You need to produce at least 900 au of the acidic liquid and 1200 au of the basic liquid per minute.",
1613
+ "- Your goal is to meet these production requirements in the shortest time possible."
1614
+ ],
1615
+ "bbox": [
1616
+ 542,
1617
+ 475,
1618
+ 845,
1619
+ 746
1620
+ ],
1621
+ "page_idx": 8
1622
+ },
1623
+ {
1624
+ "type": "text",
1625
+ "text": "Is there anything else that I should know or did I miss anything?",
1626
+ "bbox": [
1627
+ 544,
1628
+ 770,
1629
+ 843,
1630
+ 801
1631
+ ],
1632
+ "page_idx": 8
1633
+ },
1634
+ {
1635
+ "type": "text",
1636
+ "text": "The following summary has additional information that makes sense but is not in the original problem description:",
1637
+ "bbox": [
1638
+ 507,
1639
+ 812,
1640
+ 882,
1641
+ 860
1642
+ ],
1643
+ "page_idx": 8
1644
+ },
1645
+ {
1646
+ "type": "text",
1647
+ "text": "Problem statement:",
1648
+ "text_level": 1,
1649
+ "bbox": [
1650
+ 544,
1651
+ 871,
1652
+ 695,
1653
+ 884
1654
+ ],
1655
+ "page_idx": 8
1656
+ },
1657
+ {
1658
+ "type": "text",
1659
+ "text": "A spicy paste is to be made from units of peppers and tomatoes and is to contain",
1660
+ "bbox": [
1661
+ 542,
1662
+ 887,
1663
+ 845,
1664
+ 917
1665
+ ],
1666
+ "page_idx": 8
1667
+ },
1668
+ {
1669
+ "type": "text",
1670
+ "text": "a minimum of 130 units of spice and 90 units of flavor. Peppers cost $8 per unit and tomatoes cost$ 5 per unit. One unit of peppers contains 12 units of spice and 2 units of flavor. One unit of tomatoes contains 1 unit of spice and 8 units of flavor. Formulate this as an LP and find the minimum cost spicy paste that can be made.",
1671
+ "bbox": [
1672
+ 147,
1673
+ 84,
1674
+ 453,
1675
+ 227
1676
+ ],
1677
+ "page_idx": 9
1678
+ },
1679
+ {
1680
+ "type": "text",
1681
+ "text": "Generated summary:",
1682
+ "text_level": 1,
1683
+ "bbox": [
1684
+ 149,
1685
+ 236,
1686
+ 314,
1687
+ 250
1688
+ ],
1689
+ "page_idx": 9
1690
+ },
1691
+ {
1692
+ "type": "text",
1693
+ "text": "Thanks for the information. Based on your responses, here's your situation as I understand:",
1694
+ "bbox": [
1695
+ 149,
1696
+ 252,
1697
+ 450,
1698
+ 298
1699
+ ],
1700
+ "page_idx": 9
1701
+ },
1702
+ {
1703
+ "type": "list",
1704
+ "sub_type": "text",
1705
+ "list_items": [
1706
+ "- You're making a spicy paste usingpeppersandtomatoes.",
1707
+ "- Each unit of peppers costs $8, contributes 12 units of spice and 2 units of flavor.",
1708
+ "- Each unit of tomatoes costs $5, contributes 1 unit of spice and 8 units of flavor.",
1709
+ "- Each batch of spicy paste should contain at least 130 units of spice and 90 units of flavor.",
1710
+ "- There are no restrictions on the maximum units of peppers and tomatoes you can use.",
1711
+ "- There's no specific budget, but you aim to minimize costs."
1712
+ ],
1713
+ "bbox": [
1714
+ 147,
1715
+ 306,
1716
+ 453,
1717
+ 561
1718
+ ],
1719
+ "page_idx": 9
1720
+ },
1721
+ {
1722
+ "type": "text",
1723
+ "text": "Have I captured everything correctly?",
1724
+ "bbox": [
1725
+ 149,
1726
+ 569,
1727
+ 435,
1728
+ 585
1729
+ ],
1730
+ "page_idx": 9
1731
+ },
1732
+ {
1733
+ "type": "text",
1734
+ "text": "B LLM Prompts",
1735
+ "text_level": 1,
1736
+ "bbox": [
1737
+ 114,
1738
+ 596,
1739
+ 278,
1740
+ 614
1741
+ ],
1742
+ "page_idx": 9
1743
+ },
1744
+ {
1745
+ "type": "text",
1746
+ "text": "Evaluation Prompt:",
1747
+ "text_level": 1,
1748
+ "bbox": [
1749
+ 114,
1750
+ 624,
1751
+ 275,
1752
+ 639
1753
+ ],
1754
+ "page_idx": 9
1755
+ },
1756
+ {
1757
+ "type": "text",
1758
+ "text": "You are an AI evaluator specializing in assessing the quality of summaries.",
1759
+ "bbox": [
1760
+ 114,
1761
+ 657,
1762
+ 413,
1763
+ 681
1764
+ ],
1765
+ "page_idx": 9
1766
+ },
1767
+ {
1768
+ "type": "text",
1769
+ "text": "Carefully check how the summary captured a linear programming problem statement.",
1770
+ "bbox": [
1771
+ 114,
1772
+ 681,
1773
+ 435,
1774
+ 703
1775
+ ],
1776
+ "page_idx": 9
1777
+ },
1778
+ {
1779
+ "type": "text",
1780
+ "text": "Important information for this task is explicit names and values of decision variables, constraints of all types, and an objective function.",
1781
+ "bbox": [
1782
+ 114,
1783
+ 705,
1784
+ 472,
1785
+ 751
1786
+ ],
1787
+ "page_idx": 9
1788
+ },
1789
+ {
1790
+ "type": "text",
1791
+ "text": "Your primary goal is to rate the summary based on Information Recall, Information Precision, Information Repetition and Readability.",
1792
+ "bbox": [
1793
+ 114,
1794
+ 752,
1795
+ 482,
1796
+ 789
1797
+ ],
1798
+ "page_idx": 9
1799
+ },
1800
+ {
1801
+ "type": "text",
1802
+ "text": "The Problem Statement:",
1803
+ "bbox": [
1804
+ 114,
1805
+ 799,
1806
+ 282,
1807
+ 810
1808
+ ],
1809
+ "page_idx": 9
1810
+ },
1811
+ {
1812
+ "type": "text",
1813
+ "text": "··",
1814
+ "bbox": [
1815
+ 115,
1816
+ 810,
1817
+ 141,
1818
+ 819
1819
+ ],
1820
+ "page_idx": 9
1821
+ },
1822
+ {
1823
+ "type": "text",
1824
+ "text": "0",
1825
+ "bbox": [
1826
+ 115,
1827
+ 822,
1828
+ 141,
1829
+ 835
1830
+ ],
1831
+ "page_idx": 9
1832
+ },
1833
+ {
1834
+ "type": "text",
1835
+ "text": "The Provided Summary:",
1836
+ "bbox": [
1837
+ 115,
1838
+ 858,
1839
+ 275,
1840
+ 869
1841
+ ],
1842
+ "page_idx": 9
1843
+ },
1844
+ {
1845
+ "type": "text",
1846
+ "text": "{1}",
1847
+ "bbox": [
1848
+ 115,
1849
+ 882,
1850
+ 141,
1851
+ 892
1852
+ ],
1853
+ "page_idx": 9
1854
+ },
1855
+ {
1856
+ "type": "text",
1857
+ "text": "111",
1858
+ "bbox": [
1859
+ 115,
1860
+ 892,
1861
+ 141,
1862
+ 900
1863
+ ],
1864
+ "page_idx": 9
1865
+ },
1866
+ {
1867
+ "type": "list",
1868
+ "sub_type": "text",
1869
+ "list_items": [
1870
+ "PROVIDETHEANSWERINAJSONFORMATWITH FOLLOWINGFIELDS:",
1871
+ "\"correct_information\" - string | information accurately captured in the summary",
1872
+ "\"missing_information\" - string | important information existing in the original problem statement but not captured in the summary.",
1873
+ "\"incorrect_information\" - string | information existing in an original problem description but wrongly/incorrectly captured in a summary",
1874
+ "\"Information Recall Score\" - int | Score from 1 to 5",
1875
+ "\"Information Precision Score\" - int | Score from 1 to 5",
1876
+ "\"Information Repetition Score\" - int | Score from 1 to 5",
1877
+ "\"Readability Score\" - int | Score from 1 to 5"
1878
+ ],
1879
+ "bbox": [
1880
+ 510,
1881
+ 86,
1882
+ 878,
1883
+ 300
1884
+ ],
1885
+ "page_idx": 9
1886
+ },
1887
+ {
1888
+ "type": "text",
1889
+ "text": "QG Agent Prompt:",
1890
+ "text_level": 1,
1891
+ "bbox": [
1892
+ 510,
1893
+ 307,
1894
+ 665,
1895
+ 324
1896
+ ],
1897
+ "page_idx": 9
1898
+ },
1899
+ {
1900
+ "type": "text",
1901
+ "text": "YOU ARE \"OptiMouse\" - A CHATBOT HELPING USERS TO FORMULATE FULL OPTIMIZATION PROBLEM STATEMENT.",
1902
+ "bbox": [
1903
+ 510,
1904
+ 326,
1905
+ 875,
1906
+ 362
1907
+ ],
1908
+ "page_idx": 9
1909
+ },
1910
+ {
1911
+ "type": "text",
1912
+ "text": "THE USER IS NOT A MATH EXPERT AND HAS NO EXPERIENCE WITH MATH AND OPTIMIZATIONS.",
1913
+ "bbox": [
1914
+ 510,
1915
+ 363,
1916
+ 838,
1917
+ 386
1918
+ ],
1919
+ "page_idx": 9
1920
+ },
1921
+ {
1922
+ "type": "text",
1923
+ "text": "DO NOT USE ANY MATHEMATICAL TERMINOLOGY OR EXPLANATIONS SUCH AS OBJECTIVE FUNCTION, CONSTRAINTS, ETC.",
1924
+ "bbox": [
1925
+ 510,
1926
+ 387,
1927
+ 845,
1928
+ 423
1929
+ ],
1930
+ "page_idx": 9
1931
+ },
1932
+ {
1933
+ "type": "text",
1934
+ "text": "GATHER NECESSARY DETAILS THAT CAN BE MAPPED TO A LINEAR PROGRAMMING MODEL.",
1935
+ "bbox": [
1936
+ 510,
1937
+ 434,
1938
+ 875,
1939
+ 457
1940
+ ],
1941
+ "page_idx": 9
1942
+ },
1943
+ {
1944
+ "type": "text",
1945
+ "text": "ENGAGE USERS BY ASKING CLEAR, CONCISE, AND SEQUENTIAL QUESTIONS TO RECEIVE INFORMATION ABOUT CONSTRAINTS AND OBJECTIVE FUNCTION.",
1946
+ "bbox": [
1947
+ 510,
1948
+ 458,
1949
+ 870,
1950
+ 492
1951
+ ],
1952
+ "page_idx": 9
1953
+ },
1954
+ {
1955
+ "type": "text",
1956
+ "text": "ASK A QUESTION BASED ON THE PREVIOUS INFORMATION THAT WILL LEAD TO GETTING A CONSTRAINT OR OTHER PARAMETER OF THE MODEL.",
1957
+ "bbox": [
1958
+ 510,
1959
+ 494,
1960
+ 875,
1961
+ 527
1962
+ ],
1963
+ "page_idx": 9
1964
+ },
1965
+ {
1966
+ "type": "text",
1967
+ "text": "THINK DEEPLY SO YOU WILL BE ABLE TO GET FULL PROBLEM DETAILS.",
1968
+ "bbox": [
1969
+ 510,
1970
+ 529,
1971
+ 843,
1972
+ 551
1973
+ ],
1974
+ "page_idx": 9
1975
+ },
1976
+ {
1977
+ "type": "text",
1978
+ "text": "ONE QUESTION ALLOWED PERMESSAGE.",
1979
+ "bbox": [
1980
+ 510,
1981
+ 552,
1982
+ 759,
1983
+ 564
1984
+ ],
1985
+ "page_idx": 9
1986
+ },
1987
+ {
1988
+ "type": "text",
1989
+ "text": "PROVIDA SUMMARY IN BULLET POINTS (SEE EXAMPLE DELIMITED BY \"===\") ONCE YOU HAVE ALL THE INFORMATION NEEDED",
1990
+ "bbox": [
1991
+ 510,
1992
+ 576,
1993
+ 867,
1994
+ 611
1995
+ ],
1996
+ "page_idx": 9
1997
+ },
1998
+ {
1999
+ "type": "text",
2000
+ "text": "DO NOT INCLUDE UNKNOWN/NON-FACTUAL CONSTRAINTS IN A SUMMARY(For example, 'There's no specific requirement on X...', 'There's no limit on X...')",
2001
+ "bbox": [
2002
+ 510,
2003
+ 612,
2004
+ 863,
2005
+ 659
2006
+ ],
2007
+ "page_idx": 9
2008
+ },
2009
+ {
2010
+ "type": "text",
2011
+ "text": "ASK A CLARIFICATION QUESTION BEFORE PROVIDING A SUMMARY TO MAKE SURE YOU HAVE ALL THE CONSTRAINTS AND AN OBJECTIVE FUNCTION.",
2012
+ "bbox": [
2013
+ 510,
2014
+ 659,
2015
+ 868,
2016
+ 695
2017
+ ],
2018
+ "page_idx": 9
2019
+ },
2020
+ {
2021
+ "type": "text",
2022
+ "text": "EXAMPLE OF A SUMMARY:",
2023
+ "bbox": [
2024
+ 510,
2025
+ 707,
2026
+ 670,
2027
+ 718
2028
+ ],
2029
+ "page_idx": 9
2030
+ },
2031
+ {
2032
+ "type": "text",
2033
+ "text": "- - - -",
2034
+ "bbox": [
2035
+ 510,
2036
+ 720,
2037
+ 544,
2038
+ 728
2039
+ ],
2040
+ "page_idx": 9
2041
+ },
2042
+ {
2043
+ "type": "list",
2044
+ "sub_type": "text",
2045
+ "list_items": [
2046
+ "- A coconut seller has to transport coconuts using either rickshaws or ox carts.",
2047
+ "- The rickshaws can take 50 coconuts each and cost $10 per trip.",
2048
+ "- The ox carts can take 30 coconuts each and cost $8 per trip.",
2049
+ "- The seller has at most $200 to spend on transporting the coconuts.",
2050
+ "- The number of rickshaws must not exceed the number of ox carts.",
2051
+ "- - - -"
2052
+ ],
2053
+ "bbox": [
2054
+ 510,
2055
+ 731,
2056
+ 853,
2057
+ 857
2058
+ ],
2059
+ "page_idx": 9
2060
+ },
2061
+ {
2062
+ "type": "text",
2063
+ "text": "START THE CONVERSATION WITH A FRIENDLY GREETING, INTRODUCING YOURSELF AND ASKING WHAT THE USER WOULD LIKE TO OPTIMISE.",
2064
+ "bbox": [
2065
+ 510,
2066
+ 872,
2067
+ 873,
2068
+ 907
2069
+ ],
2070
+ "page_idx": 9
2071
+ },
2072
+ {
2073
+ "type": "text",
2074
+ "text": "QA Agent Prompt:",
2075
+ "text_level": 1,
2076
+ "bbox": [
2077
+ 115,
2078
+ 85,
2079
+ 268,
2080
+ 99
2081
+ ],
2082
+ "page_idx": 10
2083
+ },
2084
+ {
2085
+ "type": "text",
2086
+ "text": "YOU ARE AGENT IMPersonATING THE BUSINESS OWNER MENTIONED IN THE PROBLEM STATEMENT(DELIMITED BY \\*\\*).",
2087
+ "bbox": [
2088
+ 115,
2089
+ 105,
2090
+ 485,
2091
+ 141
2092
+ ],
2093
+ "page_idx": 10
2094
+ },
2095
+ {
2096
+ "type": "text",
2097
+ "text": "BE POLITE.",
2098
+ "bbox": [
2099
+ 115,
2100
+ 142,
2101
+ 191,
2102
+ 152
2103
+ ],
2104
+ "page_idx": 10
2105
+ },
2106
+ {
2107
+ "type": "text",
2108
+ "text": "YOU(THETBUSINESSOWNER)ARE TALKING WITH AN EXPERT IN OPTIMIZATIONS.",
2109
+ "bbox": [
2110
+ 115,
2111
+ 154,
2112
+ 443,
2113
+ 175
2114
+ ],
2115
+ "page_idx": 10
2116
+ },
2117
+ {
2118
+ "type": "text",
2119
+ "text": "ACCURATELY PROVIDE INFORMATION AS REQUESTED BASED ON THE PROBLEM STATEMENT.",
2120
+ "bbox": [
2121
+ 115,
2122
+ 178,
2123
+ 443,
2124
+ 198
2125
+ ],
2126
+ "page_idx": 10
2127
+ },
2128
+ {
2129
+ "type": "text",
2130
+ "text": "MAKE SURE INFORMATION YOU PROVIDING IS CORRECT AND CAN BE FOUND IN THE PROBLEM STATEMENT.",
2131
+ "bbox": [
2132
+ 115,
2133
+ 200,
2134
+ 467,
2135
+ 223
2136
+ ],
2137
+ "page_idx": 10
2138
+ },
2139
+ {
2140
+ "type": "text",
2141
+ "text": "IF THE PROBLEM STATEMENT DOES NOT CONTAIN REQUESTED INFORMATION, SIMPLY SAY YOU DON'T KNOW THESE DETAILS. (for example, \"I'm not sure about it, can we skip this\")",
2142
+ "bbox": [
2143
+ 115,
2144
+ 224,
2145
+ 473,
2146
+ 271
2147
+ ],
2148
+ "page_idx": 10
2149
+ },
2150
+ {
2151
+ "type": "text",
2152
+ "text": "DO NOT MAKE CALCULATIONS OR INFORMATION MANIPULATING. Use facts from the problem ( for example, question: How many X are produced in a day? Answer: I'm not sure, but I know that to produce one X, we need Y minutes.)",
2153
+ "bbox": [
2154
+ 115,
2155
+ 272,
2156
+ 485,
2157
+ 342
2158
+ ],
2159
+ "page_idx": 10
2160
+ },
2161
+ {
2162
+ "type": "text",
2163
+ "text": "DO NOT MENTION THE PROBLEM STATEMENT ANYWHERE; ACT AS IF IT IS YOUR PERSONAL KNOWLEDGE.",
2164
+ "bbox": [
2165
+ 115,
2166
+ 343,
2167
+ 463,
2168
+ 365
2169
+ ],
2170
+ "page_idx": 10
2171
+ },
2172
+ {
2173
+ "type": "text",
2174
+ "text": "THE PROBLEM STATEMENT:",
2175
+ "bbox": [
2176
+ 115,
2177
+ 378,
2178
+ 282,
2179
+ 395
2180
+ ],
2181
+ "page_idx": 10
2182
+ },
2183
+ {
2184
+ "type": "text",
2185
+ "text": "0",
2186
+ "bbox": [
2187
+ 115,
2188
+ 401,
2189
+ 141,
2190
+ 417
2191
+ ],
2192
+ "page_idx": 10
2193
+ },
2194
+ {
2195
+ "type": "text",
2196
+ "text": "START THE CONVERSATION WITH A WARM GREETING",
2197
+ "bbox": [
2198
+ 115,
2199
+ 437,
2200
+ 443,
2201
+ 449
2202
+ ],
2203
+ "page_idx": 10
2204
+ }
2205
+ ]
2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17461/04ea482d-8fa3-46a8-81ee-6753442c6b74_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9518bf4780877f6d86f615963dbfe389832574736145cfd281a53661427341a9
3
+ size 274268
2401.17xxx/2401.17461/full.md ADDED
@@ -0,0 +1,431 @@
1
+ # Synthetic Dialogue Dataset Generation using LLM Agents
2
+
3
+ Yelaman Abdullin and Diego Molla-Aliod
4
+
5
+ Macquarie University
6
+
7
+ yelaman.abdullin@hdr.mq.edu.au, diego.molla-aliod@mq.edu.au
8
+
9
+ # Bahadorreza Ofoghi and John Yearwood
10
+
11
+ Deakin University
12
+
13
+ {b.ofoghi,john.yearwood}@deakin.edu.au
14
+
15
+ # Qingyang Li
16
+
17
+ The University of Melbourne
18
+
19
+ q15@student.unimelb.edu.au
20
+
21
+ # Abstract
22
+
23
+ Linear programming (LP) problems are pervasive in real-life applications. However, despite their apparent simplicity, an untrained user may find it difficult to determine the linear model of their specific problem. We envisage the creation of a goal-oriented conversational agent that will engage in conversation with the user to elicit all information required so that a subsequent agent can generate the linear model. In this paper, we present an approach for the generation of sample dialogues that can be used to develop and train such a conversational agent. Using prompt engineering, we develop two agents that "talk" to each other, one acting as the conversational agent, and the other acting as the user. Using a set of text descriptions of linear problems from NL4Opt available to the user only, the agent and the user engage in conversation until the agent has retrieved all key information from the original problem description. We also propose an extrinsic evaluation of the dialogues by assessing how well the summaries generated by the dialogues match the original problem descriptions. We conduct human and automatic evaluations, including an evaluation approach that uses GPT-4 to mimic the human evaluation metrics. The evaluation results show an overall good quality of the dialogues, though research is still needed to improve the quality of the GPT-4 evaluation metrics. The resulting dialogues, including the human annotations of a subset, are available to the research community. The conversational agent used for the generation of the dialogues can be used as a baseline.
24
+
25
+ # 1 Introduction
26
+
27
+ Linear programming (LP) is a mathematical optimization technique widely employed to address a multitude of real-world challenges, ranging from resource allocation in supply chain management to portfolio optimization in finance. Despite the ubiquity of LP problems and their practical importance,
28
+
29
+ many individuals, particularly those without specialized mathematical backgrounds, often struggle to formulate the appropriate linear models for their specific problem instances. This barrier hinders the broader utilization of LP techniques, especially among non-experts.
30
+
31
+ To mitigate this challenge, we propose the development of a goal-oriented conversational agent capable of assisting users in constructing accurate linear models for their unique problem scenarios. This conversational agent would engage users in a dialogue, eliciting relevant information pertaining to the problem, and subsequently generate the corresponding linear model. This paper focuses on an essential aspect of creating such an agent — the generation of synthetic dialogues that can be employed to train and evaluate the conversational agent's performance.
32
+
33
+ Our methodology leverages prompt engineering to construct two distinct agents: one simulating the conversational agent's behavior, and the other emulating the user's responses during problem-solving interactions. The agents are designed to engage in purposeful dialogues aimed at extracting the necessary information from the user to construct a valid linear model. To facilitate this process, we utilize a set of text descriptions of linear problems, accessible only to the user agent, sourced from the NL4Opt dataset (Ramamonjison et al., 2022, 2023). These text descriptions serve as the basis for the dialogues and enable the conversational agent to iteratively gather the critical information required for problem formulation.
34
+
35
+ In addition to the generation process, we propose an extrinsic evaluation approach for assessing the quality and effectiveness of the generated dialogues. Specifically, we evaluate how well the summaries generated by the dialogues align with the original problem descriptions from NL4Opt. This evaluation encompasses both human assessments, where human judges evaluate the quality
36
+
37
+ of dialogue summaries, and automated metrics to quantitatively measure the informativeness of the generated summaries.
38
+
39
+ Our preliminary results from human and automatic evaluations indicate that the generated dialogues exhibit a high degree of fidelity to the original problem descriptions, thereby demonstrating the quality of the synthetic dialogues generated.
40
+
41
+ The contributions of this paper are:
42
+
43
+ 1. An approach for the generation of dialogues for the development of goal-oriented conversational agents. In this paper, the goal consists of eliciting information from the user in order to generate a linear programming model, noting that the techniques presented here can be adapted to other goals.
44
+ 2. A dataset of 476 dialogues for the development of such a conversational agent, of which 28 have been annotated manually. $^{1}$ Even though these 476 dialogues are generated automatically, since the generation process is non-deterministic, separate runs of the same program will generate different dialogues. For this reason, we consider that these dialogues form a useful dataset for the research community to facilitate reproducibility.
45
+ 3. An extrinsic evaluation approach based on comparing the summaries generated by the dialogue, with the original problem description.
46
+ 4. An automatic evaluation approach using GPT-4 that mimics the behavior of human evaluation.
47
+
48
+ # 2 Background and Related Work
49
+
50
+ # 2.1 Linear Programming and NL4Opt
51
+
52
+ LP problems are pervasive in real-life applications. They are commonly utilized for resource allocation, planning, scheduling, transportation optimization, portfolio management, and numerous other areas. For instance, in production planning, LP can help determine how to use limited human, material, and financial resources to achieve maximum economic benefits.
53
+
54
+ LP problems are a class of mathematical optimization problems where the goal is to find a set of
55
+
56
+ values for the decision variables that satisfies a set of linear constraints and maximizes or minimizes the value of a linear objective function (Chen et al., 2011). The general form of an LP problem can be formulated as follows,
57
+
58
+ Maximize $\mathbf{c}^T\mathbf{x}$
59
+
60
+ subject to $\mathbf{A}\mathbf{x}\leq \mathbf{b}$
61
+
62
+ $$
63
+ \mathbf {x} \geq \mathbf {0}
64
+ $$
65
+
66
+ where $\mathbf{x}$ is a vector of decision variables $\in \mathbb{R}^n$ , $\mathbf{c}$ and $\mathbf{b}$ are given vectors of constants, and $\mathbf{A}$ is a given matrix of constants. Linear programming is widely applicable in various domains due to its ability to model real-world optimization challenges and the availability of efficient solution algorithms. Once an LP model has been formulated for a problem, powerful solvers driven by efficient algorithms can help one to solve it, even for surprisingly complicated and large-scale problems.
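+
+ As a concrete illustration (added here, and not part of the original paper), the furniture-store problem shown later in Figure 2 can be written in exactly this form and solved with an off-the-shelf solver. The sketch below uses SciPy's `linprog`; the 70%-chairs requirement is rewritten as a less-than-or-equal row, and integrality of the counts is ignored.
+
+ ```python
+ # A minimal sketch (not from the paper): the furniture-store LP of Figure 2
+ # written as  max c^T x  s.t.  A x <= b,  x >= 0,  and solved with SciPy.
+ from scipy.optimize import linprog
+
+ # x = [dining tables, chairs]
+ c = [-350, -75]        # linprog minimizes, so the profits are negated
+ A_ub = [
+     [8, 2],            # floor space:  8 t + 2 ch <= 500 sq ft
+     [1000, 150],       # capital:      1000 t + 150 ch <= 20000 dollars
+     [0.7, -0.3],       # chairs >= 70% of all items  =>  0.7 t - 0.3 ch <= 0
+ ]
+ b_ub = [500, 20000, 0]
+
+ res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)])
+ print(res.x, -res.fun)  # optimal (tables, chairs) and the maximum profit
+ ```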
67
+
68
+ In 2022, a competition to extract linear programming formulations from natural language (NL4Opt) developed the first dataset of linear programming word problems (Ramamonjison et al., 2022, 2023). It contains 1101 instances from various domains. These LP problems focus on a variety of common contexts such as production, resource allocation, investment allocation, agriculture, transportation, health sciences, sales, etc. Each instance has an unstructured natural language description of the LP problem involving decision variables, one objective function, and several constraints.
69
+
70
+ The NL4Opt dataset provides valuable examples of real-world natural language descriptions for LP problems, showcasing a range of constraint types. We deconstruct each problem description in the development subset into an objective function description and several constraint descriptions. By analyzing these constraints, we find that they can be categorized into nine refined classes in Table 1. Different instances of the dataset consist of different combinations of these constraint types. Constraint types 1 to 3 and 5 to 7 are frequently used to represent capacity limits, budget constraints, or resource availability. Types 4, 8, and 9 impose ratio control and balancing between different quantities. These nine types of constraints are often encountered in real-world problems and can encompass a substantial portion of common constraints.
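+
+ As a worked illustration (added here; the paper does not spell this out), the proportion and comparison constraints of types 4, 8, and 9 are linear once everything is moved to the left-hand side. For example, the lower bound on a proportion (type 8) and the comparison constraint (type 9) become
+
+ $$
+ x_j \geq c\sum_{i} x_i \;\Longleftrightarrow\; c\sum_{i \neq j} x_i - (1 - c)\,x_j \leq 0, \qquad d\,x_i \leq x_j \;\Longleftrightarrow\; d\,x_i - x_j \leq 0,
+ $$
+
+ so every type in Table 1 fits the standard $\mathbf{A}\mathbf{x} \leq \mathbf{b}$ form given above.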
71
+
72
+ # 2.2 Evaluation Methods
73
+
74
+ Evaluation of dialogue systems is a complex endeavor, typically involving a blend of automated
75
+
76
+ <table><tr><td></td><td>Constraint type</td><td>Math inequality</td></tr><tr><td>1</td><td>Upper bound on single variable</td><td>xi≤b</td></tr><tr><td>2</td><td>Upper bound on sum of vari-ables</td><td>∑ixi≤b</td></tr><tr><td>3</td><td>Upper bound on weighted sum of variables</td><td>∑iaixi≤b</td></tr><tr><td>4</td><td>Upper bound on proportion</td><td>xj≤c∑ixi</td></tr><tr><td>5</td><td>Lower bound on single variable</td><td>xi≥b</td></tr><tr><td>6</td><td>Lower bound on sum of vari-ables</td><td>∑ixi≥b</td></tr><tr><td>7</td><td>Lower bound on weighted sum of variables</td><td>∑iaixi≥b</td></tr><tr><td>8</td><td>Lower bound on proportion</td><td>xj≥c∑ixi</td></tr><tr><td>9</td><td>Comparison constraints</td><td>dxi≤xj</td></tr></table>
77
+
78
+ Table 1: Classification of constraints. Suppose an LP problem has $n$ decision variables; ${x}_{i}$ and ${x}_{j}$ are decision variables, ${a}_{i}$ and $b$ are nonnegative constants, $d$ is a positive constant, and $c$ is a constant $\in (0, 1]$, with $i, j \in I = \{1, 2, \ldots, n\}$.
79
+
80
+ metrics and human assessments. A traditional automated metric such as ROUGE (Lin, 2004) is frequently employed for measuring textual similarity and evaluating information overlap. However, this metric is limited in its ability to evaluate semantic coherence and the effective fulfillment of dialogue goals (Liu et al., 2016). In this work, we calculated ROUGE-1, ROUGE-2, and ROUGE-L scores to cover unigram, bigram, and longest common subsequence overlaps, respectively.
81
+
82
+ Recent advancements have introduced more sophisticated evaluation methods, including BERTScore (Zhang et al., 2020), which leverages contextual embeddings from pre-trained BERT models to assess semantic similarity between generated and reference text. BERTScore complements ROUGE by adding a more nuanced semantic layer to the evaluation, capturing aspects that traditional metrics may miss.
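+
+ As an illustration of how these two families of metrics can be computed (a minimal sketch assuming the `rouge-score` and `bert-score` Python packages; the two strings are placeholders for a problem statement and a generated summary):
+
+ ```python
+ # Minimal sketch: score a generated summary against the original problem statement.
+ from rouge_score import rouge_scorer
+ from bert_score import score as bert_score
+
+ reference = "A furniture store only stocks and sells dining tables and chairs. ..."
+ candidate = "You sell dining tables and chairs, making $350 per table and $75 per chair. ..."
+
+ scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
+ rouge = scorer.score(reference, candidate)      # precision, recall, fmeasure per variant
+
+ P, R, F1 = bert_score([candidate], [reference], lang="en")   # one score per pair
+ print(rouge["rougeL"].fmeasure, F1.item())
+ ```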
83
+
84
+ Moreover, the rise of Large Language Models (LLMs) has further enriched the toolkit for text generation evaluation (Fu et al., 2023; Liu et al., 2023). LLMs, with their ability for nuanced analysis, offer possibilities beyond syntactic and surface-level metrics. They can potentially capture deeper aspects of dialogue semantics and goal alignment.
85
+
86
+ # 3 Methodology
87
+
88
+ This section outlines the methods employed to accomplish two objectives of this study: automating the generation of dialogue datasets, and evaluating the quality of the generated dialogues. We utilize a dual-agent setup leveraging an LLM, in our case OpenAI's GPT-4 (OpenAI, 2023), to simulate a
89
+
90
+ conversation between a user and an assistant focusing on linear programming problems.
91
+
92
+ # 3.1 Dual-Agent LLM Setup
93
+
94
+ The dual-agent setup aims to model a conversation between an automatic conversational agent who asks questions to a user, with the aim to identify all key information of the linear problem to model. Specifically, the setup comprises two distinct LLM agents: a Question Generation Agent and a Question Answering Agent who "talk" to each other.
95
+
96
+ - Question Generation (QG) Agent: The primary objective of this agent is to ask questions that will enable it to elicit and summarize the key information of the original problem statement. Importantly, this agent does not have access to the original problem statement and must rely solely on its interactions with the QA Agent to gather all necessary information.
97
+ - Question Answering (QA) Agent: This agent is designed to answer questions based on a pre-defined problem statement from NL4Opt, which serves as the simulated knowledge base for the assistant. To make the dialogue more natural and contextually grounded, the agent is configured to impersonate the individual mentioned in the original problem statement. This design choice enhances the genuineness of the interaction, creating a more realistic user-agent dialogue experience.
98
+
99
+ # 3.2 Implementation Details
100
+
101
+ An essential part of the QA Agent is a component (which also leverages an LLM) that compares a summary with the original problem statement. This component's role is twofold: to provide feedback when discrepancies are found between the provided summary and the original problem statement, and to signal the termination of dialogue generation otherwise. To detect that the QG Agent has generated a summary in the latest dialogue turn, we employed a straightforward approach using regular expressions and predefined keywords.
102
+
103
+ In order to maintain consistency and guide the LLM in its responses, prompts were injected throughout the dialogue. Aside from the initial system prompt, which always begins the conversation, subsequent prompts were introduced at each turn and retained in the dialogue's history. For the QG Agent, a succinct instruction, A NEXT MESSAGE/QUESTION, was provided. For the QA
104
+
105
+ Agent, the prompts varied based on the dialogue stage. Mostly, the instruction ANSWER SHORTLY. USE MAXIMUM 30 WORDS. was used. However, when the QG agent provided a summary, the prompt adjusted accordingly: either feedback on summary², or THE SUMMARY ACCEPTED. IT'S TIME TO FINISH DIALOG AND SAY GOODBYE, signaling the end of the conversation.
106
+
107
+ We utilized the dataset of linear programming problems from the NL4Opt competition (Ramamonjison et al., 2023), specifically from subtask 2. The task is designed to take a natural language problem description, labeled semantic entities, and an ordered mapping of variable mentions, and then formulate a precise meaning representation. These problem descriptions served as the basis for the simulated conversations between the two LLM agents in this work.
108
+
109
+ - Development Subset: For this subset, consisting of 98 problems, two to four separate dialogues were generated per problem.
110
+ - Training Subset: For the training subset, a single dialogue was generated for the first 241 problems.
111
+
112
+ The dual-agent system was implemented using OpenAI's Chat Completion API<sup>3</sup>. We generated the dialogues using different temperature settings for the development subset to ensure a variety of dialogues. The aim was to generate multiple dialogue instances for each problem so that it is possible to robustly evaluate the model's performance and assess the diversity in the generated dataset in subsequent research.
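+
+ A simplified reconstruction of this dual-agent loop is sketched below (our addition, not the authors' released code). `QG_SYSTEM_PROMPT` and `QA_SYSTEM_PROMPT` are placeholders for the Appendix B prompts, the keyword check stands in for the regular-expression summary detector described above, and the feedback step that compares a summary against the original statement is omitted for brevity.
+
+ ```python
+ # Simplified dual-agent loop (a sketch; assumes the `openai` v1 Python package
+ # and that OPENAI_API_KEY is set in the environment).
+ import re
+ from openai import OpenAI
+
+ client = OpenAI()
+ QG_SYSTEM_PROMPT = "..."          # placeholder: the QG Agent prompt from Appendix B
+ QA_SYSTEM_PROMPT = "... {0} ..."  # placeholder: the QA Agent prompt with the problem statement
+
+ def chat(history, temperature=0.0):
+     resp = client.chat.completions.create(
+         model="gpt-4", messages=history, temperature=temperature)
+     return resp.choices[0].message.content
+
+ def looks_like_summary(turn):
+     # stand-in for the paper's regular-expression / keyword check
+     return re.search(r"summar(y|ize|ise)", turn, flags=re.IGNORECASE) is not None
+
+ def generate_dialogue(problem_statement, max_turns=40, temperature=0.0):
+     qg = [{"role": "system", "content": QG_SYSTEM_PROMPT}]
+     qa = [{"role": "system", "content": QA_SYSTEM_PROMPT.format(problem_statement)}]
+     dialogue = []
+     for _ in range(max_turns // 2):
+         question = chat(qg + [{"role": "user", "content": "A NEXT MESSAGE/QUESTION"}],
+                         temperature)
+         qg.append({"role": "assistant", "content": question})
+         qa.append({"role": "user", "content": question})
+         dialogue.append(("QG", question))
+         done = looks_like_summary(question)
+         instruction = ("THE SUMMARY ACCEPTED. IT'S TIME TO FINISH DIALOG AND SAY GOODBYE"
+                        if done else "ANSWER SHORTLY. USE MAXIMUM 30 WORDS.")
+         answer = chat(qa + [{"role": "user", "content": instruction}], temperature)
+         qa.append({"role": "assistant", "content": answer})
+         qg.append({"role": "user", "content": answer})
+         dialogue.append(("QA", answer))
+         if done:
+             break
+     return dialogue
+ ```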
113
+
114
+ Figure 1 shows part of a sample dialogue, and figure 2 shows the associated problem statement. Appendix B shows the actual prompts used.
115
+
116
+ # 4 Evaluation Strategy
117
+
118
+ To evaluate the quality of the generated dialogues, we conduct an extrinsic evaluation. The evaluation process focuses on an underlying hypothesis that serves as the core of our study: a correct summary generated by the QG Agent would imply that the agent has posed the appropriate and necessary questions to restore the original problem statement effectively. The hypothesis is grounded in several key assumptions and logical deductions:
119
+
120
+ QG Agent: Hello there! I'm OptiMouse, your friendly helper. I'm here to assist you in making the best decision for your situation. Could you please tell me what you're trying to achieve or improve?
121
+ QA Agent: Hello! I'm looking to maximize profit for my furniture store while considering space, capital, and product ratio constraints.
122
+ QG Agent: That sounds like a great goal! To help you better, could you please tell me what types of furniture you sell in your store?
123
+ QA Agent: We sell dining tables and chairs in our store.
124
+ QG Agent: That's great! Now, could you please tell me how much profit you make from selling a dining table and how much from selling a chair?
125
+ QA Agent: We make a profit of $350 from selling a dining table and $75 from selling a chair.
126
+ QG Agent: That's helpful to know. Now, could you please tell me how much space a dining table and a chair take up in your store?
127
+ QA Agent: A dining table requires 8 sq ft of floor space while a chair requires 2 sq ft.
128
+
129
+ 1 1
130
+ [... intermediate dialogue turns omitted in the figure ...]
131
+
132
+ QG Agent: That's a very important detail. Now, could you please tell me if there are any other constraints or requirements that we need to consider for your furniture store?
133
+
134
+ QA Agent: No, there are no other constraints or requirements for my furniture store.
135
+ QG Agent: Great! Let me summarize the information:
136
+
137
+ - You sell dining tables and chairs in your furniture store
138
+ - You make a profit of $350 from selling a dining table and $75 from selling a chair.
139
+ - A dining table requires 8 sq ft of floor space while a chair requires 2 sq ft.
140
+ - You have 500 sq ft of space available in your store.
141
+ - You have a maximum of $20000 available to purchase the dining tables and chairs.
142
+ - A dining table costs you $1000 to purchase and a chair costs $150.
143
+ - At least $70\%$ of all furniture in the store must be chairs.
144
+
145
+ Is there anything else you would like to add or modify in this summary?
146
+
147
+ Figure 1: A sample dialogue between a QG Agent and a QA Agent about maximizing furniture store profits.
148
+
149
+ A furniture store only stocks and sells dining tables and chairs. The profit per dining table is $350 and the profit per chair is $75. There is 500 sq ft of space available and a dining table requires 8 sq ft of floor space while a chair requires 2 sq ft. Because chairs sell in larger quantities, at least 70% of all furniture in the store must be chairs. In terms of capital, a dining table ties up $1000 in capital and a chair ties up $150 in capital. The company wants a maximum of $20000 worth of capital tied up at any time. Formulate an LP to maximize profit.
150
+
151
+ Figure 2: A sample problem statement from the NL4Opt dataset.
152
+
153
+ Extrinsic evaluation. In dialogue systems, particularly goal-oriented systems, the dialogue often progresses through a series of questions and answers to reach a particular end state (Qi et al., 2020; Majumder et al., 2021). Therefore, the questions asked directly impact the quality and accuracy of the final output — here, the summary of the original problem statement. We, therefore, conduct an extrinsic evaluation of the dialogues.
154
+
155
+ Incomplete Initial Information. The QG Agent starts with incomplete information about the problem at hand. Therefore, asking the right questions is crucial for the agent to gather enough information for an accurate summary. A poor or incomplete summary would suggest that the agent has asked insufficient or incorrect questions.
156
+
157
+ Semantic Understanding. An accurate summary involves not just factual correctness but also a semantic understanding of the problem's constraints and objectives. Therefore, correct summarization can be viewed as an implicit validation of the agent's ability to grasp the problem's complexities through its questions.
158
+
159
+ To validate these assumptions, we use an approach involving human evaluation as well as automated evaluation through a third LLM agent of generated summaries. By evaluating the correctness of the generated summaries, we thus indirectly assess the QG Agent's ability to pose appropriate and informative questions that lead to a comprehensive understanding of the problem.
160
+
161
+ # 4.1 Automatic Evaluation
162
+
163
+ For the automated evaluation, in addition to the well-known ROUGE and BERTScore, we designed a metric that employed a third LLM (GPT-4) agent to compare the original problem statement with the generated summary. The evaluation was conducted
164
+
165
+ <table><tr><td>Metric</td><td>Value</td></tr><tr><td>ROUGE-1 P</td><td>0.54</td></tr><tr><td>ROUGE-1 R</td><td>0.62</td></tr><tr><td>ROUGE-1 F1</td><td>0.57</td></tr><tr><td>ROUGE-2 P</td><td>0.33</td></tr><tr><td>ROUGE-2 R</td><td>0.39</td></tr><tr><td>ROUGE-2 F1</td><td>0.35</td></tr><tr><td>ROUGE-L P</td><td>0.38</td></tr><tr><td>ROUGE-L R</td><td>0.43</td></tr><tr><td>ROUGE-L F1</td><td>0.40</td></tr><tr><td>BERTscore P</td><td>0.88</td></tr><tr><td>BERTscore R</td><td>0.91</td></tr><tr><td>BERTscore F1</td><td>0.90</td></tr><tr><td>GPT-4 R</td><td>4.60</td></tr><tr><td>GPT-4 P</td><td>4.62</td></tr></table>
166
+
167
+ Table 2: Average values of metrics per entire dataset. All values range from 0 to 1 except GPT-4, which ranges from 1 to 5.
168
+
169
+ using a "chain-of-thought" prompting (Wei et al., 2022). Our agent employs three criteria ("chains") to evaluate each summary:
170
+
171
+ - Correct Information: Assessing if the summary accurately represents the facts in the original problem statement.
172
+ - Incorrect Information: Identifying any factual inaccuracies or misleading statements in the summary.
173
+ - Missing Information: Determining if any crucial elements from the original problem statement are bypassed in the summary.
174
+
175
+ Considering the three criteria above, the agent provides an "Information Recall Score", "Information Precision Score", "Information Repetition Score", and "Readability Score" to judge if the summary forms a coherent and accurate representation of the original problem. These evaluation metrics are the same as the human evaluation conducted in Section 4.2. Appendix B shows the prompts used.
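+
+ A sketch of this evaluation agent is shown below (our reconstruction, not the released code); `EVAL_PROMPT` stands in for the Appendix B evaluation prompt, which asks the model to return a JSON object containing the three "chains" of evidence and the four scores.
+
+ ```python
+ # Reconstruction sketch of the GPT-4 evaluation agent (assumes the `openai`
+ # v1 package and that the model returns valid JSON, as the prompt requests).
+ import json
+ from openai import OpenAI
+
+ client = OpenAI()
+ EVAL_PROMPT = "... {0} ... {1} ..."   # placeholder for the Appendix B evaluation prompt
+
+ def evaluate_summary(problem_statement, summary, model="gpt-4"):
+     prompt = EVAL_PROMPT.format(problem_statement, summary)
+     resp = client.chat.completions.create(
+         model=model,
+         messages=[{"role": "user", "content": prompt}],
+         temperature=0.0,
+     )
+     result = json.loads(resp.choices[0].message.content)
+     return {
+         "recall": result["Information Recall Score"],
+         "precision": result["Information Precision Score"],
+         "repetition": result["Information Repetition Score"],
+         "readability": result["Readability Score"],
+     }
+ ```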
176
+
177
+ # 4.1.1 Results of the Automatic Evaluation
178
+
179
+ Table 2 summarizes the average metric values across the entire dataset. As we can observe, the absolute ROUGE values are not very high. However, as we will see below, human evaluation of a subset of the data reveals that the dialogues generally produced good summaries.
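+
+ The ROUGE and BERTScore rows of Table 2 can be computed with the rouge-score and bert-score packages; the following is a sketch of that computation (package choice and function names are our assumptions, not a statement of the exact pipeline used):
+
+ ```python
+ from rouge_score import rouge_scorer
+ from bert_score import score as bert_score
+
+ def automatic_metrics(problem_statement: str, summary: str) -> dict:
+     """ROUGE-1/2/L and BERTScore of a summary against the original statement."""
+     scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
+     rouge = scorer.score(problem_statement, summary)  # reference first, candidate second
+     P, R, F1 = bert_score([summary], [problem_statement], lang="en")
+     return {
+         "rouge1_f1": rouge["rouge1"].fmeasure,
+         "rouge2_f1": rouge["rouge2"].fmeasure,
+         "rougeL_f1": rouge["rougeL"].fmeasure,
+         "bertscore_p": P.item(),
+         "bertscore_r": R.item(),
+         "bertscore_f1": F1.item(),
+     }
+ ```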
180
+
181
+ <table><tr><td>Name</td><td>Value</td></tr><tr><td>Total number of dialogues</td><td>476</td></tr><tr><td>Dialogues with temperature 0</td><td>315</td></tr><tr><td>Dialogues with temperature 1</td><td>149</td></tr><tr><td>Dialogues with summary</td><td>97%</td></tr><tr><td>Average dialogue length (characters)</td><td>3658</td></tr><tr><td>Total number of turns</td><td>9480</td></tr><tr><td>Average number of turns per dialogue</td><td>20</td></tr><tr><td>Average turn length (characters)</td><td>184</td></tr></table>
+
+ Table 3: Summary statistics of generated dialogues.
182
+
183
+ To provide a more nuanced understanding of the generated dialogues, we have collected some summary statistics, presented in Table 3. The dialogues were generated with varying temperature settings to control the randomness of the text generated by the LLMs. In a small number of cases (3%), the agents did not produce a summary before the cut-off of 40 turns (20 turns per agent). The high percentage of dialogues with a summary (97%) indicates the Question Generation Agent's effectiveness in concluding the dialogues with a summary, which is crucial for our evaluation. Dialogue length and number of turns give an indication of the depth and extent of the conversations between the agents.
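+
+ The statistics in Table 3 are straightforward to recompute from the released dialogues; the sketch below assumes a hypothetical record schema with a list of turn strings and a has_summary flag:
+
+ ```python
+ def dialogue_stats(dialogues):
+     """Summary statistics of the kind reported in Table 3.
+     `dialogues` is a list of dicts with a "turns" list of strings and a
+     boolean "has_summary" flag (hypothetical schema)."""
+     n = len(dialogues)
+     turns = [t for d in dialogues for t in d["turns"]]
+     return {
+         "total_dialogues": n,
+         "with_summary_pct": 100 * sum(d["has_summary"] for d in dialogues) / n,
+         "avg_dialogue_chars": sum(len("".join(d["turns"])) for d in dialogues) / n,
+         "total_turns": len(turns),
+         "avg_turns_per_dialogue": len(turns) / n,
+         "avg_turn_chars": sum(len(t) for t in turns) / len(turns),
+     }
+ ```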
184
+
185
+ # 4.2 Human Evaluation
186
+
187
+ For the human evaluation component, we carefully curated a subset of 28 problem statements from the development subset. This subset was selected to cover all constraint types represented in the development data (Table 1), ensuring a comprehensive evaluation across diverse problem scenarios.
188
+
189
+ Given the small pool of problems (98), selecting the 28 problems was straightforward: we first applied a greedy approach that satisfies the quantity requirements of the less frequent constraint types (sketched below), and then manually swapped selected candidates with other candidates from the development set as appropriate, until the selection reached an acceptable distribution of constraint types. Table 4 shows the distribution of constraint types in the development set and in the selection used for human evaluation.
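+
+ A minimal sketch of the greedy step of this selection (the data structures are hypothetical: `problems` maps a problem id to its set of constraint types, and `targets` maps a constraint type to the count required in the selection):
+
+ ```python
+ from collections import Counter
+
+ def greedy_selection(problems, targets, k=28):
+     """Pick problems until per-type targets are met or k problems are chosen.
+     Manual swaps are applied afterwards on top of this initial selection."""
+     selected, covered = [], Counter()
+     remaining = dict(problems)
+     while remaining and len(selected) < k:
+         # Score each candidate by how many still-under-covered types it contributes.
+         def gain(pid):
+             return sum(1 for t in remaining[pid] if covered[t] < targets.get(t, 0))
+         best = max(remaining, key=gain)
+         if gain(best) == 0:
+             break
+         covered.update(remaining.pop(best))
+         selected.append(best)
+     return selected
+ ```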
190
+
191
+ To add an element of variability in dialogue generation, each problem statement in this selection was subjected to a single dialogue generation run. The temperature setting for this run was randomly selected to be either 0 or 1.
192
+
193
196
+
197
+ <table><tr><td></td><td>Constraint Type</td><td>Dev</td><td>Sel</td></tr><tr><td>1</td><td>Upper bound on single variable</td><td>20</td><td>6</td></tr><tr><td>2</td><td>Upper bound on sum of variables</td><td>12</td><td>4</td></tr><tr><td>3</td><td>Upper bound on weighted sum of variables</td><td>93</td><td>28</td></tr><tr><td>4</td><td>Upper bound on proportion</td><td>8</td><td>2</td></tr><tr><td>5</td><td>Lower bound on single variable</td><td>35</td><td>11</td></tr><tr><td>6</td><td>Lower bound on sum of variables</td><td>7</td><td>2</td></tr><tr><td>7</td><td>Lower bound on weighted sum of variables</td><td>59</td><td>18</td></tr><tr><td>8</td><td>Lower bound on proportion</td><td>15</td><td>5</td></tr><tr><td>9</td><td>Comparison</td><td>43</td><td>13</td></tr></table>
200
+
201
+ Table 4: Counts of types of linear optimization constraints in the development set of 98 problems ("Dev" column), and the selection of 28 problems ("Sel" column). The sum is larger than the number of problems since a problem may have multiple constraints.
202
+
203
+ <table><tr><td>Metric</td><td>Fleiss&#x27; Kappa</td></tr><tr><td>Information Recall</td><td>0.205</td></tr><tr><td>Information Precision</td><td>0.387</td></tr><tr><td>Information Repetition</td><td>-0.009</td></tr><tr><td>Readability</td><td>0.235</td></tr></table>
204
+
205
+ Table 5: Inter-annotator agreement of each of the 4 human evaluation metrics.
206
+
207
+ Four evaluators (details in the Acknowledgements section) then scored how well the summary generated at the end of the dialogue matches the problem statement. For every pair of a problem statement and a generated summary, each evaluator produced the following 4 evaluation metrics. These metrics have been adopted from the human evaluation performed by Tsatsaronis et al. (2015):
208
+
209
+ - Information recall (IR) (1-5) - All the necessary information is in the generated summary.
210
+ - Information precision (IP) (1-5) - No irrelevant information is generated.
211
+ - Information repetition (IRep) (1-5) - The generated summary does not repeat the same information multiple times.
212
+ - Readability (Read) (1-5) - The generated summary is easily readable and fluent.
213
+
214
+ # 4.2.1 Results of the Human Evaluation
215
+
216
+ Inter-annotator agreement of each of the 4 human evaluation metrics, as computed by Fleiss' Kappa, is shown in Table 5.
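+
+ Fleiss' Kappa per metric can be computed, for example, with statsmodels; the sketch below assumes the ratings for one metric are arranged as an items-by-annotators matrix:
+
+ ```python
+ import numpy as np
+ from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa
+
+ def kappa_for_metric(ratings):
+     """`ratings`: (n_items, n_annotators) array of 1-5 scores for one metric."""
+     table, _ = aggregate_raters(np.asarray(ratings))  # items x rating-category counts
+     return fleiss_kappa(table)
+ ```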
217
+
218
+ We observe virtually no agreement for Information Repetition, slight agreement for Information Recall and Readability, and fair agreement for Information Precision.
219
+
220
+ <table><tr><td>Annotator</td><td>IR</td><td>IP</td><td>IRep</td><td>Read</td></tr><tr><td>1</td><td>4.25</td><td>4.25</td><td>4.89</td><td>4.96</td></tr><tr><td>2</td><td>4.18</td><td>4.54</td><td>4.93</td><td>4.96</td></tr><tr><td>3</td><td>4.68</td><td>4.39</td><td>4.93</td><td>4.86</td></tr><tr><td>4</td><td>4.03</td><td>4.36</td><td>4.82</td><td>4.89</td></tr><tr><td>All</td><td>4.29</td><td>4.38</td><td>4.89</td><td>4.92</td></tr></table>
+
+ Table 6: Average human evaluation scores for the sample of 28 documents. IR = Information Recall. IP = Information Precision. IRep = Information Repetition. Read = Readability.
221
+
222
+ Table 6 shows the average values of the human evaluation scores for the selection of 28 problems. Overall, the human evaluation showed high values, and very high values for Information Repetition and Readability. This suggests that the dialogues generated by the pair of agents are of good quality.
223
+
224
+ The human annotators observed the following most common mistakes in the generated summaries. See Appendix A for examples of each.
225
+
226
+ - Missing objective function or decision variables.
227
+ - Inclusion of additional information that appears to come from the answers to the agent's clarifying questions, such as "otherwise the order does not matter" or "there is no upper limit on costs".
228
+
229
+ # 4.3 Correlation Analysis of Automatic and Human Evaluations
230
+
231
+ Table 7 shows Spearman's rank correlation coefficient $\rho$ between the automatic and human evaluations. The table compares the recall values of the automatic metrics against Information Recall, the precision values against Information Precision, and the F1 values against the harmonic mean of Information Recall and Information Precision. The last column of the table shows the correlation between the F1 values of the automatic metrics and the average of Information Recall, Information Precision, Information Repetition, and Readability; the values are nearly identical to those for the harmonic mean of Information Recall and Information Precision. The reason may be that the annotations for Information Repetition and Readability are nearly always 5, so their contribution is almost a constant that does not change the rankings and therefore does not affect the values of $\rho$. This may be a consequence of using GPT-4 and the carefully designed prompts, which instruct the system to be clear and concise.
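+
+ A sketch of the correlation computation behind Table 7, assuming aligned per-problem lists of automatic and human scores (the helper name is an illustrative assumption):
+
+ ```python
+ from scipy.stats import spearmanr
+
+ def correlation_with_humans(auto_f1, human_ir, human_ip):
+     """Spearman's rho between an automatic metric's F1 values and the harmonic
+     mean of the human Information Recall and Information Precision scores."""
+     harmonic = [2 * r * p / (r + p) for r, p in zip(human_ir, human_ip)]
+     rho, _ = spearmanr(auto_f1, harmonic)
+     return rho
+ ```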
232
+
235
+ <table><tr><td>Metric</td><td>IR</td><td>IP</td><td>IF1</td><td>IAvg</td></tr><tr><td>ROUGE-1 R</td><td>0.43</td><td></td><td></td><td></td></tr><tr><td>ROUGE-1 P</td><td></td><td>0.58</td><td></td><td></td></tr><tr><td>ROUGE-1 F1</td><td></td><td></td><td>0.62</td><td>0.60</td></tr><tr><td>ROUGE-2 R</td><td>0.48</td><td></td><td></td><td></td></tr><tr><td>ROUGE-2 P</td><td></td><td>0.58</td><td></td><td></td></tr><tr><td>ROUGE-2 F1</td><td></td><td></td><td>0.56</td><td>0.57</td></tr><tr><td>ROUGE-L R</td><td>0.47</td><td></td><td></td><td></td></tr><tr><td>ROUGE-L P</td><td></td><td>0.74</td><td></td><td></td></tr><tr><td>ROUGE-L F1</td><td></td><td></td><td>0.71</td><td>0.69</td></tr><tr><td>BERTScore R</td><td>0.53</td><td></td><td></td><td></td></tr><tr><td>BERTScore P</td><td></td><td>0.74</td><td></td><td></td></tr><tr><td>BERTScore F1</td><td></td><td></td><td>0.65</td><td>0.65</td></tr><tr><td>GPT-4 R</td><td>0.42</td><td></td><td></td><td></td></tr><tr><td>GPT-4 P</td><td></td><td>0.67</td><td></td><td></td></tr><tr><td>GPT-4 F1</td><td></td><td></td><td>0.59</td><td>0.58</td></tr></table>
236
+
237
+ Table 7: Spearman's rank correlation coefficient $\rho$ between the automatic and human evaluations. $\mathrm{IR} =$ correlation with Information Recall; $\mathrm{IP} =$ correlation with Information Precision; $\mathrm{IF1} =$ correlation with the harmonic mean of IR and IP; $\mathrm{IAvg} =$ correlation with the average of Information Recall, Information Precision, Information Repetition, and Readability.
238
+
240
+
241
+ Among all automatic metrics, Table 7 shows that ROUGE-L achieves the best correlation values. GPT-4 achieved competitive results but did not outperform the other metrics. Further work is needed to improve the use of GPT-4: in particular, GPT-4 was usually more generous and gave higher ratings than the human evaluators. Including few-shot samples and more sophisticated prompts might help align the system with the human annotators.
242
+
243
+ # 5 Summary and Conclusion
244
+
245
+ This paper presents a dataset for the task of eliciting information from the user through a dialogue with a conversation agent. The specific use of the elicited information is the automatic modeling of linear optimization problems. This is in itself a very useful task with broad potential applications, and the methods for data generation and evaluation proposed here can be easily adapted to other tasks. The data and human evaluations are available to the research community. $^1$
246
+
247
+ The dialogues were generated in a dual-agent LLM setup where a question generation agent acted as the machine agent that elicited information, and a question answering agent acted as the human who had the information about the problem to model. The question generation agent can be used as a baseline agent. The human evaluation results indicate that this baseline may be effective for the task.
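+
+ A schematic sketch of such a dual-agent loop under the 40-turn cut-off described in Section 4.1.1 (the reply functions and the summary heuristic are placeholders for the prompted GPT-4 agents, not the released implementation):
+
+ ```python
+ MAX_TURNS = 40  # 20 turns per agent
+
+ def looks_like_summary(message: str) -> bool:
+     # Heuristic placeholder: the QG prompt asks for a bullet-point summary.
+     return message.count("\n- ") >= 3
+
+ def run_dialogue(problem_statement, qg_reply, qa_reply):
+     """Alternate QG and QA agent turns until the QG agent produces a summary
+     or the turn limit is reached. `qg_reply` and `qa_reply` are hypothetical
+     callables wrapping the two prompted LLM agents."""
+     history = []
+     for turn in range(MAX_TURNS):
+         if turn % 2 == 0:            # QG agent asks (or summarizes)
+             message = qg_reply(history)
+         else:                        # QA agent answers from the problem statement
+             message = qa_reply(history, problem_statement)
+         history.append(message)
+         if turn % 2 == 0 and looks_like_summary(message):
+             break
+     return history
+ ```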
250
+
251
+ The dialogues for a subset of 28 LP problems were evaluated using an extrinsic evaluation that judged whether the summaries generated by the dialogues matched the key information from the original problem descriptions. The evaluation was conducted both by human evaluators and automatically. Besides the well-known automatic metrics ROUGE and BERTScore, we designed another GPT-4 agent that mimicked the human evaluators. The results indicate a reasonable correlation of ROUGE-L and BERTScore P with the average human Information Precision scores, slightly better than the correlation between the GPT-4 agent and the human IP scores.
252
+
253
+ As further work, we intend to refine the prompts used for the GPT-4-based evaluation. In addition, we will conduct more exhaustive evaluations of the dataset that are better suited to the specific domain of linear programming modeling. In particular, we plan to analyse the generated dialogues at the level of individual dialogue turns.
254
+
255
+ # Acknowledgements
256
+
257
+ This work was partially funded by the Australian Research Council, Australia through the Discovery Project 2022 (grant number DP220101925). In addition to two authors of this paper who have annotated the data (Diego Molla-Aliod and John Yearwood), we acknowledge the annotations provided by Vicky Mak-Hau and Thuseethan Selvarajah.
258
+
259
+ # References
260
+
261
+ Der-San Chen, Robert Batson, and Yu Dang. 2011. Applied Integer Programming: Modeling and Solution.
262
+ Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. 2023. GPTScore: Evaluate as you desire.
263
+ Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
264
+
265
+ Chia-Wei Liu, Ryan Lowe, Iulian Serban, Mike Noseworthy, Laurent Charlin, and Joelle Pineau. 2016. How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2122-2132, Austin, Texas. Association for Computational Linguistics.
266
+ Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-Eval: NLG evaluation using GPT-4 with better human alignment.
267
+ Bodhisattwa Prasad Majumder, Sudha Rao, Michel Galley, and Julian McAuley. 2021. Ask what's missing and what's useful: Improving clarification question generation using global knowledge. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4300-4312, Online. Association for Computational Linguistics.
268
+ OpenAI. 2023. GPT-4 technical report. ArXiv, abs/2303.08774.
269
+ Peng Qi, Yuhao Zhang, and Christopher D. Manning. 2020. Stay hungry, stay focused: Generating informative and specific questions in information-seeking conversations. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 25-40, Online. Association for Computational Linguistics.
270
+ Rindra Ramamonjison, Haley Li, Timothy Yu, Shiqi He, Vishnu Rengan, Amin Banitalebi-dehkordi, Zirui Zhou, and Yong Zhang. 2022. Augmenting operations research with auto-formulation of optimization models from problem descriptions. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 29-62, Abu Dhabi, UAE. Association for Computational Linguistics.
271
+ Rindranirina Ramamonjison, Timothy T. Yu, Raymond Li, Haley Li, Giuseppe Carenini, Bissan Ghaddar, Shiqi He, Mahdi Mostajabdaveh, Amin Banitalebi-Dehkordi, Zirui Zhou, and Yong Zhang. 2023. NL4Opt competition: Formulating optimization problems based on their natural language descriptions.
272
+ George Tsatsaronis, Georgios Balikas, Prodromos Malakasiotis, Ioannis Partalas, Matthias Zschunke, Michael R Alvers, Dirk Weissenborn, Anastasia Krithara, Sergios Petridis, Dimitris Polychronopoulos, Yannis Almirantis, John Pavlopoulos, Nicolas Baskiotis, Patrick Gallinari, Thierry Artieres, Axel-Cyrille Ngonga Ngomo, Norman Heino, Eric Gaussier, Liliana Barrio-Alvers, Michael Schroeder, Ion Androutsopoulos, and Georgios Paliouras. 2015. An overview of the BIOASQ large-scale biomedical semantic indexing and question answering competition. BMC Bioinformatics, 16(1):138.
273
+
274
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc.
275
+
276
+ Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating text generation with BERT.
277
+
278
+ # A Sample Generated Summaries with Mistakes
279
+
280
+ The following generated summary did not include the objective function:
281
+
282
+ # Problem statement:
283
+
284
+ A suit store makes 2-piece and 3-piece suits. To make a 2-piece suit requires 1 hour of measuring and 4 hours of stitching. To make a 3-piece suit requires 2 hours of measuring and 5 hours of stitching. The store has 150 hours of measuring time and 300 hours of stitching time available. In order to stay in business, at least 40 2-piece suits should be made. The profit per 2-piece suit is $600 and the profit per 3-piece suit is $800. Formulate a LP to maximize profit?
285
+
286
+ # Generated summary:
287
+
288
+ Alright, thanks for the information! To make sure I've got everything, here's a summary of your situation:
289
+
290
+ - You manufacture two types of suits: 2-piece suits and 3-piece suits.
291
+ - A 2-piece suit makes a profit of $600, requires 1 hour of measuring time and 4 hours of stitching time.
292
+ - A 3-piece suit makes a profit of $800, requires 2 hours of measuring time and 5 hours of stitching time.
293
+ - You have 150 hours of measuring time and 300 hours of stitching time available.
294
+ - At least 40 2-piece suits must be produced, but there is no minimum requirement for 3-piece suits.
295
+ - There is no limit on how many suits you can sell.
296
+
297
+ Have I missed out on anything or is there any other specific requirement or constraint that I should know about?
298
+
299
+ The following generated summary did not mention decision variables:
300
+
301
+ # Problem statement:
302
+
303
+ A chemical company makes an acidic liquid and a basic liquid using two types of techniques, reaction $X$ and reaction $Y$ . Reaction $X$ can make 35 au of the acidic liquid and 75 au of the basic liquid per minute. Reaction $Y$ can make 42 au of the acidic liquid and 35 au of the basic liquid per minute. Furthermore, reaction $X$ requires 18 units of heat every minute while reaction $Y$ requires 12 units of heat every minute. The chemical company has the capacity to generate at most 1400 units of heat every minute. If the chemical company needs to make at least 900 au of the acidic liquid and 1200 au of the basic liquid per minute, how many minutes should each reaction be run for to minimize the total time needed?
304
+
305
+ # Generated summary:
306
+
307
+ Great! Let's summarize the information:
308
+
309
+ - Your company has two types of reactions, $X$ and $Y$ , that produce acidic and basic liquids.
310
+ - Reaction $X$ requires 18 units of heat per minute and produces 35 au of acidic and 75 au of basic liquid.
311
+ - Reaction $Y$ requires 12 units of heat per minute and produces 42 au of acidic and 35 au of basic liquid.
312
+ - Your company can generate at most 1400 units of heat every minute.
313
+ - You need to produce at least 900 au of the acidic liquid and 1200 au of the basic liquid per minute.
314
+ - Your goal is to meet these production requirements in the shortest time possible.
315
+
316
+ Is there anything else that I should know or did I miss anything?
317
+
318
+ The following summary has additional information that makes sense but is not in the original problem description:
319
+
320
+ # Problem statement:
321
+
322
+ A spicy paste is to be made from units of peppers and tomatoes and is to contain a minimum of 130 units of spice and 90 units of flavor. Peppers cost $8 per unit and tomatoes cost $5 per unit. One unit of peppers contains 12 units of spice and 2 units of flavor. One unit of tomatoes contains 1 unit of spice and 8 units of flavor. Formulate this as an LP and find the minimum cost spicy paste that can be made.
325
+
326
+ # Generated summary:
327
+
328
+ Thanks for the information. Based on your responses, here's your situation as I understand:
329
+
330
+ - You're making a spicy paste using peppers and tomatoes.
331
+ - Each unit of peppers costs $8, contributes 12 units of spice and 2 units of flavor.
332
+ - Each unit of tomatoes costs $5, contributes 1 unit of spice and 8 units of flavor.
333
+ - Each batch of spicy paste should contain at least 130 units of spice and 90 units of flavor.
334
+ - There are no restrictions on the maximum units of peppers and tomatoes you can use.
335
+ - There's no specific budget, but you aim to minimize costs.
336
+
337
+ Have I captured everything correctly?
338
+
339
+ # B LLM Prompts
340
+
341
+ # Evaluation Prompt:
342
+
343
+ You are an AI evaluator specializing in assessing the quality of summaries.
344
+
345
+ Carefully check how the summary captured a linear programming problem statement.
346
+
347
+ Important information for this task is explicit names and values of decision variables, constraints of all types, and an objective function.
348
+
349
+ Your primary goal is to rate the summary based on Information Recall, Information Precision, Information Repetition and Readability.
350
+
351
+ The Problem Statement:
+
+ {0}
+
+ The Provided Summary:
+
+ {1}
362
+
363
+ PROVIDE THE ANSWER IN A JSON FORMAT WITH FOLLOWING FIELDS:
364
+ "correct_information" - string | information accurately captured in the summary
365
+ "missing_information" - string | important information existing in the original problem statement but not captured in the summary.
366
+ "incorrect_information" - string | information existing in an original problem description but wrongly/incorrectly captured in a summary
367
+ "Information Recall Score" - int | Score from 1 to 5
368
+ "Information Precision Score" - int | Score from 1 to 5
369
+ "Information Repetition Score" - int | Score from 1 to 5
370
+ "Readability Score" - int | Score from 1 to 5
371
+
372
+ # QG Agent Prompt:
373
+
374
+ YOU ARE "OptiMouse" - A CHATBOT HELPING USERS TO FORMULATE FULL OPTIMIZATION PROBLEM STATEMENT.
375
+
376
+ THE USER IS NOT A MATH EXPERT AND HAS NO EXPERIENCE WITH MATH AND OPTIMIZATIONS.
377
+
378
+ DO NOT USE ANY MATHEMATICAL TERMINOLOGY OR EXPLANATIONS SUCH AS OBJECTIVE FUNCTION, CONSTRAINTS, ETC.
379
+
380
+ GATHER NECESSARY DETAILS THAT CAN BE MAPPED TO A LINEAR PROGRAMMING MODEL.
381
+
382
+ ENGAGE USERS BY ASKING CLEAR, CONCISE, AND SEQUENTIAL QUESTIONS TO RECEIVE INFORMATION ABOUT CONSTRAINTS AND OBJECTIVE FUNCTION.
383
+
384
+ ASK A QUESTION BASED ON THE PREVIOUS INFORMATION THAT WILL LEAD TO GETTING A CONSTRAINT OR OTHER PARAMETER OF THE MODEL.
385
+
386
+ THINK DEEPLY SO YOU WILL BE ABLE TO GET FULL PROBLEM DETAILS.
387
+
388
+ ONE QUESTION ALLOWED PER MESSAGE.
389
+
390
+ PROVIDE A SUMMARY IN BULLET POINTS (SEE EXAMPLE DELIMITED BY "===") ONCE YOU HAVE ALL THE INFORMATION NEEDED
391
+
392
+ DO NOT INCLUDE UNKNOWN/NON-FACTUAL CONSTRAINTS IN A SUMMARY (For example, 'There's no specific requirement on X...', 'There's no limit on X...')
393
+
394
+ ASK A CLARIFICATION QUESTION BEFORE PROVIDING A SUMMARY TO MAKE SURE YOU HAVE ALL THE CONSTRAINTS AND AN OBJECTIVE FUNCTION.
395
+
396
+ EXAMPLE OF A SUMMARY:
397
+
398
+ ===
399
+
400
+ - A coconut seller has to transport coconuts using either rickshaws or ox carts.
401
+ - The rickshaws can take 50 coconuts each and cost $10 per trip.
402
+ - The ox carts can take 30 coconuts each and cost $8 per trip.
403
+ - The seller has at most $200 to spend on transporting the coconuts.
404
+ - The number of rickshaws must not exceed the number of ox carts.
405
+ ===
406
+
407
+ START THE CONVERSATION WITH A FRIENDLY GREETING, INTRODUCING YOURSELF AND ASKING WHAT THE USER WOULD LIKE TO OPTIMISE.
408
+
409
+ # QA Agent Prompt:
410
+
411
+ YOU ARE AGENT IMPERSONATING THE BUSINESS OWNER MENTIONED IN THE PROBLEM STATEMENT (DELIMITED BY \*\*).
412
+
413
+ BE POLITE.
414
+
415
+ YOU (THE BUSINESS OWNER) ARE TALKING WITH AN EXPERT IN OPTIMIZATIONS.
416
+
417
+ ACCURATELY PROVIDE INFORMATION AS REQUESTED BASED ON THE PROBLEM STATEMENT.
418
+
419
+ MAKE SURE THE INFORMATION YOU ARE PROVIDING IS CORRECT AND CAN BE FOUND IN THE PROBLEM STATEMENT.
420
+
421
+ IF THE PROBLEM STATEMENT DOES NOT CONTAIN REQUESTED INFORMATION, SIMPLY SAY YOU DON'T KNOW THESE DETAILS. (for example, "I'm not sure about it, can we skip this")
422
+
423
+ DO NOT MAKE CALCULATIONS OR INFORMATION MANIPULATING. Use facts from the problem (for example, question: How many X are produced in a day? Answer: I'm not sure, but I know that to produce one X, we need Y minutes.)
424
+
425
+ DO NOT MENTION THE PROBLEM STATEMENT ANYWHERE; ACT AS IF IT IS YOUR PERSONAL KNOWLEDGE.
426
+
427
+ THE PROBLEM STATEMENT:
428
+
429
+ {0}
430
+
431
+ START THE CONVERSATION WITH A WARM GREETING
2401.17xxx/2401.17461/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b15138c114a182b0ed357ae335b69252a11bf13d3b628026750256ca15f249a8
3
+ size 237492
2401.17xxx/2401.17461/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.17xxx/2401.17464/61117e09-cb04-4043-868a-02a2e7fb24fe_content_list.json ADDED
The diff for this file is too large to render. See raw diff